From bba0166716e66f60e5e52f9e8cd4f00b2d9a96af Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Fri, 31 May 2024 12:47:22 -0400 Subject: [PATCH 01/69] generate help files for all the scripts in the scripts folder --- os | 0 scilpy-bot-scripts/generate_help_files.py | 42 +++++ scripts/.hidden/scil_NODDI_maps.py.help | 57 ++++++ scripts/.hidden/scil_NODDI_priors.py.help | 70 +++++++ scripts/.hidden/scil_aodf_metrics.py.help | 93 +++++++++ scripts/.hidden/scil_bids_validate.py.help | 42 +++++ scripts/.hidden/scil_bingham_metrics.py.help | 52 ++++++ scripts/.hidden/scil_btensor_metrics.py.help | 94 ++++++++++ .../scil_bundle_clean_qbx_clusters.py.help | 54 ++++++ .../scil_bundle_compute_centroid.py.help | 24 +++ .../scil_bundle_compute_endpoints_map.py.help | 42 +++++ scripts/.hidden/scil_bundle_diameter.py.help | 70 +++++++ .../scil_bundle_filter_by_occurence.py.help | 38 ++++ .../scil_bundle_generate_priors.py.help | 58 ++++++ scripts/.hidden/scil_bundle_label_map.py.help | 43 +++++ .../scil_bundle_mean_fixel_afd.py.help | 50 +++++ ...il_bundle_mean_fixel_afd_from_hdf5.py.help | 50 +++++ ...l_bundle_mean_fixel_bingham_metric.py.help | 48 +++++ scripts/.hidden/scil_bundle_mean_std.py.help | 51 +++++ .../scil_bundle_pairwise_comparison.py.help | 57 ++++++ .../scil_bundle_reject_outliers.py.help | 35 ++++ ..._score_many_bundles_one_tractogram.py.help | 110 +++++++++++ ...ore_same_bundle_many_segmentations.py.help | 62 ++++++ .../scil_bundle_shape_measures.py.help | 64 +++++++ .../scil_bundle_uniformize_endpoints.py.help | 44 +++++ .../scil_bundle_volume_per_label.py.help | 31 +++ ...l_connectivity_compare_populations.py.help | 63 +++++++ ...scil_connectivity_compute_matrices.py.help | 91 +++++++++ .../scil_connectivity_compute_pca.py.help | 75 ++++++++ .../.hidden/scil_connectivity_filter.py.help | 56 ++++++ .../scil_connectivity_graph_measures.py.help | 63 +++++++ ...nectivity_hdf5_average_density_map.py.help | 36 ++++ .../.hidden/scil_connectivity_math.py.help | 150 +++++++++++++++ .../scil_connectivity_normalize.py.help | 76 ++++++++ ...il_connectivity_pairwise_agreement.py.help | 33 ++++ .../scil_connectivity_print_filenames.py.help | 32 ++++ .../scil_connectivity_reorder_rois.py.help | 51 +++++ .../.hidden/scil_denoising_nlmeans.py.help | 28 +++ scripts/.hidden/scil_dki_metrics.py.help | 105 +++++++++++ .../.hidden/scil_dti_convert_tensors.py.help | 37 ++++ scripts/.hidden/scil_dti_metrics.py.help | 101 ++++++++++ .../.hidden/scil_dwi_apply_bias_field.py.help | 24 +++ scripts/.hidden/scil_dwi_compute_snr.py.help | 59 ++++++ scripts/.hidden/scil_dwi_concatenate.py.help | 31 +++ scripts/.hidden/scil_dwi_convert_FDF.py.help | 31 +++ .../scil_dwi_detect_volume_outliers.py.help | 39 ++++ scripts/.hidden/scil_dwi_extract_b0.py.help | 46 +++++ .../.hidden/scil_dwi_extract_shell.py.help | 45 +++++ .../.hidden/scil_dwi_powder_average.py.help | 40 ++++ .../scil_dwi_prepare_eddy_command.py.help | 64 +++++++ .../scil_dwi_prepare_topup_command.py.help | 44 +++++ .../.hidden/scil_dwi_reorder_philips.py.help | 24 +++ .../.hidden/scil_dwi_split_by_indices.py.help | 28 +++ scripts/.hidden/scil_dwi_to_sh.py.help | 50 +++++ .../scil_fodf_max_in_ventricles.py.help | 56 ++++++ scripts/.hidden/scil_fodf_memsmt.py.help | 99 ++++++++++ scripts/.hidden/scil_fodf_metrics.py.help | 88 +++++++++ scripts/.hidden/scil_fodf_msmt.py.help | 71 +++++++ scripts/.hidden/scil_fodf_ssst.py.help | 52 ++++++ scripts/.hidden/scil_fodf_to_bingham.py.help | 51 +++++ scripts/.hidden/scil_freewater_maps.py.help | 58 ++++++ 
scripts/.hidden/scil_freewater_priors.py.help | 71 +++++++ scripts/.hidden/scil_frf_mean.py.help | 22 +++ scripts/.hidden/scil_frf_memsmt.py.help | 122 ++++++++++++ scripts/.hidden/scil_frf_msmt.py.help | 114 ++++++++++++ .../scil_frf_set_diffusivities.py.help | 30 +++ scripts/.hidden/scil_frf_ssst.py.help | 61 ++++++ scripts/.hidden/scil_get_version.py.help | 16 ++ .../scil_gradients_apply_transform.py.help | 21 +++ .../.hidden/scil_gradients_convert.py.help | 22 +++ .../scil_gradients_generate_sampling.py.help | 67 +++++++ .../scil_gradients_modify_axes.py.help | 28 +++ .../scil_gradients_round_bvals.py.help | 33 ++++ .../scil_gradients_validate_correct.py.help | 48 +++++ ...il_gradients_validate_correct_eddy.py.help | 25 +++ .../.hidden/scil_header_print_info.py.help | 20 ++ ...scil_header_validate_compatibility.py.help | 22 +++ .../scil_json_convert_entries_to_xlsx.py.help | 29 +++ .../scil_json_harmonize_entries.py.help | 31 +++ .../.hidden/scil_json_merge_entries.py.help | 55 ++++++ scripts/.hidden/scil_labels_combine.py.help | 48 +++++ scripts/.hidden/scil_labels_dilate.py.help | 51 +++++ scripts/.hidden/scil_labels_remove.py.help | 31 +++ .../scil_labels_split_volume_by_ids.py.help | 32 ++++ .../scil_labels_split_volume_from_lut.py.help | 31 +++ scripts/.hidden/scil_lesions_info.py.help | 50 +++++ .../.hidden/scil_mti_adjust_B1_header.py.help | 17 ++ scripts/.hidden/scil_mti_maps_MT.py.help | 150 +++++++++++++++ scripts/.hidden/scil_mti_maps_ihMT.py.help | 164 ++++++++++++++++ .../.hidden/scil_plot_stats_per_point.py.help | 33 ++++ scripts/.hidden/scil_qball_metrics.py.help | 71 +++++++ scripts/.hidden/scil_rgb_convert.py.help | 33 ++++ scripts/.hidden/scil_sh_convert.py.help | 39 ++++ scripts/.hidden/scil_sh_fusion.py.help | 36 ++++ scripts/.hidden/scil_sh_to_aodf.py.help | 96 ++++++++++ scripts/.hidden/scil_sh_to_rish.py.help | 36 ++++ scripts/.hidden/scil_sh_to_sf.py.help | 67 +++++++ .../scil_stats_group_comparison.py.help | 70 +++++++ .../scil_surface_apply_transform.py.help | 38 ++++ scripts/.hidden/scil_surface_convert.py.help | 32 ++++ scripts/.hidden/scil_surface_flip.py.help | 25 +++ scripts/.hidden/scil_surface_smooth.py.help | 36 ++++ scripts/.hidden/scil_tracking_local.py.help | 167 +++++++++++++++++ .../.hidden/scil_tracking_local_dev.py.help | 158 ++++++++++++++++ scripts/.hidden/scil_tracking_pft.py.help | 107 +++++++++++ .../.hidden/scil_tracking_pft_maps.py.help | 31 +++ .../scil_tracking_pft_maps_edit.py.help | 21 +++ .../scil_tractogram_apply_transform.py.help | 78 ++++++++ ...tractogram_apply_transform_to_hdf5.py.help | 52 ++++++ ...cil_tractogram_assign_custom_color.py.help | 0 ...il_tractogram_assign_uniform_color.py.help | 50 +++++ .../.hidden/scil_tractogram_commit.py.help | 160 ++++++++++++++++ .../.hidden/scil_tractogram_compress.py.help | 22 +++ .../scil_tractogram_compute_TODI.py.help | 74 ++++++++ ...cil_tractogram_compute_density_map.py.help | 28 +++ .../.hidden/scil_tractogram_convert.py.help | 28 +++ ...cil_tractogram_convert_hdf5_to_trk.py.help | 50 +++++ .../scil_tractogram_count_streamlines.py.help | 24 +++ .../scil_tractogram_cut_streamlines.py.help | 60 ++++++ .../scil_tractogram_detect_loops.py.help | 57 ++++++ .../.hidden/scil_tractogram_dpp_math.py.help | 76 ++++++++ .../scil_tractogram_extract_ushape.py.help | 41 ++++ .../scil_tractogram_filter_by_anatomy.py.help | 111 +++++++++++ .../scil_tractogram_filter_by_length.py.help | 41 ++++ ...l_tractogram_filter_by_orientation.py.help | 65 +++++++ .../scil_tractogram_filter_by_roi.py.help | 127 
+++++++++++++ .../.hidden/scil_tractogram_fix_trk.py.help | 80 ++++++++ scripts/.hidden/scil_tractogram_flip.py.help | 27 +++ scripts/.hidden/scil_tractogram_math.py.help | 75 ++++++++ ...cil_tractogram_pairwise_comparison.py.help | 51 +++++ .../scil_tractogram_print_info.py.help | 32 ++++ ...ctogram_project_map_to_streamlines.py.help | 68 +++++++ ...ctogram_project_streamlines_to_map.py.help | 77 ++++++++ scripts/.hidden/scil_tractogram_qbx.py.help | 43 +++++ .../.hidden/scil_tractogram_register.py.help | 42 +++++ .../scil_tractogram_remove_invalid.py.help | 41 ++++ .../.hidden/scil_tractogram_resample.py.help | 72 +++++++ ...scil_tractogram_resample_nb_points.py.help | 28 +++ .../scil_tractogram_seed_density_map.py.help | 29 +++ .../scil_tractogram_segment_and_score.py.help | 164 ++++++++++++++++ .../scil_tractogram_segment_bundles.py.help | 65 +++++++ ...m_segment_bundles_for_connectivity.py.help | 105 +++++++++++ ...scil_tractogram_segment_one_bundle.py.help | 62 ++++++ .../.hidden/scil_tractogram_shuffle.py.help | 22 +++ .../.hidden/scil_tractogram_smooth.py.help | 51 +++++ scripts/.hidden/scil_tractogram_split.py.help | 48 +++++ scripts/.hidden/scil_viz_bingham_fit.py.help | 38 ++++ scripts/.hidden/scil_viz_bundle.py.help | 56 ++++++ .../scil_viz_bundle_screenshot_mni.py.help | 48 +++++ .../scil_viz_bundle_screenshot_mosaic.py.help | 49 +++++ scripts/.hidden/scil_viz_connectivity.py.help | 0 .../.hidden/scil_viz_dti_screenshot.py.help | 30 +++ scripts/.hidden/scil_viz_fodf.py.help | 119 ++++++++++++ .../scil_viz_gradients_screenshot.py.help | 38 ++++ .../.hidden/scil_viz_tractogram_seeds.py.help | 21 +++ .../scil_viz_tractogram_seeds_3d.py.help | 46 +++++ .../.hidden/scil_viz_volume_histogram.py.help | 30 +++ .../scil_viz_volume_scatterplot.py.help | 94 ++++++++++ .../scil_viz_volume_screenshot.py.help | 118 ++++++++++++ .../scil_viz_volume_screenshot_mosaic.py.help | 96 ++++++++++ .../scil_volume_apply_transform.py.help | 27 +++ .../.hidden/scil_volume_b0_synthesis.py.help | 34 ++++ .../scil_volume_count_non_zero_voxels.py.help | 31 +++ scripts/.hidden/scil_volume_crop.py.help | 30 +++ scripts/.hidden/scil_volume_flip.py.help | 18 ++ scripts/.hidden/scil_volume_math.py.help | 176 ++++++++++++++++++ ...scil_volume_remove_outliers_ransac.py.help | 26 +++ scripts/.hidden/scil_volume_resample.py.help | 36 ++++ .../scil_volume_reshape_to_reference.py.help | 29 +++ .../.hidden/scil_volume_stats_in_ROI.py.help | 39 ++++ .../scil_volume_stats_in_labels.py.help | 22 +++ subprocess | 0 172 files changed, 9462 insertions(+) create mode 100644 os create mode 100755 scilpy-bot-scripts/generate_help_files.py create mode 100644 scripts/.hidden/scil_NODDI_maps.py.help create mode 100644 scripts/.hidden/scil_NODDI_priors.py.help create mode 100644 scripts/.hidden/scil_aodf_metrics.py.help create mode 100644 scripts/.hidden/scil_bids_validate.py.help create mode 100644 scripts/.hidden/scil_bingham_metrics.py.help create mode 100644 scripts/.hidden/scil_btensor_metrics.py.help create mode 100644 scripts/.hidden/scil_bundle_clean_qbx_clusters.py.help create mode 100644 scripts/.hidden/scil_bundle_compute_centroid.py.help create mode 100644 scripts/.hidden/scil_bundle_compute_endpoints_map.py.help create mode 100644 scripts/.hidden/scil_bundle_diameter.py.help create mode 100644 scripts/.hidden/scil_bundle_filter_by_occurence.py.help create mode 100644 scripts/.hidden/scil_bundle_generate_priors.py.help create mode 100644 scripts/.hidden/scil_bundle_label_map.py.help create mode 100644 
scripts/.hidden/scil_bundle_mean_fixel_afd.py.help create mode 100644 scripts/.hidden/scil_bundle_mean_fixel_afd_from_hdf5.py.help create mode 100644 scripts/.hidden/scil_bundle_mean_fixel_bingham_metric.py.help create mode 100644 scripts/.hidden/scil_bundle_mean_std.py.help create mode 100644 scripts/.hidden/scil_bundle_pairwise_comparison.py.help create mode 100644 scripts/.hidden/scil_bundle_reject_outliers.py.help create mode 100644 scripts/.hidden/scil_bundle_score_many_bundles_one_tractogram.py.help create mode 100644 scripts/.hidden/scil_bundle_score_same_bundle_many_segmentations.py.help create mode 100644 scripts/.hidden/scil_bundle_shape_measures.py.help create mode 100644 scripts/.hidden/scil_bundle_uniformize_endpoints.py.help create mode 100644 scripts/.hidden/scil_bundle_volume_per_label.py.help create mode 100644 scripts/.hidden/scil_connectivity_compare_populations.py.help create mode 100644 scripts/.hidden/scil_connectivity_compute_matrices.py.help create mode 100644 scripts/.hidden/scil_connectivity_compute_pca.py.help create mode 100644 scripts/.hidden/scil_connectivity_filter.py.help create mode 100644 scripts/.hidden/scil_connectivity_graph_measures.py.help create mode 100644 scripts/.hidden/scil_connectivity_hdf5_average_density_map.py.help create mode 100644 scripts/.hidden/scil_connectivity_math.py.help create mode 100644 scripts/.hidden/scil_connectivity_normalize.py.help create mode 100644 scripts/.hidden/scil_connectivity_pairwise_agreement.py.help create mode 100644 scripts/.hidden/scil_connectivity_print_filenames.py.help create mode 100644 scripts/.hidden/scil_connectivity_reorder_rois.py.help create mode 100644 scripts/.hidden/scil_denoising_nlmeans.py.help create mode 100644 scripts/.hidden/scil_dki_metrics.py.help create mode 100644 scripts/.hidden/scil_dti_convert_tensors.py.help create mode 100644 scripts/.hidden/scil_dti_metrics.py.help create mode 100644 scripts/.hidden/scil_dwi_apply_bias_field.py.help create mode 100644 scripts/.hidden/scil_dwi_compute_snr.py.help create mode 100644 scripts/.hidden/scil_dwi_concatenate.py.help create mode 100644 scripts/.hidden/scil_dwi_convert_FDF.py.help create mode 100644 scripts/.hidden/scil_dwi_detect_volume_outliers.py.help create mode 100644 scripts/.hidden/scil_dwi_extract_b0.py.help create mode 100644 scripts/.hidden/scil_dwi_extract_shell.py.help create mode 100644 scripts/.hidden/scil_dwi_powder_average.py.help create mode 100644 scripts/.hidden/scil_dwi_prepare_eddy_command.py.help create mode 100644 scripts/.hidden/scil_dwi_prepare_topup_command.py.help create mode 100644 scripts/.hidden/scil_dwi_reorder_philips.py.help create mode 100644 scripts/.hidden/scil_dwi_split_by_indices.py.help create mode 100644 scripts/.hidden/scil_dwi_to_sh.py.help create mode 100644 scripts/.hidden/scil_fodf_max_in_ventricles.py.help create mode 100644 scripts/.hidden/scil_fodf_memsmt.py.help create mode 100644 scripts/.hidden/scil_fodf_metrics.py.help create mode 100644 scripts/.hidden/scil_fodf_msmt.py.help create mode 100644 scripts/.hidden/scil_fodf_ssst.py.help create mode 100644 scripts/.hidden/scil_fodf_to_bingham.py.help create mode 100644 scripts/.hidden/scil_freewater_maps.py.help create mode 100644 scripts/.hidden/scil_freewater_priors.py.help create mode 100644 scripts/.hidden/scil_frf_mean.py.help create mode 100644 scripts/.hidden/scil_frf_memsmt.py.help create mode 100644 scripts/.hidden/scil_frf_msmt.py.help create mode 100644 scripts/.hidden/scil_frf_set_diffusivities.py.help create mode 100644 
scripts/.hidden/scil_frf_ssst.py.help create mode 100644 scripts/.hidden/scil_get_version.py.help create mode 100644 scripts/.hidden/scil_gradients_apply_transform.py.help create mode 100644 scripts/.hidden/scil_gradients_convert.py.help create mode 100644 scripts/.hidden/scil_gradients_generate_sampling.py.help create mode 100644 scripts/.hidden/scil_gradients_modify_axes.py.help create mode 100644 scripts/.hidden/scil_gradients_round_bvals.py.help create mode 100644 scripts/.hidden/scil_gradients_validate_correct.py.help create mode 100644 scripts/.hidden/scil_gradients_validate_correct_eddy.py.help create mode 100644 scripts/.hidden/scil_header_print_info.py.help create mode 100644 scripts/.hidden/scil_header_validate_compatibility.py.help create mode 100644 scripts/.hidden/scil_json_convert_entries_to_xlsx.py.help create mode 100644 scripts/.hidden/scil_json_harmonize_entries.py.help create mode 100644 scripts/.hidden/scil_json_merge_entries.py.help create mode 100644 scripts/.hidden/scil_labels_combine.py.help create mode 100644 scripts/.hidden/scil_labels_dilate.py.help create mode 100644 scripts/.hidden/scil_labels_remove.py.help create mode 100644 scripts/.hidden/scil_labels_split_volume_by_ids.py.help create mode 100644 scripts/.hidden/scil_labels_split_volume_from_lut.py.help create mode 100644 scripts/.hidden/scil_lesions_info.py.help create mode 100644 scripts/.hidden/scil_mti_adjust_B1_header.py.help create mode 100644 scripts/.hidden/scil_mti_maps_MT.py.help create mode 100644 scripts/.hidden/scil_mti_maps_ihMT.py.help create mode 100644 scripts/.hidden/scil_plot_stats_per_point.py.help create mode 100644 scripts/.hidden/scil_qball_metrics.py.help create mode 100644 scripts/.hidden/scil_rgb_convert.py.help create mode 100644 scripts/.hidden/scil_sh_convert.py.help create mode 100644 scripts/.hidden/scil_sh_fusion.py.help create mode 100644 scripts/.hidden/scil_sh_to_aodf.py.help create mode 100644 scripts/.hidden/scil_sh_to_rish.py.help create mode 100644 scripts/.hidden/scil_sh_to_sf.py.help create mode 100644 scripts/.hidden/scil_stats_group_comparison.py.help create mode 100644 scripts/.hidden/scil_surface_apply_transform.py.help create mode 100644 scripts/.hidden/scil_surface_convert.py.help create mode 100644 scripts/.hidden/scil_surface_flip.py.help create mode 100644 scripts/.hidden/scil_surface_smooth.py.help create mode 100644 scripts/.hidden/scil_tracking_local.py.help create mode 100644 scripts/.hidden/scil_tracking_local_dev.py.help create mode 100644 scripts/.hidden/scil_tracking_pft.py.help create mode 100644 scripts/.hidden/scil_tracking_pft_maps.py.help create mode 100644 scripts/.hidden/scil_tracking_pft_maps_edit.py.help create mode 100644 scripts/.hidden/scil_tractogram_apply_transform.py.help create mode 100644 scripts/.hidden/scil_tractogram_apply_transform_to_hdf5.py.help create mode 100644 scripts/.hidden/scil_tractogram_assign_custom_color.py.help create mode 100644 scripts/.hidden/scil_tractogram_assign_uniform_color.py.help create mode 100644 scripts/.hidden/scil_tractogram_commit.py.help create mode 100644 scripts/.hidden/scil_tractogram_compress.py.help create mode 100644 scripts/.hidden/scil_tractogram_compute_TODI.py.help create mode 100644 scripts/.hidden/scil_tractogram_compute_density_map.py.help create mode 100644 scripts/.hidden/scil_tractogram_convert.py.help create mode 100644 scripts/.hidden/scil_tractogram_convert_hdf5_to_trk.py.help create mode 100644 scripts/.hidden/scil_tractogram_count_streamlines.py.help create mode 100644 
scripts/.hidden/scil_tractogram_cut_streamlines.py.help create mode 100644 scripts/.hidden/scil_tractogram_detect_loops.py.help create mode 100644 scripts/.hidden/scil_tractogram_dpp_math.py.help create mode 100644 scripts/.hidden/scil_tractogram_extract_ushape.py.help create mode 100644 scripts/.hidden/scil_tractogram_filter_by_anatomy.py.help create mode 100644 scripts/.hidden/scil_tractogram_filter_by_length.py.help create mode 100644 scripts/.hidden/scil_tractogram_filter_by_orientation.py.help create mode 100644 scripts/.hidden/scil_tractogram_filter_by_roi.py.help create mode 100644 scripts/.hidden/scil_tractogram_fix_trk.py.help create mode 100644 scripts/.hidden/scil_tractogram_flip.py.help create mode 100644 scripts/.hidden/scil_tractogram_math.py.help create mode 100644 scripts/.hidden/scil_tractogram_pairwise_comparison.py.help create mode 100644 scripts/.hidden/scil_tractogram_print_info.py.help create mode 100644 scripts/.hidden/scil_tractogram_project_map_to_streamlines.py.help create mode 100644 scripts/.hidden/scil_tractogram_project_streamlines_to_map.py.help create mode 100644 scripts/.hidden/scil_tractogram_qbx.py.help create mode 100644 scripts/.hidden/scil_tractogram_register.py.help create mode 100644 scripts/.hidden/scil_tractogram_remove_invalid.py.help create mode 100644 scripts/.hidden/scil_tractogram_resample.py.help create mode 100644 scripts/.hidden/scil_tractogram_resample_nb_points.py.help create mode 100644 scripts/.hidden/scil_tractogram_seed_density_map.py.help create mode 100644 scripts/.hidden/scil_tractogram_segment_and_score.py.help create mode 100644 scripts/.hidden/scil_tractogram_segment_bundles.py.help create mode 100644 scripts/.hidden/scil_tractogram_segment_bundles_for_connectivity.py.help create mode 100644 scripts/.hidden/scil_tractogram_segment_one_bundle.py.help create mode 100644 scripts/.hidden/scil_tractogram_shuffle.py.help create mode 100644 scripts/.hidden/scil_tractogram_smooth.py.help create mode 100644 scripts/.hidden/scil_tractogram_split.py.help create mode 100644 scripts/.hidden/scil_viz_bingham_fit.py.help create mode 100644 scripts/.hidden/scil_viz_bundle.py.help create mode 100644 scripts/.hidden/scil_viz_bundle_screenshot_mni.py.help create mode 100644 scripts/.hidden/scil_viz_bundle_screenshot_mosaic.py.help create mode 100644 scripts/.hidden/scil_viz_connectivity.py.help create mode 100644 scripts/.hidden/scil_viz_dti_screenshot.py.help create mode 100644 scripts/.hidden/scil_viz_fodf.py.help create mode 100644 scripts/.hidden/scil_viz_gradients_screenshot.py.help create mode 100644 scripts/.hidden/scil_viz_tractogram_seeds.py.help create mode 100644 scripts/.hidden/scil_viz_tractogram_seeds_3d.py.help create mode 100644 scripts/.hidden/scil_viz_volume_histogram.py.help create mode 100644 scripts/.hidden/scil_viz_volume_scatterplot.py.help create mode 100644 scripts/.hidden/scil_viz_volume_screenshot.py.help create mode 100644 scripts/.hidden/scil_viz_volume_screenshot_mosaic.py.help create mode 100644 scripts/.hidden/scil_volume_apply_transform.py.help create mode 100644 scripts/.hidden/scil_volume_b0_synthesis.py.help create mode 100644 scripts/.hidden/scil_volume_count_non_zero_voxels.py.help create mode 100644 scripts/.hidden/scil_volume_crop.py.help create mode 100644 scripts/.hidden/scil_volume_flip.py.help create mode 100644 scripts/.hidden/scil_volume_math.py.help create mode 100644 scripts/.hidden/scil_volume_remove_outliers_ransac.py.help create mode 100644 scripts/.hidden/scil_volume_resample.py.help create mode 
100644 scripts/.hidden/scil_volume_reshape_to_reference.py.help create mode 100644 scripts/.hidden/scil_volume_stats_in_ROI.py.help create mode 100644 scripts/.hidden/scil_volume_stats_in_labels.py.help create mode 100644 subprocess diff --git a/os b/os new file mode 100644 index 000000000..e69de29bb diff --git a/scilpy-bot-scripts/generate_help_files.py b/scilpy-bot-scripts/generate_help_files.py new file mode 100755 index 000000000..ee281a343 --- /dev/null +++ b/scilpy-bot-scripts/generate_help_files.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +This script iterates over all Python scripts in the 'scripts' directory, runs each script with the '--h' flag to generate help text, and saves the output to corresponding hidden files in the '.hidden' directory. + +By doing this, we can precompute the help outputs for each script, which can be useful for faster searches or documentation purposes. + +Scripts that should be skipped: + +- '__init__.py' +- 'scil_search_keywords.py' + +The help output is saved in a hidden directory to avoid clutter in the main scripts directory. +""" + +import os +import subprocess +from pathlib import Path + + + +# Directory where your scripts are located +scripts_dir = Path('scripts/') + +# Hidden directory to store help files +hidden_dir = scripts_dir / '.hidden' +hidden_dir.mkdir(exist_ok=True) + +# Iterate over all scripts and generate help files +for script in scripts_dir.glob('*.py'): + if script.name == '__init__.py' or script.name == 'scil_search_keywords.py': + continue + help_file = hidden_dir / f'{script.name}.help' + + # Run the script with --h and capture the output + result = subprocess.run(['python', script, '--h'], capture_output=True, text=True) + + # Save the output to the hidden file + with open(help_file, 'w') as f: + f.write(result.stdout) + + print(f'Help output for {script.name} saved to {help_file}') \ No newline at end of file diff --git a/scripts/.hidden/scil_NODDI_maps.py.help b/scripts/.hidden/scil_NODDI_maps.py.help new file mode 100644 index 000000000..2e1ee2efd --- /dev/null +++ b/scripts/.hidden/scil_NODDI_maps.py.help @@ -0,0 +1,57 @@ +usage: scil_NODDI_maps.py [-h] [--mask MASK] [--out_dir OUT_DIR] + [--tolerance tol] [--skip_b0_check] + [--para_diff PARA_DIFF] [--iso_diff ISO_DIFF] + [--lambda1 LAMBDA1] [--lambda2 LAMBDA2] + [--save_kernels DIRECTORY | --load_kernels DIRECTORY] + [--compute_only] [--processes NBR] + [-v [{DEBUG,INFO,WARNING}]] [-f] + in_dwi in_bval in_bvec + +Compute NODDI [1] maps using AMICO. +Multi-shell DWI necessary. + +Formerly: scil_compute_NODDI.py + +positional arguments: + in_dwi DWI file acquired with a NODDI compatible protocol (single-shell data not suited). + in_bval b-values filename, in FSL format (.bval). + in_bvec b-vectors filename, in FSL format (.bvec). + +options: + -h, --help show this help message and exit + --mask MASK Brain mask filename. + --out_dir OUT_DIR Output directory for the NODDI results. [results] + --tolerance tol The tolerated gap between the b-values to extract and the current b-value. + [Default: 20] + * Note. We would expect to find at least one b-value in the + range [0, tolerance]. To skip this check, use --skip_b0_check. + --skip_b0_check By default, we supervise that at least one b0 exists in your data + (i.e. b-values below the default --tolerance). Use this option to + allow continuing even if the minimum b-value is suspiciously high. + Use with care, and only if you understand your data. + --processes NBR Number of sub-processes to start.
+ Default: [1] + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + -f Force overwriting of the output files. + +Model options: + --para_diff PARA_DIFF + Axial diffusivity (AD) in the CC. [0.0017] + --iso_diff ISO_DIFF Mean diffusivity (MD) in ventricles. [0.003] + --lambda1 LAMBDA1 First regularization parameter. [0.5] + --lambda2 LAMBDA2 Second regularization parameter. [0.001] + +Kernels options: + --save_kernels DIRECTORY + Output directory for the COMMIT kernels. + --load_kernels DIRECTORY + Input directory where the COMMIT kernels are located. + --compute_only Compute kernels only, --save_kernels must be used. + +Reference: + [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC. + NODDI: practical in vivo neurite orientation dispersion + and density imaging of the human brain. + NeuroImage. 2012 Jul 16;61:1000-16. diff --git a/scripts/.hidden/scil_NODDI_priors.py.help b/scripts/.hidden/scil_NODDI_priors.py.help new file mode 100644 index 000000000..6ea54799a --- /dev/null +++ b/scripts/.hidden/scil_NODDI_priors.py.help @@ -0,0 +1,70 @@ +usage: scil_NODDI_priors.py [-h] [--fa_min_single_fiber FA_MIN_SINGLE_FIBER] + [--fa_max_ventricles FA_MAX_VENTRICLES] + [--md_min_ventricles MD_MIN_VENTRICLES] + [--roi_radius ROI_RADIUS] + [--roi_center pos pos pos] + [--out_txt_1fiber_para FILE] + [--out_txt_1fiber_perp FILE] + [--out_mask_1fiber FILE] + [--out_txt_ventricles FILE] + [--out_mask_ventricles FILE] + [-v [{DEBUG,INFO,WARNING}]] [-f] + in_FA in_AD in_RD in_MD + +Compute the axial (para_diff), radial (perp_diff), and mean (iso_diff) +diffusivity priors for NODDI. + +Formerly: scil_compute_NODDI_priors.py + +positional arguments: + in_FA Path to the FA volume. + in_AD Path to the axial diffusivity (AD) volume. + in_RD Path to the radial diffusivity (RD) volume. + in_MD Path to the mean diffusivity (MD) volume. + +options: + -h, --help show this help message and exit + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + -f Force overwriting of the output files. + +Metrics options: + --fa_min_single_fiber FA_MIN_SINGLE_FIBER + Minimal threshold of FA (voxels above that threshold are considered in + the single fiber mask). [0.7] + --fa_max_ventricles FA_MAX_VENTRICLES + Maximal threshold of FA (voxels under that threshold are considered in + the ventricles). [0.1] + --md_min_ventricles MD_MIN_VENTRICLES + Minimal threshold of MD in mm2/s (voxels above that threshold are considered + in the ventricles). [0.003] + +Regions options: + --roi_radius ROI_RADIUS + Radius of the region used to estimate the priors. The roi will be a cube spanning + from ROI_CENTER in each direction. [20] + --roi_center pos pos pos + Center of the roi of size roi_radius used to estimate the priors; a 3-value coordinate. + If not set, uses the center of the 3D volume. + +Outputs: + --out_txt_1fiber_para FILE + Output path for the text file containing the single fiber average value of AD. + If not set, the file will not be saved. + --out_txt_1fiber_perp FILE + Output path for the text file containing the single fiber average value of RD. + If not set, the file will not be saved. + --out_mask_1fiber FILE + Output path for single fiber mask. If not set, the mask will not be saved. + --out_txt_ventricles FILE + Output path for the text file containing the ventricles average value of MD.
+ If not set, the file will not be saved. + --out_mask_ventricles FILE + Output path for the ventricle mask. + If not set, the mask will not be saved. + +Reference: + [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC. + NODDI: practical in vivo neurite orientation dispersion and density + imaging of the human brain. NeuroImage. 2012 Jul 16;61:1000-16. diff --git a/scripts/.hidden/scil_aodf_metrics.py.help b/scripts/.hidden/scil_aodf_metrics.py.help new file mode 100644 index 000000000..9af856779 --- /dev/null +++ b/scripts/.hidden/scil_aodf_metrics.py.help @@ -0,0 +1,93 @@ +usage: scil_aodf_metrics.py [-h] [--mask MASK] [--asi_map ASI_MAP] + [--odd_power_map ODD_POWER_MAP] [--peaks PEAKS] + [--peak_values PEAK_VALUES] + [--peak_indices PEAK_INDICES] [--nufid NUFID] + [--not_all] [--at A_THRESHOLD] [--rt R_THRESHOLD] + [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}] + [--processes NBR] + [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] + [-v [{DEBUG,INFO,WARNING}]] [-f] + in_sh + +Script to compute various metrics derived from asymmetric ODFs. + +These metrics include the asymmetric peak directions image, a number of fiber +directions (nufid) map [1], the asymmetry index (ASI) map [2] and an odd-power +map [3]. + +The asymmetric peak directions image contains peaks per hemisphere, considering +antipodal sphere directions as distinct. On a symmetric signal, the number of +asymmetric peaks extracted is then twice the number of symmetric peaks. + +The nufid map is the asymmetric alternative to NuFO maps. It counts the +number of asymmetric peaks extracted and ranges in [0..N] with N the maximum +number of peaks. + +The asymmetric index is a cosine-based metric in the range [0..1], with 0 +corresponding to a perfectly symmetric signal and 1 to a perfectly asymmetric +signal. + +The odd-power map is also in the range [0..1], with 0 corresponding to a +perfectly symmetric signal and 1 to a perfectly anti-symmetric signal. It is +given as the ratio of the L2-norm of odd SH coefficients to the L2-norm of all +SH coefficients. + +Formerly: scil_compute_asym_odf_metrics.py + +positional arguments: + in_sh Input SH image. + +options: + -h, --help show this help message and exit + --mask MASK Optional mask. + --asi_map ASI_MAP Output asymmetry index (ASI) map. + --odd_power_map ODD_POWER_MAP + Output odd power map. + --peaks PEAKS Output filename for the extracted peaks. + --peak_values PEAK_VALUES + Output filename for the extracted peaks values. + --peak_indices PEAK_INDICES + Output filename for the generated peaks indices on the sphere. + --nufid NUFID Output filename for the nufid file. + --not_all If set, only saves the files specified using the file flags [False]. + --at A_THRESHOLD Absolute threshold on fODF amplitude. This value should be set to + approximately 1.5 to 2 times the maximum fODF amplitude in isotropic voxels + (i.e. ventricles). + Use scil_fodf_max_in_ventricles.py to find the maximal value. + See [Dell'Acqua et al HBM 2013] [0.0]. + --rt R_THRESHOLD Relative threshold on fODF amplitude in percentage [0.1]. + --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724} + Sphere to use for peak directions estimation [symmetric724]. + --processes NBR Number of sub-processes to start. + Default: [1] + --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} + Spherical harmonics basis used for the SH coefficients.
+ Must be either 'descoteaux07', 'tournier07', + 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: + 'descoteaux07' : SH basis from the Descoteaux et al. + MRM 2007 paper + 'tournier07' : SH basis from the new Tournier et al. + NeuroImage 2019 paper, as in MRtrix 3. + 'descoteaux07_legacy': SH basis from the legacy Dipy implementation + of the Descoteaux et al. MRM 2007 paper + 'tournier07_legacy' : SH basis from the legacy Tournier et al. + NeuroImage 2007 paper. + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + -f Force overwriting of the output files. + +References: +[1] C. Poirier and M. Descoteaux, "Filtering Methods for Asymmetric ODFs: +Where and How Asymmetry Occurs in the White Matter." bioRxiv. 2022 Jan 1; +2022.12.18.520881. doi: https://doi.org/10.1101/2022.12.18.520881 + +[2] S. Cetin Karayumak, E. Özarslan, and G. Unal, +"Asymmetric Orientation Distribution Functions (AODFs) revealing intravoxel +geometry in diffusion MRI," Magnetic Resonance Imaging, vol. 49, pp. 145-158, +Jun. 2018, doi: https://doi.org/10.1016/j.mri.2018.03.006. + +[3] C. Poirier, E. St-Onge, and M. Descoteaux, "Investigating the Occurence of +Asymmetric Patterns in White Matter Fiber Orientation Distribution Functions" +[Abstract], In: Proc. Intl. Soc. Mag. Reson. Med. 29 (2021), 2021 May 15-20, +Vancouver, BC, Abstract number 0865. diff --git a/scripts/.hidden/scil_bids_validate.py.help b/scripts/.hidden/scil_bids_validate.py.help new file mode 100644 index 000000000..9af451483 --- /dev/null +++ b/scripts/.hidden/scil_bids_validate.py.help @@ -0,0 +1,42 @@ +usage: scil_bids_validate.py [-h] [--bids_ignore BIDS_IGNORE] [--fs FS] + [--clean] [--readout READOUT] + [-v [{DEBUG,INFO,WARNING}]] [-f] + in_bids out_json + +Create a json file from a BIDS dataset detailing all info +needed for tractoflow +- DWI/rev_DWI +- T1 +- fmap/sbref (based on IntendedFor entity) +- Freesurfer (optional - could be one per participant + or one per participant/session) + +The BIDS dataset MUST be homogeneous. +The metadata needs to be uniform across all participants/sessions/runs + +Mandatory entity: IntendedFor +Sensitive entities: PhaseEncodingDirection, TotalReadoutTime, direction + +Formerly: scil_validate_bids.py + +positional arguments: + in_bids Input BIDS folder. + out_json Output json file. + +options: + -h, --help show this help message and exit + --bids_ignore BIDS_IGNORE + If you want to ignore some subjects or some files, you + can provide an extra bidsignore file. Check: + https://github.com/bids-standard/bids- + validator#bidsignore + --fs FS Output freesurfer path. It will add keys wmparc and + aparc+aseg. + --clean If set, it will remove all the participants that are + missing any information. + --readout READOUT Default total readout time value [0.062]. + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided + level. Default level is warning, default when using -v + is info. + -f Force overwriting of the output files.
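The docstring of generate_help_files.py above motivates these precomputed .help files as being "useful for faster searches". Below is a minimal sketch of that consumer side, assuming only the scripts/.hidden layout created by this patch; the search_help_files helper and its command-line wrapper are hypothetical and not part of scilpy:

    #!/usr/bin/env python3
    # Hypothetical consumer of the precomputed help files: scan
    # scripts/.hidden/*.py.help for a keyword instead of launching
    # every script with --h.
    import sys
    from pathlib import Path

    def search_help_files(keyword, hidden_dir=Path('scripts/.hidden')):
        """Yield script names whose cached help text mentions `keyword`."""
        for help_file in sorted(hidden_dir.glob('*.py.help')):
            text = help_file.read_text(errors='replace')
            if keyword.lower() in text.lower():
                # Strip the trailing '.help' to recover the script name.
                yield help_file.name[:-len('.help')]

    if __name__ == '__main__':
        keyword = sys.argv[1] if len(sys.argv) > 1 else 'NODDI'
        for name in search_help_files(keyword):
            print(name)

Reading the cached text keeps a full-corpus keyword search to one directory scan, instead of one Python interpreter launch per script.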
diff --git a/scripts/.hidden/scil_bingham_metrics.py.help b/scripts/.hidden/scil_bingham_metrics.py.help new file mode 100644 index 000000000..ac2a1c2ff --- /dev/null +++ b/scripts/.hidden/scil_bingham_metrics.py.help @@ -0,0 +1,52 @@ +usage: scil_bingham_metrics.py [-h] [--out_fd OUT_FD] [--out_fs OUT_FS] + [--out_ff OUT_FF] [--not_all] [--mask MASK] + [--nbr_integration_steps NBR_INTEGRATION_STEPS] + [-v [{DEBUG,INFO,WARNING}]] [--processes NBR] + [-f] + in_bingham + +Script to compute fODF lobe-specific metrics derived from a Bingham +distribution fit, as described in [1]. Resulting metrics are fiber density +(FD), fiber spread (FS) and fiber fraction (FF) [2]. + +The Bingham coefficients volume comes from scil_fodf_to_bingham.py. + +A lobe's FD is the integral of the Bingham function on the sphere. It +represents the density of fibers going through a given voxel for a given +fODF lobe (fixel). A lobe's FS is the ratio of its FD to its maximum AFD. It +is at its minimum for a sharp lobe and at its maximum for a wide lobe. A lobe's +FF is the ratio of its FD to the total FD in the voxel. + +Using 12 threads, the execution takes 10 minutes for FD estimation for a brain +with 1mm isotropic resolution. Other metrics take less than a second. + +Formerly: scil_compute_lobe_specific_fodf_metrics.py + +positional arguments: + in_bingham Input Bingham nifti image. + +options: + -h, --help show this help message and exit + --out_fd OUT_FD Path to output fiber density. [fd.nii.gz] + --out_fs OUT_FS Path to output fiber spread. [fs.nii.gz] + --out_ff OUT_FF Path to fiber fraction file. [ff.nii.gz] + --not_all Do not compute all metrics. Then, please provide the output paths of the files you need. + --mask MASK Optional mask image. Only voxels inside the mask are computed. + --nbr_integration_steps NBR_INTEGRATION_STEPS + Number of integration steps along the theta axis for fiber density estimation. [50] + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + --processes NBR Number of sub-processes to start. + Default: [1] + -f Force overwriting of the output files. + +[1] T. W. Riffert, J. Schreiber, A. Anwander, and T. R. Knösche, “Beyond + fractional anisotropy: Extraction of bundle-specific structural metrics + from crossing fiber models,” NeuroImage, vol. 100, pp. 176-191, Oct. 2014, + doi: 10.1016/j.neuroimage.2014.06.015. + +[2] J. Schreiber, T. Riffert, A. Anwander, and T. R. Knösche, “Plausibility + Tracking: A method to evaluate anatomical connectivity and microstructural + properties along fiber pathways,” NeuroImage, vol. 90, pp. 163-178, Apr. + 2014, doi: 10.1016/j.neuroimage.2014.01.002. diff --git a/scripts/.hidden/scil_btensor_metrics.py.help b/scripts/.hidden/scil_btensor_metrics.py.help new file mode 100644 index 000000000..2cb7853e0 --- /dev/null +++ b/scripts/.hidden/scil_btensor_metrics.py.help @@ -0,0 +1,94 @@ +usage: scil_btensor_metrics.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals + IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS + [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5} + [{0,1,-0.5,0.5} ...] [--mask MASK] + [--tolerance tol] [--skip_b0_check] + [--fit_iters FIT_ITERS] + [--random_iters RANDOM_ITERS] + [--do_weight_bvals] [--do_weight_pa] + [--do_multiple_s0] [--op OP] [--fa FA] + [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] + [-f] [--not_all] [--md file] [--ufa file] + [--mk_i file] [--mk_a file] [--mk_t file] + +Script to compute microstructure metrics using the DIVIDE method.
In order to +operate, the script needs at least two different types of b-tensor encodings. +Note that custom encodings are not yet supported, so that only the linear +tensor encoding (LTE, b_delta = 1), the planar tensor encoding +(PTE, b_delta = -0.5), the spherical tensor encoding (STE, b_delta = 0) and +the cigar shape tensor encoding (b_delta = 0.5) are available. Moreover, all +of `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the +same number of arguments. Be sure to keep the same order of encodings +throughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT). + +By default, will output all possible files, using default names. Thus, this +script outputs the results from the DIVIDE fit or direct derivatives: +mean diffusivity (MD), isotropic mean kurtosis (mk_i), anisotropic mean +kurtosis (mk_a), total mean kurtosis (mk_t) and finally micro-FA (uFA). +Specific names can be given using the +file flags specified in the "File flags" section. + +If --not_all is set, only the files specified explicitly by the flags +will be output. The order parameter can also be computed from the uFA and a +precomputed FA, using separate input parameters. + +>>> scil_btensor_metrics.py --in_dwis LTE.nii.gz PTE.nii.gz STE.nii.gz + --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs LTE.bvec PTE.bvec STE.bvec + --in_bdeltas 1 -0.5 0 --mask mask.nii.gz + +IMPORTANT: If the script does not converge to a solution, it is probably due to +noise outside the brain. Thus, it is strongly recommended to provide a brain +mask with --mask. + +Based on Markus Nilsson, Filip Szczepankiewicz, Björn Lampinen, André Ahlgren, +João P. de Almeida Martins, Samo Lasic, Carl-Fredrik Westin, +and Daniel Topgaard. An open-source framework for analysis of multidimensional +diffusion MRI data implemented in MATLAB. +Proc. Intl. Soc. Mag. Reson. Med. (26), Paris, France, 2018. + +Formerly: scil_compute_divide.py + +options: + -h, --help show this help message and exit + --in_dwis IN_DWIS [IN_DWIS ...] + Path to the input diffusion volume for each b-tensor encoding type. + --in_bvals IN_BVALS [IN_BVALS ...] + Path to the bval file, in FSL format, for each b-tensor encoding type. + --in_bvecs IN_BVECS [IN_BVECS ...] + Path to the bvec file, in FSL format, for each b-tensor encoding type. + --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...] + Value of b_delta for each b-tensor encoding type, in the same order as dwi, bval and bvec inputs. + --mask MASK Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction. + --tolerance tol The tolerated gap between the b-values to extract and the current b-value. + [Default: 20] + * Note. We would expect to find at least one b-value in the + range [0, tolerance]. To skip this check, use --skip_b0_check. + --skip_b0_check By default, we supervise that at least one b0 exists in your data + (i.e. b-values below the default --tolerance). Use this option to + allow continuing even if the minimum b-value is suspiciously high. + Use with care, and only if you understand your data. + --fit_iters FIT_ITERS + The number of times the gamma fit will be done [1] + --random_iters RANDOM_ITERS + The number of iterations for the initial parameters search. [50] + --do_weight_bvals If set, does not do a weighting on the bvalues in the gamma fit. + --do_weight_pa If set, does not do a powder averaging weighting in the gamma fit. + --do_multiple_s0 If set, does not take into account multiple baseline signals.
+ --processes NBR Number of sub-processes to start. + Default: [1] + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + -f Force overwriting of the output files. + --not_all If set, only saves the files specified using the file flags. (Default: False) + +Order parameter (OP): + --op OP Output filename for the order parameter. The OP will not be output if this is not given. Computation of the OP also requires a precomputed FA map (given using --fa). + --fa FA Path to a FA map. Needed for calculating the OP. + +File flags: + --md file Output filename for the MD. + --ufa file Output filename for the microscopic FA. + --mk_i file Output filename for the isotropic mean kurtosis. + --mk_a file Output filename for the anisotropic mean kurtosis. + --mk_t file Output filename for the total mean kurtosis. diff --git a/scripts/.hidden/scil_bundle_clean_qbx_clusters.py.help b/scripts/.hidden/scil_bundle_clean_qbx_clusters.py.help new file mode 100644 index 000000000..f76af15c7 --- /dev/null +++ b/scripts/.hidden/scil_bundle_clean_qbx_clusters.py.help @@ -0,0 +1,54 @@ +usage: scil_bundle_clean_qbx_clusters.py [-h] + [--out_accepted_dir OUT_ACCEPTED_DIR] + [--out_rejected_dir OUT_REJECTED_DIR] + [--min_cluster_size MIN_CLUSTER_SIZE] + [--background_opacity BACKGROUND_OPACITY] + [--background_linewidth BACKGROUND_LINEWIDTH] + [--clusters_linewidth CLUSTERS_LINEWIDTH] + [--reference REFERENCE] + [--no_bbox_check] + [-v [{DEBUG,INFO,WARNING}]] [-f] + in_bundles [in_bundles ...] + out_accepted out_rejected + + Render clusters sequentially to either accept or reject them based on + visual inspection. Useful for cleaning bundles for RBx, BST or for figures. + The VTK window does not handle opacity of streamlines well; this is + normal rendering behavior. + Often used in pair with scil_tractogram_qbx.py. + + Key mapping: + - a/A: accept displayed clusters + - r/R: reject displayed clusters + - z/Z: Rewind one element + - c/C: Stop rendering of the background concatenation of streamlines + - q/Q: Early window exit; everything remaining will be rejected + +positional arguments: + in_bundles List of the cluster filenames. + out_accepted Filename of the concatenated accepted clusters. + out_rejected Filename of the concatenated rejected clusters. + +options: + -h, --help show this help message and exit + --out_accepted_dir OUT_ACCEPTED_DIR + Directory to save all accepted clusters separately. + --out_rejected_dir OUT_REJECTED_DIR + Directory to save all rejected clusters separately. + --min_cluster_size MIN_CLUSTER_SIZE + Minimum cluster size for consideration [1]. Must be at least 1. + --background_opacity BACKGROUND_OPACITY + Opacity of the background streamlines. Keep low, between 0 and 0.5 [0.1]. + --background_linewidth BACKGROUND_LINEWIDTH + Linewidth of the background streamlines [1]. + --clusters_linewidth CLUSTERS_LINEWIDTH + Linewidth of the current cluster [1]. + --reference REFERENCE + Reference anatomy for tck/vtk/fib/dpy file + support (.nii or .nii.gz). + --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of + tractograms (ignores the presence of invalid streamlines). + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + -f Force overwriting of the output files.
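For illustration, a typical call to the cluster-cleaning viewer documented above might look as follows (the filenames are hypothetical, and the input clusters would usually come from scil_tractogram_qbx.py, as the description notes):

>>> scil_bundle_clean_qbx_clusters.py cluster_*.trk accepted.trk rejected.trk --min_cluster_size 10 --background_opacity 0.1

Every option shown is documented in the help text above; the accepted and rejected clusters are then concatenated into the two output tractograms.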
diff --git a/scripts/.hidden/scil_bundle_compute_centroid.py.help b/scripts/.hidden/scil_bundle_compute_centroid.py.help new file mode 100644 index 000000000..197b4c393 --- /dev/null +++ b/scripts/.hidden/scil_bundle_compute_centroid.py.help @@ -0,0 +1,24 @@ +usage: scil_bundle_compute_centroid.py [-h] [--nb_points NB_POINTS] + [--reference REFERENCE] + [-v [{DEBUG,INFO,WARNING}]] [-f] + in_bundle out_centroid + +Compute a single bundle centroid, using an 'infinite' QuickBundles threshold. + +Formerly: scil_compute_centroid.py + +positional arguments: + in_bundle Fiber bundle file. + out_centroid Output centroid streamline filename. + +options: + -h, --help show this help message and exit + --nb_points NB_POINTS + Number of points defining the centroid streamline [20]. + --reference REFERENCE + Reference anatomy for tck/vtk/fib/dpy file + support (.nii or .nii.gz). + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_bundle_compute_endpoints_map.py.help b/scripts/.hidden/scil_bundle_compute_endpoints_map.py.help new file mode 100644 index 000000000..1de7346dc --- /dev/null +++ b/scripts/.hidden/scil_bundle_compute_endpoints_map.py.help @@ -0,0 +1,42 @@ +usage: scil_bundle_compute_endpoints_map.py [-h] [--swap] [--binary] + [--nb_points NB_POINTS] + [--indent INDENT] [--sort_keys] + [--reference REFERENCE] + [-v [{DEBUG,INFO,WARNING}]] [-f] + in_bundle endpoints_map_head + endpoints_map_tail + +Computes the endpoint map of a bundle. The endpoint map is simply a count of +the number of streamlines that start or end in each voxel. + +The idea is to estimate the cortical area affected by the bundle (assuming +streamlines start/end in the cortex). + +Note: If the streamlines are not ordered, the head/tail are random and not +really two coherent groups. Use the following script to order streamlines: +scil_tractogram_uniformize_endpoints.py + +Formerly: scil_compute_endpoints_map.py + +positional arguments: + in_bundle Fiber bundle filename. + endpoints_map_head Output endpoints map head filename. + endpoints_map_tail Output endpoints map tail filename. + +options: + -h, --help show this help message and exit + --swap Swap head<->tail convention. Can be useful when the reference is not in RAS. + --binary Save outputs as a binary mask instead of a heat map. + --nb_points NB_POINTS + Number of points to consider at the extremities of the streamlines. [1] + --reference REFERENCE + Reference anatomy for tck/vtk/fib/dpy file + support (.nii or .nii.gz). + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + -f Force overwriting of the output files. + +Json options: + --indent INDENT Indent for json pretty print. + --sort_keys Sort keys in output json.
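Similarly, a hypothetical invocation of the endpoints-map script above, after ordering the streamlines with scil_tractogram_uniformize_endpoints.py as its note recommends (filenames are placeholders):

>>> scil_bundle_compute_endpoints_map.py AF_L.trk AF_L_head.nii.gz AF_L_tail.nii.gz --binary

With --binary, the two output maps are masks of the voxels where streamlines start (head) or end (tail), rather than heat maps of endpoint counts.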
diff --git a/scripts/.hidden/scil_bundle_diameter.py.help b/scripts/.hidden/scil_bundle_diameter.py.help new file mode 100644 index 000000000..adf2eb4ea --- /dev/null +++ b/scripts/.hidden/scil_bundle_diameter.py.help @@ -0,0 +1,70 @@ +usage: scil_bundle_diameter.py [-h] + [--fitting_func {lin_up,lin_down,exp,inv,log}] + [--show_rendering | --save_rendering OUT_FOLDER] + [--wireframe] [--error_coloring] + [--width WIDTH] [--opacity OPACITY] + [--win_dims WIDTH HEIGHT] [--background R G B] + [--reference REFERENCE] [--indent INDENT] + [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f] + in_bundles [in_bundles ...] in_labels + [in_labels ...] + +Script to estimate the diameter of bundle(s) along their length. +See also scil_bundle_shape_measures.py, which prints a quick estimate of +the diameter (volume / length). The computation here is more complex and done +for each section of the bundle. + +The script expects: +- bundles with coherent endpoints from scil_tractogram_uniformize_endpoints.py +- labels maps with around 5-50 points, from scil_bundle_label_map.py + (<5 is not enough, high risk of bad fit; + >50 is too much, high risk of bad fit) +- bundles that are close to a tube, + without major fanning in a single axis + (fanning in 2 directions, i.e. uniform dispersion, is a good approximation) + +The script prints a JSON file with mean/std to be compatible with tractometry. +WARNING: STD is in fact an ERROR measure from the fit and NOT an STD. + +Since the estimation and fit quality is not always intuitive for some bundles +and the tube with varying diameter is not easy to color/visualize, +the script comes with its own VTK rendering to allow exploration of the data +(optional). + +Formerly: scil_estimate_bundles_diameter.py + +positional arguments: + in_bundles List of tractography files. + in_labels List of labels maps that match the bundles. + +options: + -h, --help show this help message and exit + --fitting_func {lin_up,lin_down,exp,inv,log} + Function to weigh points using their distance. + [Default: None] + --reference REFERENCE + Reference anatomy for tck/vtk/fib/dpy file + support (.nii or .nii.gz). + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + -f Force overwriting of the output files. + +Visualization options: + --show_rendering Display VTK window (optional). + --save_rendering OUT_FOLDER + Save VTK render in the specified folder (optional) + --wireframe Use wireframe for the tube rendering. + --error_coloring Use the fitting error to color the tube. + --width WIDTH Width of tubes or lines representing streamlines + [Default: 0.2] + --opacity OPACITY Opacity for the streamlines rendered with the tube. + [Default: 0.2] + --win_dims WIDTH HEIGHT + The dimensions for the vtk window. [(1920, 1080)] + --background R G B RGB values [0, 255] of the color of the background. + [Default: [1, 1, 1]] + +Json options: + --indent INDENT Indent for json pretty print. + --sort_keys Sort keys in output json. diff --git a/scripts/.hidden/scil_bundle_filter_by_occurence.py.help b/scripts/.hidden/scil_bundle_filter_by_occurence.py.help new file mode 100644 index 000000000..7c8dc5881 --- /dev/null +++ b/scripts/.hidden/scil_bundle_filter_by_occurence.py.help @@ -0,0 +1,38 @@ +usage: scil_bundle_filter_by_occurence.py [-h] [--ratio_voxels [RATIO_VOXELS]] + [--ratio_streamlines [RATIO_STREAMLINES]] + [--reference REFERENCE] + [-v [{DEBUG,INFO,WARNING}]] [-f] + in_bundles [in_bundles ...]
+ output_prefix + +Use multiple versions of the same bundle and detect the most probable voxels by +using a threshold on the occurrence, voxel-wise. With threshold 0.5, this is +a majority vote. This is useful to generate an average representation from +bundles of a given population. + +If streamlines originate from the same tractogram (e.g. to compare various +bundle clustering techniques), a streamline-wise vote is available to find the +streamlines most often included in the bundle. + +Formerly: scil_perform_majority_vote.py + +positional arguments: + in_bundles Input bundles filename(s). All tractograms must have identical headers. + output_prefix Output prefix. Ex: my_path/voting_. The suffixes will be: streamlines.trk and voxels.nii.gz + +options: + -h, --help show this help message and exit + --ratio_voxels [RATIO_VOXELS] + Threshold on the ratio of bundles with at least one streamline in a + given voxel to consider it as part of the 'gold standard'. Default if set: 0.5. + --ratio_streamlines [RATIO_STREAMLINES] + If all bundles come from the same tractogram, use this to generate + a voting for streamlines too. The associated value is the threshold on the ratio of + bundles including the streamline to consider it as part of the 'gold standard'. [0.5] + --reference REFERENCE + Reference anatomy for tck/vtk/fib/dpy file + support (.nii or .nii.gz). + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_bundle_generate_priors.py.help b/scripts/.hidden/scil_bundle_generate_priors.py.help new file mode 100644 index 000000000..abd09ab30 --- /dev/null +++ b/scripts/.hidden/scil_bundle_generate_priors.py.help @@ -0,0 +1,58 @@ +usage: scil_bundle_generate_priors.py [-h] + [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] + [--todi_sigma {0,1,2,3,4}] + [--sf_threshold SF_THRESHOLD] + [--out_prefix OUT_PREFIX] + [--out_dir OUT_DIR] + [--reference REFERENCE] + [-v [{DEBUG,INFO,WARNING}]] [-f] + in_bundle in_fodf in_mask + +Generation of priors and enhanced-FOD from an example/template bundle. +The bundle must have been cleaned thoroughly before use. The E-FOD can then +be used for bundle-specific tractography, but not for FOD metrics. + +Formerly: scil_generate_priors_from_bundle.py + +positional arguments: + in_bundle Input bundle filename. + in_fodf Input FOD filename. + in_mask Mask to constrain the TODI spatial smoothing, + for example a WM mask. + +options: + -h, --help show this help message and exit + --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} + Spherical harmonics basis used for the SH coefficients. + Must be either 'descoteaux07', 'tournier07', + 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: + 'descoteaux07' : SH basis from the Descoteaux et al. + MRM 2007 paper + 'tournier07' : SH basis from the new Tournier et al. + NeuroImage 2019 paper, as in MRtrix 3. + 'descoteaux07_legacy': SH basis from the legacy Dipy implementation + of the Descoteaux et al. MRM 2007 paper + 'tournier07_legacy' : SH basis from the legacy Tournier et al. + NeuroImage 2007 paper. + --todi_sigma {0,1,2,3,4} + Smooth the orientation histogram. + --sf_threshold SF_THRESHOLD + Relative threshold for sf masking (0.0-1.0). + --out_prefix OUT_PREFIX + Add a prefix to all output filenames, + default is no prefix.
+ --out_dir OUT_DIR Output directory for all generated files, + default is current directory. + --reference REFERENCE + Reference anatomy for tck/vtk/fib/dpy file + support (.nii or .nii.gz). + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + -f Force overwriting of the output files. + + References: + [1] Rheault, Francois, et al. "Bundle-specific tractography with + incorporated anatomical and orientational priors." + NeuroImage 186 (2019): 382-398 + diff --git a/scripts/.hidden/scil_bundle_label_map.py.help b/scripts/.hidden/scil_bundle_label_map.py.help new file mode 100644 index 000000000..39484a324 --- /dev/null +++ b/scripts/.hidden/scil_bundle_label_map.py.help @@ -0,0 +1,43 @@ +usage: scil_bundle_label_map.py [-h] [--nb_pts NB_PTS] [--colormap COLORMAP] + [--new_labelling] [--reference REFERENCE] + [-v [{DEBUG,INFO,WARNING}]] [-f] + in_bundles [in_bundles ...] in_centroid + out_dir + +Compute the label image (Nifti) from a centroid and tractograms (all +representing the same bundle). The label image represents the coverage of +the bundle, segmented into regions labelled from 0 to --nb_pts, starting from +the head, ending in the tail. + +Each voxel will have the label of its nearest centroid point. + +The number of labels will be the same as the centroid's number of points. + +Formerly: scil_compute_bundle_voxel_label_map.py + +positional arguments: + in_bundles Fiber bundle file. + in_centroid Centroid streamline corresponding to bundle. + out_dir Directory to save all mapping and coloring files: + - correlation_map.nii.gz + - session_x/labels_map.nii.gz + - session_x/distance_map.nii.gz + - session_x/correlation_map.nii.gz + - session_x/labels.trk + - session_x/distance.trk + - session_x/correlation.trk + Where session_x is numbered with each bundle. + +options: + -h, --help show this help message and exit + --nb_pts NB_PTS Number of divisions for the bundles. + Default is the number of points of the centroid. + --colormap COLORMAP Select the colormap for colored trk (data_per_point) [jet]. + --new_labelling Use the new labelling method (multi-centroids). + --reference REFERENCE + Reference anatomy for tck/vtk/fib/dpy file + support (.nii or .nii.gz). + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_bundle_mean_fixel_afd.py.help b/scripts/.hidden/scil_bundle_mean_fixel_afd.py.help new file mode 100644 index 000000000..994221db7 --- /dev/null +++ b/scripts/.hidden/scil_bundle_mean_fixel_afd.py.help @@ -0,0 +1,50 @@ +usage: scil_bundle_mean_fixel_afd.py [-h] [--length_weighting] + [--reference REFERENCE] + [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] + [-v [{DEBUG,INFO,WARNING}]] [-f] + in_bundle in_fodf afd_mean_map + +Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF) +maps along a bundle. + +This is the "real" fixel-based fODF amplitude along every streamline +of the bundle provided, averaged at every voxel. + +Please use a bundle file rather than a whole tractogram. + +Formerly: scil_compute_fixel_afd_from_bundles.py + +positional arguments: + in_bundle Path of the bundle file. + in_fodf Path of the fODF volume in spherical harmonics (SH). + afd_mean_map Path of the output mean AFD map. 
+
+options:
+ -h, --help show this help message and exit
+ --length_weighting If set, will weigh the AFD values according to segment lengths. [False]
+ --reference REFERENCE
+ Reference anatomy for tck/vtk/fib/dpy file
+ support (.nii or .nii.gz).
+ --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+ Spherical harmonics basis used for the SH coefficients.
+ Must be either 'descoteaux07', 'tournier07',
+ 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
+ 'descoteaux07' : SH basis from the Descoteaux et al.
+ MRM 2007 paper
+ 'tournier07' : SH basis from the new Tournier et al.
+ NeuroImage 2019 paper, as in MRtrix 3.
+ 'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+ of the Descoteaux et al. MRM 2007 paper
+ 'tournier07_legacy' : SH basis from the legacy Tournier et al.
+ NeuroImage 2007 paper.
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+Reference:
+ [1] Raffelt, D., Tournier, JD., Rose, S., Ridgway, GR., Henderson, R.,
+ Crozier, S., Salvado, O., & Connelly, A. (2012).
+ Apparent Fibre Density: a novel measure for the analysis of
+ diffusion-weighted magnetic resonance images. NeuroImage, 59(4),
+ 3976--3994.
diff --git a/scripts/.hidden/scil_bundle_mean_fixel_afd_from_hdf5.py.help b/scripts/.hidden/scil_bundle_mean_fixel_afd_from_hdf5.py.help
new file mode 100644
index 000000000..cf289868d
--- /dev/null
+++ b/scripts/.hidden/scil_bundle_mean_fixel_afd_from_hdf5.py.help
@@ -0,0 +1,50 @@
+usage: scil_bundle_mean_fixel_afd_from_hdf5.py [-h] [--length_weighting]
+ [--processes NBR]
+ [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
+ [-v [{DEBUG,INFO,WARNING}]]
+ [-f]
+ in_hdf5 in_fodf out_hdf5
+
+Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)
+maps for every connection within an hdf5 (.h5) file.
+
+This is the "real" fixel-based fODF amplitude along every streamline
+of each connection, averaged at every voxel.
+
+Please use an hdf5 (.h5) file containing decomposed connections.
+
+Formerly: scil_compute_fixel_afd_from_hdf5.py
+
+positional arguments:
+ in_hdf5 HDF5 filename (.h5) containing decomposed connections.
+ in_fodf Path of the fODF volume in spherical harmonics (SH).
+ out_hdf5 Path of the output HDF5 filename (.h5).
+
+options:
+ -h, --help show this help message and exit
+ --length_weighting If set, will weigh the AFD values according to segment lengths. [False]
+ --processes NBR Number of sub-processes to start.
+ Default: [1]
+ --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+ Spherical harmonics basis used for the SH coefficients.
+ Must be either 'descoteaux07', 'tournier07',
+ 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
+ 'descoteaux07' : SH basis from the Descoteaux et al.
+ MRM 2007 paper
+ 'tournier07' : SH basis from the new Tournier et al.
+ NeuroImage 2019 paper, as in MRtrix 3.
+ 'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+ of the Descoteaux et al. MRM 2007 paper
+ 'tournier07_legacy' : SH basis from the legacy Tournier et al.
+ NeuroImage 2007 paper.
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
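+
+Example (a sketch only; file names below are placeholders for your own data):
+>>> scil_bundle_mean_fixel_afd_from_hdf5.py decomposed.h5 fodf.nii.gz \
+        afd_mean.h5 --length_weighting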
+
+Reference:
+ [1] Raffelt, D., Tournier, JD., Rose, S., Ridgway, GR., Henderson, R.,
+ Crozier, S., Salvado, O., & Connelly, A. (2012).
+ Apparent Fibre Density: a novel measure for the analysis of
+ diffusion-weighted magnetic resonance images. NeuroImage,
+ 59(4), 3976--3994.
diff --git a/scripts/.hidden/scil_bundle_mean_fixel_bingham_metric.py.help b/scripts/.hidden/scil_bundle_mean_fixel_bingham_metric.py.help
new file mode 100644
index 000000000..2e4d7f6c7
--- /dev/null
+++ b/scripts/.hidden/scil_bundle_mean_fixel_bingham_metric.py.help
@@ -0,0 +1,48 @@
+usage: scil_bundle_mean_fixel_bingham_metric.py [-h] [--length_weighting]
+ [--max_theta MAX_THETA]
+ [--reference REFERENCE]
+ [-v [{DEBUG,INFO,WARNING}]]
+ [-f]
+ in_bundle in_bingham
+ in_bingham_metric out_mean_map
+
+Given a bundle and Bingham coefficients, compute the average Bingham
+metric at each voxel intersected by the bundle. Intersected voxels are
+found by computing the intersection between the voxel grid and each streamline
+in the input tractogram.
+
+This script behaves like scil_compute_mean_fixel_afd_from_bundles.py for fODFs,
+but here for Bingham distributions. These add the unique possibility of
+capturing fixel-based fiber spread (FS) and fiber fraction (FF). FD from the
+Bingham should be "equivalent" to the AFD_fixel we are used to.
+
+The Bingham coefficients volume must come from scil_fodf_to_bingham.py
+and the Bingham metrics come from scil_bingham_metrics.py.
+
+Bingham metrics are extracted from Bingham distributions fitted to fODF. There
+are as many values per voxel as there are lobes extracted. The value chosen
+for a given voxel is the one belonging to the lobe best aligned with the
+current streamline segment.
+
+Please use a bundle file rather than a whole tractogram.
+
+Formerly: scil_compute_mean_fixel_obe_metric_from_bundles.py
+
+positional arguments:
+ in_bundle Path of the bundle file.
+ in_bingham Path of the Bingham volume.
+ in_bingham_metric Path of the Bingham metric (FD, FS, or FF) volume.
+ out_mean_map Path of the output mean map.
+
+options:
+ -h, --help show this help message and exit
+ --length_weighting If set, will weigh the FD values according to segment lengths.
+ --max_theta MAX_THETA
+ Maximum angle (in degrees) condition on lobe alignment. [60]
+ --reference REFERENCE
+ Reference anatomy for tck/vtk/fib/dpy file
+ support (.nii or .nii.gz).
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_bundle_mean_std.py.help b/scripts/.hidden/scil_bundle_mean_std.py.help
new file mode 100644
index 000000000..8a75b6b42
--- /dev/null
+++ b/scripts/.hidden/scil_bundle_mean_std.py.help
@@ -0,0 +1,51 @@
+usage: scil_bundle_mean_std.py [-h] [--per_point in_labels | --include_dps]
+ [--density_weighting]
+ [--distance_weighting DISTANCE_NII]
+ [--correlation_weighting CORRELATION_NII]
+ [--out_json OUT_JSON] [--reference REFERENCE]
+ [--indent INDENT] [--sort_keys]
+ [-v [{DEBUG,INFO,WARNING}]]
+ in_bundle in_metrics [in_metrics ...]
+
+Compute mean and std for each metric.
+
+- Default: For the whole bundle. This is achieved by averaging the metric
+ values of all voxels occupied by the bundle.
+- Option --per_point: For all streamline points in the bundle for each metric
+ combination, along the bundle, i.e. for each point.
+ **To create label_map and distance_map, see
+ scil_bundle_label_map.py.
+
+Density weighting modifies the contribution of voxels with lower/higher
+streamline counts to reduce the influence of spurious streamlines.
+
+Formerly: scil_compute_bundle_mean_std_per_point.py or
+scil_compute_bundle_mean_std.py
+
+positional arguments:
+ in_bundle Fiber bundle file to compute statistics on.
+ in_metrics Nifti file to compute statistics on. Probably some tractometry measure(s) such as FA, MD, RD, ...
+
+options:
+ -h, --help show this help message and exit
+ --per_point in_labels
+ If set, computes the metrics per point instead of on the whole bundle.
+ You must then give the label map (.nii.gz) of the corresponding fiber bundle.
+ --include_dps Save values from data_per_streamline.
+ Currently not offered with option --per_point.
+ --density_weighting If set, weights statistics by the number of fibers passing through each voxel.
+ --distance_weighting DISTANCE_NII
+ If set, weights statistics by the inverse of the distance between a streamline and the centroid.
+ --correlation_weighting CORRELATION_NII
+ If set, weights statistics by the correlation strength between longitudinal data.
+ --out_json OUT_JSON Path of the output file. If not given, the output is simply printed on screen.
+ --reference REFERENCE
+ Reference anatomy for tck/vtk/fib/dpy file
+ support (.nii or .nii.gz).
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+
+Json options:
+ --indent INDENT Indent for json pretty print.
+ --sort_keys Sort keys in output json.
diff --git a/scripts/.hidden/scil_bundle_pairwise_comparison.py.help b/scripts/.hidden/scil_bundle_pairwise_comparison.py.help
new file mode 100644
index 000000000..8c485f23e
--- /dev/null
+++ b/scripts/.hidden/scil_bundle_pairwise_comparison.py.help
@@ -0,0 +1,57 @@
+usage: scil_bundle_pairwise_comparison.py [-h] [--streamline_dice]
+ [--bundle_adjency_no_overlap]
+ [--disable_streamline_distance]
+ [--single_compare SINGLE_COMPARE]
+ [--keep_tmp] [--ratio]
+ [--processes NBR]
+ [--reference REFERENCE]
+ [--indent INDENT] [--sort_keys]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_bundles [in_bundles ...] out_json
+
+Evaluate pair-wise similarity measures of bundles.
+All tractograms must be in the same space (aligned to one reference).
+
+For the voxel representation, the computed similarity measures are:
+ bundle_adjacency_voxels, dice_voxels, w_dice_voxels, density_correlation,
+ volume_overlap, volume_overreach
+The same measures are also evaluated for the endpoints.
+
+For the streamline representation, the computed similarity measures are:
+ bundle_adjacency_streamlines, dice_streamlines, streamlines_count_overlap,
+ streamlines_count_overreach
+
+Formerly: scil_evaluate_bundles_pairwise_agreement_measures.py
+
+positional arguments:
+ in_bundles Path of the input bundles.
+ out_json Path of the output json file.
+
+options:
+ -h, --help show this help message and exit
+ --streamline_dice Compute streamline-wise dice coefficient.
+ Tractograms must be identical [False].
+ --bundle_adjency_no_overlap
+ If set, do not count zeros in the average BA.
+ --disable_streamline_distance
+ Will not compute the streamlines distance
+ [False].
+ --single_compare SINGLE_COMPARE
+ Compare inputs to this single file.
+ --keep_tmp Will not delete the tmp folder at the end.
+ --ratio Compute overlap and overreach as a ratio over the
+ reference tractogram in a Tractometer-style way.
+ Can only be used if also using the `single_compare` option.
+ --processes NBR Number of sub-processes to start.
+ Default: [1]
+ --reference REFERENCE
+ Reference anatomy for tck/vtk/fib/dpy file
+ support (.nii or .nii.gz).
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+Json options:
+ --indent INDENT Indent for json pretty print.
+ --sort_keys Sort keys in output json.
diff --git a/scripts/.hidden/scil_bundle_reject_outliers.py.help b/scripts/.hidden/scil_bundle_reject_outliers.py.help
new file mode 100644
index 000000000..d624985ae
--- /dev/null
+++ b/scripts/.hidden/scil_bundle_reject_outliers.py.help
@@ -0,0 +1,35 @@
+usage: scil_bundle_reject_outliers.py [-h]
+ [--remaining_bundle REMAINING_BUNDLE]
+ [--alpha ALPHA] [--display_counts]
+ [--indent INDENT] [--sort_keys]
+ [--reference REFERENCE]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_bundle out_bundle
+
+Clean a bundle (inliers/outliers) using hierarchical clustering.
+http://archive.ismrm.org/2015/2844.html
+
+If spurious streamlines are dense, it is possible they will not be recognized
+as outliers. Manual cleaning may be required to overcome this limitation.
+
+positional arguments:
+ in_bundle Fiber bundle file to remove outliers from.
+ out_bundle Fiber bundle without outliers.
+
+options:
+ -h, --help show this help message and exit
+ --remaining_bundle REMAINING_BUNDLE
+ Removed outliers.
+ --alpha ALPHA Percent of the length of the tree at which clusters of individual streamlines will be pruned. [0.6]
+ --display_counts Print streamline count before and after filtering.
+ --reference REFERENCE
+ Reference anatomy for tck/vtk/fib/dpy file
+ support (.nii or .nii.gz).
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+Json options:
+ --indent INDENT Indent for json pretty print.
+ --sort_keys Sort keys in output json.
diff --git a/scripts/.hidden/scil_bundle_score_many_bundles_one_tractogram.py.help b/scripts/.hidden/scil_bundle_score_many_bundles_one_tractogram.py.help
new file mode 100644
index 000000000..fd342a4e5
--- /dev/null
+++ b/scripts/.hidden/scil_bundle_score_many_bundles_one_tractogram.py.help
@@ -0,0 +1,110 @@
+usage: scil_bundle_score_many_bundles_one_tractogram.py [-h] [--json_prefix p]
+ [--gt_dir DIR]
+ [--indent INDENT]
+ [--sort_keys]
+ [--reference REFERENCE]
+ [--no_bbox_check]
+ [-v [{DEBUG,INFO,WARNING}]]
+ [-f]
+ gt_config bundles_dir
+
+This script is intended to score all bundles from a single tractogram. Each
+valid bundle is compared to its ground truth.
+Ex: It was used for the ISMRM 2015 Challenge scoring.
+
+See also scil_bundle_score_same_bundle_many_segmentations.py to score many
+versions of the same bundle, compared to ONE ground truth / gold standard.
+
+This script is the second part of the script scil_score_tractogram, which
+first segments the whole-brain tractogram into bundles.
+
+Here we suppose that the bundles are already segmented and saved as follows:
+ main_dir/
+ segmented_VB/*_VS.trk.
+ segmented_IB/*_*_IC.trk (optional)
+ segmented_WPC/*_wpc.trk (optional)
+ IS.trk OR NC.trk (if segmented_IB is present)
+
+Config file
+-----------
+The config file needs to be a json containing a dict of the ground-truth
+bundles as keys.
The value for each bundle is itself a dictionary with:
+
+ - gt_mask: expected result. OL and OR metrics will be computed from this.*
+
+* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will
+be created. If it is a nifti file, it will be considered to be a mask.
+
+Example config file:
+{
+ "Ground_truth_bundle_0": {
+ "gt_mask": "PATH/bundle0.nii.gz",
+ }
+}
+
+Formerly: scil_score_bundles.py
+
+Tractometry
+-----------
+Global connectivity metrics:
+
+- Computed by default:
+ - VS: valid streamlines, belonging to a bundle (i.e. respecting all the
+ criteria for that bundle; endpoints, limit_mask, gt_mask.).
+ - IS: invalid streamlines. All other streamlines. IS = IC + NC.
+
+- Optional:
+ - WPC: wrong path connections, streamlines connecting correct ROIs but not
+ respecting the other criteria for that bundle. Such streamlines always
+ exist but they are only saved separately if specified in the options.
+ Else, they are merged back with the IS.
+ By definition, WPC are only computed if "limits masks" are provided.
+ - IC: invalid connections, streamlines joining an incorrect combination of
+ ROIs. Use carefully, quality depends on the quality of your ROIs and no
+ analysis is done on the shape of the streamlines.
+ - NC: no connections. Invalid streamlines minus invalid connections.
+
+- Fidelity metrics:
+ - OL: Overlap. Percentage of ground truth voxels containing streamline(s)
+ for a given bundle.
+ - OR: Overreach. Number of voxels containing streamline(s) where they
+ shouldn't be, for a given bundle. We compute two versions:
+ OR_pct_vs = divided by the total number of voxels covered by the bundle
+ (percentage of the voxels touched by VS).
+ Values range between 0 and 100%. Values are not defined when we
+ recovered no streamline for a bundle, but we set the OR_pct_vs to 0
+ in that case.
+ OR_pct_gt = divided by the total size of the ground truth bundle mask.
+ Values could be higher than 100%.
+ - f1 score, which is the same as the Dice score.
+
+positional arguments:
+ gt_config .json dict configured as specified above.
+ bundles_dir Directory containing all bundles.
+ (Ex: Output directory for scil_score_tractogram).
+ It is expected to contain a file IS.trk and
+ files segmented_VB/*_VS.trk, with, possibly, files
+ segmented_WPC/*_wpc.trk and segmented_IC/
+
+options:
+ -h, --help show this help message and exit
+ --json_prefix p Prefix of the output json file. Ex: 'study_x_'.
+ Suffix will be results.json. File will be saved inside bundles_dir.
+ --reference REFERENCE
+ Reference anatomy for tck/vtk/fib/dpy file
+ support (.nii or .nii.gz).
+ --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of
+ tractograms (ignores the presence of invalid streamlines).
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+Additions to gt_config:
+ --gt_dir DIR Root path of the ground truth files listed in the gt_config.
+ If not set, filenames in the config file are considered
+ as absolute paths.
+
+Json options:
+ --indent INDENT Indent for json pretty print.
+ --sort_keys Sort keys in output json.
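+
+Example invocation (illustrative only; paths are placeholders):
+>>> scil_bundle_score_many_bundles_one_tractogram.py gt_config.json \
+        scoring_output/ --gt_dir ground_truth/ --json_prefix study_x_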
diff --git a/scripts/.hidden/scil_bundle_score_same_bundle_many_segmentations.py.help b/scripts/.hidden/scil_bundle_score_same_bundle_many_segmentations.py.help
new file mode 100644
index 000000000..dbee99f10
--- /dev/null
+++ b/scripts/.hidden/scil_bundle_score_same_bundle_many_segmentations.py.help
@@ -0,0 +1,62 @@
+usage: scil_bundle_score_same_bundle_many_segmentations.py [-h]
+ [--streamlines_measures GOLD_STANDARD_STREAMLINES TRACTOGRAM]
+ [--voxels_measures GOLD_STANDARD_MASK TRACKING MASK]
+ [--processes NBR]
+ [--reference REFERENCE]
+ [--indent INDENT]
+ [--sort_keys]
+ [-v [{DEBUG,INFO,WARNING}]]
+ [-f]
+ in_bundles
+ [in_bundles ...]
+ out_json
+
+This script is intended to score many versions of the same bundle, compared to
+ONE ground truth / gold standard.
+
+See also scil_bundle_score_many_bundles_one_tractogram.py to score all bundles
+from a single tractogram by comparing each valid bundle to its ground truth.
+
+All tractograms must be in the same space (aligned to one reference).
+The measures can be applied to a voxel-wise or streamline-wise representation.
+
+A gold standard must be provided for the desired representation.
+A gold standard would be a segmentation from an expert or a group of experts.
+If only the streamline-wise representation is provided without a voxel-wise
+gold standard, it will be computed from the provided streamlines.
+At least one of the two representations is required.
+
+The gold standard tractogram is the tractogram (whole brain most likely) from
+which the segmentation is performed.
+The gold standard tracking mask is the tracking mask used by the tractography
+algorithm to generate the gold standard tractogram.
+
+The computed binary classification measures are:
+sensitivity, specificity, precision, accuracy, dice, kappa, youden for both
+the streamline and voxel representation (if provided).
+
+Formerly: scil_evaluate_bundles_binary_classification_measures.py
+
+positional arguments:
+ in_bundles Path of the input bundles.
+ out_json Path of the output json.
+
+options:
+ -h, --help show this help message and exit
+ --streamlines_measures GOLD_STANDARD_STREAMLINES TRACTOGRAM
+ The gold standard bundle and the original tractogram.
+ --voxels_measures GOLD_STANDARD_MASK TRACKING MASK
+ The gold standard mask and the original tracking mask.
+ --processes NBR Number of sub-processes to start.
+ Default: [1]
+ --reference REFERENCE
+ Reference anatomy for tck/vtk/fib/dpy file
+ support (.nii or .nii.gz).
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+Json options:
+ --indent INDENT Indent for json pretty print.
+ --sort_keys Sort keys in output json.
diff --git a/scripts/.hidden/scil_bundle_shape_measures.py.help b/scripts/.hidden/scil_bundle_shape_measures.py.help
new file mode 100644
index 000000000..8ab8dc13b
--- /dev/null
+++ b/scripts/.hidden/scil_bundle_shape_measures.py.help
@@ -0,0 +1,64 @@
+usage: scil_bundle_shape_measures.py [-h] [--out_json OUT_JSON]
+ [--group_statistics] [--no_uniformize]
+ [--reference REFERENCE] [--processes NBR]
+ [--indent INDENT] [--sort_keys]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_bundles [in_bundles ...]
+
+Evaluate basic measurements of bundle(s).
+
+The computed measures are:
+ - volume_info: volume, volume_endpoints
+ - streamlines_info: streamlines_count, avg_length (in mm or in number of
+ points), average step size, min_length, max_length.
+ ** You may also get this information with scil_tractogram_print_info.py.
+ - shape_info: span, curl, diameter, elongation, surface area,
+ irregularity, end surface area, radius, end surface irregularity,
+ mean_curvature, fractal dimension.
+ ** The diameter, here, is a simple estimation using volume / length.
+ For a more complex calculation, see scil_bundle_diameter.py.
+
+With more than one bundle, the measures are averaged over bundles. All
+tractograms must be in the same space.
+
+The set average contains the average measures of all input bundles. The
+measures that are dependent on the streamline count are weighted by the number
+of streamlines of each bundle. Each of these average measures is computed by
+first summing the product of the measure and the streamline count for each
+bundle, then dividing the sum by the total number of streamlines. Thus,
+measures including length and span are essentially averages over all the
+streamlines. Other streamline-related set measures are computed from other set
+averages, whereas bundle-related measures, such as volume and surface area,
+are computed as an average over all bundles.
+
+The fractal dimension is dependent on the voxel size and the number of voxels.
+If data comparison is performed, the bundles MUST be in the same resolution.
+
+Formerly: scil_compute_bundle_volume.py or
+scil_evaluate_bundles_individual_measures.py
+
+positional arguments:
+ in_bundles Path of the input bundles.
+
+options:
+ -h, --help show this help message and exit
+ --out_json OUT_JSON Path of the output file. If not given, the output is simply printed on screen.
+ --group_statistics Show average measures [False].
+ --no_uniformize Do NOT automatically uniformize endpoints for the endpoints-related metrics.
+ --reference REFERENCE
+ Reference anatomy for tck/vtk/fib/dpy file
+ support (.nii or .nii.gz).
+ --processes NBR Number of sub-processes to start.
+ Default: [1]
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+Json options:
+ --indent INDENT Indent for json pretty print.
+ --sort_keys Sort keys in output json.
+
+References:
+[1] Fang-Cheng Yeh. 2020.
+ Shape analysis of the human association pathways. NeuroImage.
diff --git a/scripts/.hidden/scil_bundle_uniformize_endpoints.py.help b/scripts/.hidden/scil_bundle_uniformize_endpoints.py.help
new file mode 100644
index 000000000..75da0ffbe
--- /dev/null
+++ b/scripts/.hidden/scil_bundle_uniformize_endpoints.py.help
@@ -0,0 +1,44 @@
+usage: scil_bundle_uniformize_endpoints.py [-h]
+ (--axis {x,y,z} | --auto | --centroid tractogram | --target_roi TARGET_ROI [TARGET_ROI ...])
+ [--swap] [--reference REFERENCE]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_bundle out_bundle
+
+Uniformize streamlines' endpoints according to a defined axis.
+Useful for tractometry or model creation.
+
+The --auto option will automatically calculate the main orientation.
+If the input bundle is poorly defined, it is possible the heuristic will be
+wrong.
+
+The default is to flip each streamline so that its first point's coordinate
+along the defined axis is smaller than its last point's (--swap does the
+opposite).
+
+The --target_roi option will use the barycenter of the target mask to define
+the axis. The target mask can be a binary mask or an atlas. If an atlas is
+used, labels are expected in the form of --target_roi atlas.nii.gz 2 3 5:7.
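+
+For instance (an illustration only; file names are placeholders), the atlas
+form described above could be invoked as:
+>>> scil_bundle_uniformize_endpoints.py AF_L.trk AF_L_uniform.trk \
+        --target_roi atlas.nii.gz 2 3 5:7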
+
+Formerly: scil_uniformize_streamlines_endpoints.py
+
+positional arguments:
+ in_bundle Input path of the tractography file.
+ out_bundle Output path of the uniformized file.
+
+options:
+ -h, --help show this help message and exit
+ --axis {x,y,z} Match endpoints of the streamlines along this axis.
+ SUGGESTION: Commissural = x, Association = y, Projection = z
+ --auto Match endpoints of the streamlines along an automatically determined axis.
+ --centroid tractogram
+ Match endpoints of the streamlines to align them to a single reference streamline (centroid).
+ --target_roi TARGET_ROI [TARGET_ROI ...]
+ Provide a target ROI: either a binary mask or a label map and the labels to use.
+ Will align heads to be closest to the mask barycenter.
+ (atlas: if no labels are provided, all labels will be used.)
+ --swap Swap head <-> tail convention. Can be useful when the reference is not in RAS.
+ --reference REFERENCE
+ Reference anatomy for tck/vtk/fib/dpy file
+ support (.nii or .nii.gz).
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_bundle_volume_per_label.py.help b/scripts/.hidden/scil_bundle_volume_per_label.py.help
new file mode 100644
index 000000000..6d28443d7
--- /dev/null
+++ b/scripts/.hidden/scil_bundle_volume_per_label.py.help
@@ -0,0 +1,31 @@
+usage: scil_bundle_volume_per_label.py [-h] [--indent INDENT] [--sort_keys]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ voxel_label_map bundle_name
+
+Compute bundle volume per label in mm3. This script supports anisotropic voxel
+resolutions. Volume is estimated by counting the number of voxels occupied by
+each label and multiplying it by the volume of a single voxel.
+
+The labels can be obtained by scil_bundle_label_map.py.
+
+This estimation is typically performed at a resolution of around 1 mm3.
+
+To get the volume and other measures directly from the (whole) bundle, use
+scil_bundle_shape_measures.py.
+
+Formerly: scil_compute_bundle_volume_per_label.py
+
+positional arguments:
+ voxel_label_map Voxel label map of the bundle.
+ bundle_name Bundle name.
+
+options:
+ -h, --help show this help message and exit
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+Json options:
+ --indent INDENT Indent for json pretty print.
+ --sort_keys Sort keys in output json.
diff --git a/scripts/.hidden/scil_connectivity_compare_populations.py.help b/scripts/.hidden/scil_connectivity_compare_populations.py.help
new file mode 100644
index 000000000..b95c60a3a
--- /dev/null
+++ b/scripts/.hidden/scil_connectivity_compare_populations.py.help
@@ -0,0 +1,63 @@
+usage: scil_connectivity_compare_populations.py [-h] --in_g1 IN_G1 [IN_G1 ...]
+ --in_g2 IN_G2 [IN_G2 ...]
+ [--tail {left,right,both}]
+ [--paired]
+ [--fdr | --bonferroni]
+ [--p_threshold THRESH OUT_FILE]
+ [--filtering_mask FILTERING_MASK]
+ [-v [{DEBUG,INFO,WARNING}]]
+ [-f]
+ out_pval_matrix
+
+Performs a network-based statistical comparison for populations g1 and g2. The
+output is a matrix of the same size as the input connectivity matrices, with
+p-values at each edge.
+All input matrices must have the same shape (NxN). For a paired t-test, both
+groups must have the same number of observations.
+
+For example, if you have streamline count weighted matrices for an MCI and a
+control group and you want to investigate differences in their connectomes:
+ >>> scil_connectivity_compare_populations.py pval.npy
+ --in_g1 MCI/*_sc.npy --in_g2 CTL/*_sc.npy
+
+--filtering_mask will simply multiply all input matrices by the binary mask
+before performing the statistical comparison. This reduces the number of
+statistical tests, which is useful when using --fdr or --bonferroni.
+
+Formerly: scil_compare_connectivity.py
+
+positional arguments:
+ out_pval_matrix Output matrix (.npy) containing the edge p-values.
+
+options:
+ -h, --help show this help message and exit
+ --in_g1 IN_G1 [IN_G1 ...]
+ List of matrices for the first population (.npy).
+ --in_g2 IN_G2 [IN_G2 ...]
+ List of matrices for the second population (.npy).
+ --tail {left,right,both}
+ Enables specification of an alternative hypothesis:
+ left: mean of g1 < mean of g2,
+ right: mean of g2 < mean of g1,
+ both: both means are not equal (default).
+ --paired Use paired sample t-test instead of population t-test.
+ --in_g1 and --in_g2 must be ordered the same way.
+ --fdr Perform a false discovery rate (FDR) correction for the p-values.
+ Uses the number of non-zero edges as the number of tests (value between 0.01 and 0.1).
+ --bonferroni Perform a Bonferroni correction for the p-values.
+ Uses the number of non-zero edges as the number of tests.
+ --p_threshold THRESH OUT_FILE
+ Threshold the final p-value matrix and save the binary matrix (.npy).
+ --filtering_mask FILTERING_MASK
+ Binary filtering mask (.npy) to apply before computing the measures.
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+[1] Rubinov, Mikail, and Olaf Sporns. "Complex network measures of brain
+ connectivity: uses and interpretations." Neuroimage 52.3 (2010):
+ 1059-1069.
+[2] Zalesky, Andrew, Alex Fornito, and Edward T. Bullmore. "Network-based
+ statistic: identifying differences in brain networks." Neuroimage 53.4
+ (2010): 1197-1207.
diff --git a/scripts/.hidden/scil_connectivity_compute_matrices.py.help b/scripts/.hidden/scil_connectivity_compute_matrices.py.help
new file mode 100644
index 000000000..3b5c0c9c8
--- /dev/null
+++ b/scripts/.hidden/scil_connectivity_compute_matrices.py.help
@@ -0,0 +1,91 @@
+usage: scil_connectivity_compute_matrices.py [-h] [--volume OUT_FILE]
+ [--streamline_count OUT_FILE]
+ [--length OUT_FILE]
+ [--similarity IN_FOLDER OUT_FILE]
+ [--maps IN_FOLDER OUT_FILE]
+ [--metrics IN_FILE OUT_FILE]
+ [--lesion_load IN_FILE OUT_DIR]
+ [--min_lesion_vol MIN_LESION_VOL]
+ [--density_weighting]
+ [--no_self_connection]
+ [--include_dps OUT_DIR]
+ [--force_labels_list FORCE_LABELS_LIST]
+ [--processes NBR]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_hdf5 in_labels
+
+This script computes a variety of measures in the form of connectivity
+matrices. This script is made to follow
+scil_tractogram_segment_bundles_for_connectivity.py and
+uses the same labels list as input.
+
+The script expects a folder containing all relevant bundles following the
+naming convention LABEL1_LABEL2.trk and a text file containing the list of
+labels that should be part of the matrices. The ordering of labels in the
+matrices will follow the same order as the list.
+This script only generates matrices in the form of arrays; it does not
+visualize or reorder the labels (nodes).
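+
+A minimal invocation could look like this (output names are illustrative):
+>>> scil_connectivity_compute_matrices.py decomposed.h5 labels.nii.gz \
+        --volume vol.npy --streamline_count sc.npy --length len.npy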
+
+The parameter --similarity expects a folder with density maps
+(LABEL1_LABEL2.nii.gz) following the same naming convention as the input
+directory.
+The bundles should be averaged versions in the same space. This will
+compute the weighted dice between each node and its homologous average
+version.
+
+The parameter --metrics can be used more than once and expects a map (t1, fa,
+etc.) in the same space; each will generate a matrix. The average value in
+the volume occupied by the bundle will be reported in the matrix nodes.
+
+The parameter --maps can be used more than once and expects a folder with
+pre-computed maps (LABEL1_LABEL2.nii.gz) following the same naming convention
+as the input directory. Each will generate a matrix. The average non-zero
+value in the map will be reported in the matrix nodes.
+
+The parameter --lesion_load will compute 3 lesion-related matrices:
+lesion_count.npy, lesion_vol.npy, lesion_sc.npy, and put them inside a
+specified folder. They represent the number of lesions, the total volume of
+lesion(s) and the total number of streamlines going through the lesion(s) for
+each connection. Each connection can be seen as a 'bundle' and then something
+similar to scil_analyse_lesion_load.py is run for each 'bundle'.
+
+Formerly: scil_compute_connectivity.py
+
+positional arguments:
+ in_hdf5 Input filename for the hdf5 container (.h5).
+ Obtained from scil_tractogram_segment_bundles_for_connectivity.py.
+ in_labels Labels file name (nifti).
+ This generates a NxN connectivity matrix.
+
+options:
+ -h, --help show this help message and exit
+ --volume OUT_FILE Output file for the volume weighted matrix (.npy).
+ --streamline_count OUT_FILE
+ Output file for the streamline count weighted matrix (.npy).
+ --length OUT_FILE Output file for the length weighted matrix (.npy).
+ --similarity IN_FOLDER OUT_FILE
+ Input folder containing the averaged bundle density
+ maps (.nii.gz) and output file for the similarity weighted matrix (.npy).
+ --maps IN_FOLDER OUT_FILE
+ Input folder containing pre-computed maps (.nii.gz)
+ and output file for the weighted matrix (.npy).
+ --metrics IN_FILE OUT_FILE
+ Input (.nii.gz) and output file (.npy) for a metric weighted matrix.
+ --lesion_load IN_FILE OUT_DIR
+ Input binary mask (.nii.gz) and output directory for all lesion-related matrices.
+ --min_lesion_vol MIN_LESION_VOL
+ Minimum lesion volume in mm3 [7].
+ --density_weighting Use density-weighting for the metric weighted matrix.
+ --no_self_connection Eliminate the diagonal from the matrices.
+ --include_dps OUT_DIR
+ Save matrices from data_per_streamline in the output directory.
+ COMMIT-related values will be summed instead of averaged.
+ Will always overwrite files.
+ --force_labels_list FORCE_LABELS_LIST
+ Path to a labels list (.txt) in case of missing labels in the atlas.
+ --processes NBR Number of sub-processes to start.
+ Default: [1]
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_connectivity_compute_pca.py.help b/scripts/.hidden/scil_connectivity_compute_pca.py.help
new file mode 100644
index 000000000..297cfbf43
--- /dev/null
+++ b/scripts/.hidden/scil_connectivity_compute_pca.py.help
@@ -0,0 +1,75 @@
+usage: scil_connectivity_compute_pca.py [-h] --metrics METRICS [METRICS ...]
+ --list_ids FILE [--not_only_common]
+ [--input_connectoflow]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_folder out_folder
+
+Script to compute PCA analysis on diffusion metrics. The output returned is
+all significant principal components (i.e. presenting eigenvalues > 1) in a
+connectivity matrix format. This script can take into account all edges from
+every subject in a population or only non-zero edges across all subjects.
+
+The script can take directly as input a connectoflow output folder. Simply use
+the --input_connectoflow flag. For other types of folder input, the script
+expects a single folder containing all matrices for all subjects.
+Example:
+ [in_folder]
+ |--- sub-01_ad.npy
+ |--- sub-01_md.npy
+ |--- sub-02_ad.npy
+ |--- sub-02_md.npy
+ |--- ...
+
+The plots, tables and principal components matrices will be output in the
+designated folder from the argument. If you want to move your principal
+components matrices back into your connectoflow output, you can use a
+similar bash command for all principal components:
+for sub in `cat list_id.txt`;
+do
+ cp out_folder/${sub}_PC1.npy connectoflow_output/$sub/Compute_Connectivity/
+done
+
+Interpretation of the resulting principal components can be done by evaluating
+the loading values for each metric. A value near 0 means that this metric
+doesn't contribute to this specific component, whereas high positive or
+negative values mean a larger contribution. Components can then be labeled
+based on which metric contributes the most. For example, a principal component
+showing a high loading for afd_fixel and near 0 loading for all other metrics
+can be interpreted as axonal density (see Gagnon et al. 2022 for this specific
+example or ref [3] for an introduction to PCA).
+
+EXAMPLE USAGE:
+scil_connectivity_compute_pca.py input_folder/ output_folder/
+ --metrics ad fa md rd [...] --list_ids list_ids.txt
+
+positional arguments:
+ in_folder Path to the input folder. See explanation above for its expected organization.
+ out_folder Path to the output folder to export graphs, tables and principal
+ components matrices.
+
+options:
+ -h, --help show this help message and exit
+ --metrics METRICS [METRICS ...]
+ Suffixes of all metrics to include in PCA analysis (ex: ad md fa rd).
+ They must be immediately followed by the .npy extension.
+ --list_ids FILE Path to a .txt file containing a list of all ids.
+ --not_only_common If true, will include all edges from all subjects and not only
+ common edges (Not recommended)
+ --input_connectoflow If true, script will assume the input folder is a Connectoflow output.
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+[1] Chamberland M, Raven EP, Genc S, Duffy K, Descoteaux M, Parker GD, Tax CMW,
+ Jones DK. Dimensionality reduction of diffusion MRI measures for improved
+ tractometry of the human brain. Neuroimage. 2019 Oct 15;200:89-100.
+ doi: 10.1016/j.neuroimage.2019.06.020. Epub 2019 Jun 20. PMID: 31228638;
+ PMCID: PMC6711466.
+[2] Gagnon A., Grenier G., Bocti C., Gillet V., Lepage J.-F., Baccarelli A. A.,
+ Posner J., Descoteaux M., Takser L. (2022). White matter microstructural
+ variability linked to differential attentional skills and impulsive behavior
+ in a pediatric population. Cerebral Cortex.
+ https://doi.org/10.1093/cercor/bhac180
+[3] https://towardsdatascience.com/what-are-pca-loadings-and-biplots-9a7897f2e559
+
diff --git a/scripts/.hidden/scil_connectivity_filter.py.help b/scripts/.hidden/scil_connectivity_filter.py.help
new file mode 100644
index 000000000..3349006e3
--- /dev/null
+++ b/scripts/.hidden/scil_connectivity_filter.py.help
@@ -0,0 +1,56 @@
+usage: scil_connectivity_filter.py [-h] [--lower_than [LOWER_THAN ...]]
+ [--greater_than [GREATER_THAN ...]]
+ [--keep_condition_count] [--inverse_mask]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ out_matrix_mask
+
+Script to facilitate filtering of connectivity matrices.
+The same could be achieved through a complex sequence of
+scil_connectivity_math.py calls.
+
+Can be used with any connectivity matrix from
+scil_connectivity_compute_matrices.py.
+
+For example, a simple filtering (Jasmeen style) would be:
+scil_connectivity_filter.py out_mask.npy
+ --greater_than */sc.npy 1 0.90
+ --lower_than */sim.npy 2 0.90
+ --greater_than */len.npy 40 0.90 -v;
+
+This will result in a binary mask where each node with a value of 1 represents
+a node where at least 90% of the population has at least 1 streamline,
+90% of the population is similar to the average (within 2 mm) and 90% of the
+population has an average streamline length of at least 40 mm.
+
+All operations are strictly > or <; there is no >= or <=.
+
+--greater_than and --lower_than expect the same convention:
+ MATRICES_LIST VALUE_THR POPULATION_PERC
+It is strongly recommended (but not enforced) that the same number of
+connectivity matrices is used for each condition.
+
+This script performs an intersection of all conditions, meaning that all
+conditions must be met in order not to be filtered.
+If the user wants to manually handle the requirements,
+--keep_condition_count can be used and the result binarized manually using
+scil_connectivity_math.py.
+
+Formerly: scil_filter_connectivity.py
+
+positional arguments:
+ out_matrix_mask Output mask (matrix) resulting from the provided conditions (.npy).
+
+options:
+ -h, --help show this help message and exit
+ --lower_than [LOWER_THAN ...]
+ Lower than condition using the VALUE_THR in at least POPULATION_PERC (from MATRICES_LIST).
+ See description for more details.
+ --greater_than [GREATER_THAN ...]
+ Greater than condition using the VALUE_THR in at least POPULATION_PERC (from MATRICES_LIST).
+ See description for more details.
+ --keep_condition_count
+ Report the number of condition(s) that pass/fail rather than a binary mask.
+ --inverse_mask Inverse the final mask. 0 where all conditions are respected and 1 where at least one fails.
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_connectivity_graph_measures.py.help b/scripts/.hidden/scil_connectivity_graph_measures.py.help
new file mode 100644
index 000000000..10cab03e9
--- /dev/null
+++ b/scripts/.hidden/scil_connectivity_graph_measures.py.help
@@ -0,0 +1,63 @@
+usage: scil_connectivity_graph_measures.py [-h]
+ [--filtering_mask FILTERING_MASK]
+ [--avg_node_wise] [--append_json]
+ [--small_world] [--indent INDENT]
+ [--sort_keys]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_conn_matrix in_length_matrix
+ out_json
+
+Evaluate graph theory measures from connectivity matrices.
+A length weighted and a streamline count weighted matrix are required since
+some measures require one or the other.
+
+This script evaluates the measures one subject at a time. To generate a
+population dictionary (similarly to other scil_connectivity_*.py scripts), use
+the --append_json option as well as the same output filename.
+>>> for i in hcp/*/; do scil_connectivity_graph_measures.py ${i}/sc_prob.npy
+ ${i}/len_prob.npy hcp_prob.json --append_json --avg_node_wise; done
+
+Some measures output one value per node; the default behavior is to return
+them all as a list. To obtain only the average, use the
+--avg_node_wise option.
+
+The computed connectivity measures are:
+centrality, modularity, assortativity, participation, clustering,
+nodal_strength, local_efficiency, global_efficiency, density, rich_club,
+path_length, edge_count, omega, sigma
+
+For more details about the measures, please refer to
+- https://sites.google.com/site/bctnet/measures
+- https://github.com/aestrivex/bctpy/wiki
+
+This script is under the GNU GPLv3 license; for more details please refer to
+https://www.gnu.org/licenses/gpl-3.0.en.html
+
+Formerly: scil_evaluate_connectivity_graph_measures.py
+
+positional arguments:
+ in_conn_matrix Input connectivity matrix (.npy).
+ Typically a streamline count weighted matrix.
+ in_length_matrix Input length weighted matrix (.npy).
+ out_json Path of the output json.
+
+options:
+ -h, --help show this help message and exit
+ --filtering_mask FILTERING_MASK
+ Binary filtering mask to apply before computing the measures.
+ --avg_node_wise Return a single value for node-wise measures.
+ --append_json If the file already exists, will append to the dictionary.
+ --small_world Compute measures related to small-worldness (omega and sigma).
+ This option is much slower.
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+Json options:
+ --indent INDENT Indent for json pretty print.
+ --sort_keys Sort keys in output json.
+
+[1] Rubinov, Mikail, and Olaf Sporns. "Complex network measures of brain
+ connectivity: uses and interpretations." Neuroimage 52.3 (2010):
+ 1059-1069.
diff --git a/scripts/.hidden/scil_connectivity_hdf5_average_density_map.py.help b/scripts/.hidden/scil_connectivity_hdf5_average_density_map.py.help
new file mode 100644
index 000000000..71a633c5a
--- /dev/null
+++ b/scripts/.hidden/scil_connectivity_hdf5_average_density_map.py.help
@@ -0,0 +1,36 @@
+usage: scil_connectivity_hdf5_average_density_map.py [-h] [--binary]
+ [--processes NBR]
+ [-v [{DEBUG,INFO,WARNING}]]
+ [-f]
+ in_hdf5 [in_hdf5 ...]
+ out_dir
+
+Compute a density map for each connection from an hdf5 file.
+Typically used after scil_tractogram_segment_bundles_for_connectivity.py in
+order to obtain the average density map of each connection to allow the use
+of --similarity in scil_connectivity_compute_matrices.py.
+
+This script is parallelized, but will run much slower on non-SSD storage if
+too many processes are used. The output is a directory containing the
+thousands of connections:
+out_dir/
+ |-- LABEL1_LABEL1.nii.gz
+ |-- LABEL1_LABEL2.nii.gz
+ |-- [...]
+ |-- LABEL90_LABEL90.nii.gz
+
+Formerly: scil_compute_hdf5_average_density_map.py
+
+positional arguments:
+ in_hdf5 List of HDF5 filenames (.h5) from scil_tractogram_segment_bundles_for_connectivity.py.
+ out_dir Path of the output directory.
+
+options:
+ -h, --help show this help message and exit
+ --binary Binarize density maps before the population average.
+ --processes NBR Number of sub-processes to start.
+ Default: [1]
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_connectivity_math.py.help b/scripts/.hidden/scil_connectivity_math.py.help
new file mode 100644
index 000000000..e4419c2fe
--- /dev/null
+++ b/scripts/.hidden/scil_connectivity_math.py.help
@@ -0,0 +1,150 @@
+usage: scil_connectivity_math.py [-h] [--data_type DATA_TYPE]
+ [--exclude_background]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference}
+ in_matrices [in_matrices ...] out_matrix
+
+Performs an operation on a list of matrices. The supported operations are
+listed below.
+
+Some operations such as multiplication or addition accept float values as
+parameters instead of matrices.
+> scil_connectivity_math.py multiplication mat.npy 10 mult_10.npy
+
+ lower_threshold: MAT THRESHOLD
+ All values below the threshold will be set to zero.
+ All values above the threshold will be set to one.
+
+ upper_threshold: MAT THRESHOLD
+ All values below the threshold will be set to one.
+ All values above the threshold will be set to zero.
+ Equivalent to lower_threshold followed by an inversion.
+
+ lower_threshold_eq: MAT THRESHOLD
+ All values below the threshold will be set to zero.
+ All values above or equal to the threshold will be set to one.
+
+ upper_threshold_eq: MAT THRESHOLD
+ All values below or equal to the threshold will be set to one.
+ All values above the threshold will be set to zero.
+ Equivalent to lower_threshold followed by an inversion.
+
+ lower_threshold_otsu: MAT
+ All values below or equal to the Otsu threshold will be set to zero.
+ All values above the Otsu threshold will be set to one.
+ (Otsu's method is an algorithm to perform automatic matrix thresholding
+ of the background.)
+
+ upper_threshold_otsu: MAT
+ All values below the Otsu threshold will be set to one.
+ All values above or equal to the Otsu threshold will be set to zero.
+ Equivalent to lower_threshold_otsu followed by an inversion.
+
+ lower_clip: MAT THRESHOLD
+ All values below the threshold will be set to threshold.
+
+ upper_clip: MAT THRESHOLD
+ All values above the threshold will be set to threshold.
+
+ absolute_value: MAT
+ All negative values will become positive.
+
+ round: MAT
+ Round all decimal values to the closest integer.
+
+ ceil: MAT
+ Ceil all decimal values to the next integer.
+
+ floor: MAT
+ Floor all decimal values to the previous integer.
+
+ normalize_sum: MAT
+ Normalize the matrix so the sum of all values is one.
+
+ normalize_max: MAT
+ Normalize the matrix so the maximum value is one.
+
+ log_10: MAT
+ Apply a log (base 10) to all non-zero values of a matrix.
+
+ log_e: MAT
+ Apply a natural log to all non-zero values of a matrix.
+
+ convert: MAT
+ Perform no operation, but simply change the data type.
+
+ invert: MAT
+ Operation on binary matrix to interchange 0s and 1s in a binary mask.
+
+ addition: MATs
+ Add multiple matrices together.
+
+ subtraction: MAT_1 MAT_2
+ Subtract the second matrix from the first (MAT_1 - MAT_2).
+
+ multiplication: MATs
+ Multiply multiple matrices together (danger of underflow and overflow).
+
+ division: MAT_1 MAT_2
+ Divide the first matrix by the second (danger of underflow and overflow).
+ Zero values are ignored and excluded from the operation.
+
+ mean: MATs
+ Compute the mean of matrices.
+ If a single 4D matrix is provided, average along the last dimension.
+
+ std: MATs
+ Compute the standard deviation across multiple matrices.
+ If a single 4D matrix is provided, compute the STD along the last
+ dimension.
+
+ correlation: MATs
+ Computes the correlation of the 3x3x3 neighborhood of each voxel, for
+ all pairs of input matrices. The final matrix is the average correlation
+ (through all pairs).
+ For a given pair of matrices:
+ - Background is considered as 0. May lead to very high correlations
+ close to the border of the background regions, or very poor ones if the
+ backgrounds of the two matrices differ.
+ - Images are zero-padded. For the same reason as above, this may lead to
+ very high correlations if you have data close to the border of the
+ matrix.
+ - NaN values (if a voxel's neighborhood is entirely uniform; std 0) are
+ replaced by:
+ - 0 if at least one neighborhood contained only background;
+ - 1 if the voxel's neighborhoods are uniform in both matrices;
+ - 0 if the voxel's neighborhood is uniform in one matrix, but not
+ the other.
+
+ UPDATE AS OF VERSION 2.0: Random noise was previously added in the
+ process to help avoid NaN values. Now replaced by either 0 or 1 as
+ explained above.
+
+ union: MATs
+ Operation on binary matrix to keep voxels that are non-zero in at
+ least one file.
+
+ intersection: MATs
+ Operation on binary matrix to keep voxels that are non-zero and
+ present in all files.
+
+ difference: MAT_1 MAT_2
+ Operation on binary matrix to keep voxels from the first file that are
+ not in the second file (non-zeros).
+
+
+positional arguments:
+ {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference}
+ The type of operation to be performed on the matrices.
+ in_matrices The list of matrix files or parameters.
+ out_matrix Output matrix path.
+
+options:
+ -h, --help show this help message and exit
+ --data_type DATA_TYPE
+ Data type of the output matrix. Use the format: uint8, float16, int32.
+ --exclude_background Does not affect the background of the original matrices.
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_connectivity_normalize.py.help b/scripts/.hidden/scil_connectivity_normalize.py.help
new file mode 100644
index 000000000..9731dcb81
--- /dev/null
+++ b/scripts/.hidden/scil_connectivity_normalize.py.help
@@ -0,0 +1,76 @@
+usage: scil_connectivity_normalize.py [-h]
+ [--length LENGTH_MATRIX | --inverse_length LENGTH_MATRIX]
+ [--bundle_volume VOLUME_MATRIX]
+ [--parcel_volume ATLAS LABELS_LIST | --parcel_surface ATLAS LABELS_LIST]
+ [--max_at_one | --sum_to_one | --log_10]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_matrix out_matrix
+
+Normalize a connectivity matrix coming from
+scil_tractogram_segment_bundles_for_connectivity.py.
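+
+For example (a sketch only; file names are placeholders):
+>>> scil_connectivity_normalize.py sc.npy sc_norm.npy \
+        --parcel_volume atlas.nii.gz labels_list.txt
+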
+3 categories of normalization are available:
+-- Edge attributes
+ - length: Multiply each edge by the average bundle length.
+ Compensate for far away connections when using interface seeding.
+ Cannot be used with inverse_length.
+
+ - inverse_length: Divide each edge by the average bundle length.
+ Compensate for big connections when using white matter seeding.
+ Cannot be used with length.
+
+ - bundle_volume: Divide each edge by the average bundle volume.
+ Compensate for big connections when using white matter seeding.
+
+-- Node attributes (Mutually exclusive)
+ - parcel_volume: Divide each edge by the sum of the node volumes.
+ Compensate for the likelihood of ending in the node.
+ Compensate for seeding bias when using interface seeding.
+
+ - parcel_surface: Divide each edge by the sum of the node surfaces.
+ Compensate for the likelihood of ending in the node.
+ Compensate for seeding bias when using interface seeding.
+
+-- Matrix scaling (Mutually exclusive)
+ - max_at_one: Maximum value of the matrix will be set to one.
+ - sum_to_one: Ensure the sum of all edge weights is one.
+ - log_10: Apply a base 10 logarithm to all edge weights.
+
+The volume and length matrices should come from the
+scil_tractogram_segment_bundles_for_connectivity.py script.
+
+A review of the types of normalization is available in:
+Colon-Perez, Luis M., et al. "Dimensionless, scale-invariant, edge weight
+metric for the study of complex structural networks." PLOS one 10.7 (2015).
+
+However, the edge weighting proposed in this publication is not
+implemented.
+
+Formerly: scil_normalize_connectivity.py
+
+positional arguments:
+ in_matrix Input connectivity matrix. This is typically a streamline_count matrix (.npy).
+ out_matrix Output normalized matrix (.npy).
+
+options:
+ -h, --help show this help message and exit
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+Edge-wise options:
+ --length LENGTH_MATRIX
+ Length matrix used for edge-wise multiplication.
+ --inverse_length LENGTH_MATRIX
+ Length matrix used for edge-wise division.
+ --bundle_volume VOLUME_MATRIX
+ Volume matrix used for edge-wise division.
+ --parcel_volume ATLAS LABELS_LIST
+ Atlas and labels list for edge-wise division.
+ --parcel_surface ATLAS LABELS_LIST
+ Atlas and labels list for edge-wise division.
+
+Scaling options:
+ --max_at_one Scale matrix with maximum value at one.
+ --sum_to_one Scale matrix with sum of all elements at one.
+ --log_10 Apply a base 10 logarithm to the matrix.
diff --git a/scripts/.hidden/scil_connectivity_pairwise_agreement.py.help b/scripts/.hidden/scil_connectivity_pairwise_agreement.py.help
new file mode 100644
index 000000000..182cf6a11
--- /dev/null
+++ b/scripts/.hidden/scil_connectivity_pairwise_agreement.py.help
@@ -0,0 +1,33 @@
+usage: scil_connectivity_pairwise_agreement.py [-h] [--single_compare matrix]
+ [--normalize] [--indent INDENT]
+ [--sort_keys]
+ [-v [{DEBUG,INFO,WARNING}]]
+ [-f]
+ in_matrices [in_matrices ...]
+ out_json
+
+Evaluate pair-wise similarity measures of connectivity matrices.
+
+The computed similarity measures are:
+sum of squared differences and Pearson correlation coefficient.
+
+Formerly: scil_evaluate_connectivity_pairwaise_agreement_measures.py
+
+positional arguments:
+ in_matrices Path of the input matrices.
+ out_json Path of the output json file.
+
+options:
+ -h, --help show this help message and exit
+ --single_compare matrix
+ Compare inputs to this single file.
+ (Else, compute all pairs in in_matrices).
+ --normalize If set, will normalize all matrices from zero to one.
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+Json options:
+ --indent INDENT Indent for json pretty print.
+ --sort_keys Sort keys in output json.
diff --git a/scripts/.hidden/scil_connectivity_print_filenames.py.help b/scripts/.hidden/scil_connectivity_print_filenames.py.help
new file mode 100644
index 000000000..575fa9fec
--- /dev/null
+++ b/scripts/.hidden/scil_connectivity_print_filenames.py.help
@@ -0,0 +1,32 @@
+usage: scil_connectivity_print_filenames.py [-h] [-v [{DEBUG,INFO,WARNING}]]
+ [-f]
+ in_matrix labels_list out_txt
+
+Output the list of filenames using the coordinates from a binary connectivity
+matrix. Typically used to move around files that are considered valid after
+the scil_connectivity_filter.py script.
+
+Example:
+# Keep connections with more than 1000 streamlines for 100% of a population
+scil_connectivity_filter.py filtering_mask.npy
+ --greater_than */streamlines_count.npy 1000 1.0
+scil_connectivity_print_filenames.py filtering_mask.npy
+ labels_list.txt pass.txt
+for file in $(cat pass.txt);
+ do mv ${SOMEWHERE}/${file} ${SOMEWHERE_ELSE}/;
+done
+
+Formerly: scil_print_connectivity_filenames.py
+
+positional arguments:
+ in_matrix Binary matrix in numpy (.npy) format.
+ Typically from scil_connectivity_filter.py
+ labels_list List saved by the decomposition script.
+ out_txt Output text file containing all filenames.
+
+options:
+ -h, --help show this help message and exit
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_connectivity_reorder_rois.py.help b/scripts/.hidden/scil_connectivity_reorder_rois.py.help
new file mode 100644
index 000000000..ad23d8da1
--- /dev/null
+++ b/scripts/.hidden/scil_connectivity_reorder_rois.py.help
@@ -0,0 +1,51 @@
+usage: scil_connectivity_reorder_rois.py [-h]
+ (--in_ordering IN_ORDERING | --optimal_leaf_ordering OUT_FILE)
+ [--out_suffix OUT_SUFFIX]
+ [--out_dir OUT_DIR]
+ [--labels_list LABELS_LIST]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_matrices [in_matrices ...]
+
+Re-order one or many connectivity matrices using a text file format.
+The first row contains the (x) values and the second row the (y) values; they
+must be space separated. The resulting matrix does not have to be square (an
+unequal number of x and y is supported).
+
+The values refer to the coordinates (starting at 0) in the matrix, but if the
+--labels_list parameter is used, the values will refer to the labels, which
+will be converted to the appropriate coordinates. This file must be the same
+as the one provided to scil_tractogram_segment_bundles_for_connectivity.py.
+
+To subsequently use scil_visualize_connectivity.py with a lookup table, you
+must use a label-based reordering json and use --labels_list.
+
+You can also use the Optimal Leaf Ordering (OLO) algorithm to transform a
+sparse matrix into an ordering that reduces the matrix bandwidth. The output
+file can then be re-used with --in_ordering. Only one input can be used with
+this option; we recommend an average streamline count or volume matrix.
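+
+For example (an illustration only; file names are placeholders):
+>>> scil_connectivity_reorder_rois.py sc.npy --in_ordering ordering.txt \
+        --labels_list labels_list.txt --out_suffix _reordered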
+
+Formerly: scil_reorder_connectivity.py
+
+positional arguments:
+  in_matrices           Connectivity matrices in .npy or .txt format.
+
+options:
+  -h, --help            show this help message and exit
+  --in_ordering IN_ORDERING
+                        Txt file with the first row as x and second as y.
+  --optimal_leaf_ordering OUT_FILE
+                        Output a text file with an ordering that aligns structures along the diagonal.
+  --out_suffix OUT_SUFFIX
+                        Suffix for the output matrix filename.
+  --out_dir OUT_DIR     Output directory for the re-ordered matrices.
+  --labels_list LABELS_LIST
+                        List saved by the decomposition script. If used,
+                        --in_ordering must contain labels rather than coordinates (.txt).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+[1] Rubinov, Mikail, and Olaf Sporns. "Complex network measures of brain
+    connectivity: uses and interpretations." Neuroimage 52.3 (2010):
+    1059-1069.
diff --git a/scripts/.hidden/scil_denoising_nlmeans.py.help b/scripts/.hidden/scil_denoising_nlmeans.py.help
new file mode 100644
index 000000000..69680f495
--- /dev/null
+++ b/scripts/.hidden/scil_denoising_nlmeans.py.help
@@ -0,0 +1,28 @@
+usage: scil_denoising_nlmeans.py [-h] [--mask] [--sigma float] [--log LOGFILE]
+                                 [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]
+                                 [-f]
+                                 in_image out_image number_coils
+
+Script to denoise a dataset with the Non Local Means algorithm.
+
+Formerly: scil_run_nlmeans.py
+
+positional arguments:
+  in_image              Path of the image file to denoise.
+  out_image             Path to save the denoised image file.
+  number_coils          Number of receiver coils of the scanner.
+                        Use number_coils=1 in the case of a SENSE (GE, Philips) reconstruction and
+                        number_coils >= 1 for GRAPPA reconstruction (Siemens). number_coils=4 works well for the 1.5T
+                        in Sherbrooke. Use number_coils=0 if the noise is considered Gaussian distributed.
+
+options:
+  -h, --help            show this help message and exit
+  --mask                Path to a binary mask. Only the data inside the mask will be used for computations.
+  --sigma float         The standard deviation of the noise to use instead of computing it automatically.
+  --log LOGFILE         If supplied, name of the text file to store the logs.
+  --processes NBR       Number of sub-processes to start.
+                        Default: [1]
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dki_metrics.py.help b/scripts/.hidden/scil_dki_metrics.py.help
new file mode 100644
index 000000000..e831c70ce
--- /dev/null
+++ b/scripts/.hidden/scil_dki_metrics.py.help
@@ -0,0 +1,105 @@
+usage: scil_dki_metrics.py [-h] [--mask MASK] [--tolerance tol]
+                           [--skip_b0_check] [--min_k MIN_K] [--max_k MAX_K]
+                           [--smooth SMOOTH] [--not_all] [--ak file]
+                           [--mk file] [--rk file] [--msk file]
+                           [--dki_fa file] [--dki_md file] [--dki_ad file]
+                           [--dki_rd file] [--dki_residual file] [--msd file]
+                           [-v [{DEBUG,INFO,WARNING}]] [-f]
+                           in_dwi in_bval in_bvec
+
+Script to compute the Diffusion Kurtosis Imaging (DKI) and Mean Signal DKI
+(MSDKI) metrics. DKI is a multi-shell diffusion model. The input DWI needs
+to be multi-shell, i.e. multi-bvalued.
+
+Since the diffusion kurtosis model involves the estimation of a large number
+of parameters and since the non-Gaussian components of the diffusion signal
+are more sensitive to artefacts, you should denoise your DWI volume
+before using this DKI script (e.g. scil_denoising_nlmeans.py). Moreover, to
+remove biases due to fiber dispersion, fiber crossings and other mesoscopic
+properties of the underlying tissue, MSDKI computes a powder average of the
+DWI over all directions, thus removing the orientational dependencies and
+creating an alternative mean kurtosis map.
+
+DKI is also known to be vulnerable to artefacted voxels induced by the
+low radial diffusivities of aligned white matter (CC, CST voxels). Since it is
+very hard to capture non-Gaussian information due to the low decays in the
+radial direction, its kurtosis estimates have very low robustness.
+Noisy kurtosis estimates tend to be negative, and their absolute values can be
+orders of magnitude higher than typical kurtosis values. Consequently,
+these negative kurtosis values will heavily propagate to the mean and radial
+kurtosis metrics. This is well-reported in [Rafael Henriques MSc thesis 2012,
+chapter 3]. Two ways to overcome this issue: i) compute the kurtosis values
+from powder-averaged MSDKI, and ii) perform 3D Gaussian smoothing. On
+powder-averaged signal decays, you don't have this low diffusivity issue and
+your kurtosis estimates have much higher precision (additionally, they are
+independent of the fODF).
+
+By default, will output all available metrics, using default names. Specific
+names can be specified using the metrics flags that are listed in the "Metrics
+files flags" section. If --not_all is set, only the metrics specified
+explicitly by the flags will be output.
+
+This script directly comes from the DIPY example gallery and references
+therein.
+[1] examples_built/reconst_dki/#example-reconst-dki
+[2] examples_built/reconst_msdki/#example-reconst-msdki
+
+Formerly: scil_compute_kurtosis_metrics.py
+
+positional arguments:
+  in_dwi                Path of the input multi-shell DWI dataset.
+  in_bval               Path of the b-value file, in FSL format.
+  in_bvec               Path of the b-vector file, in FSL format.
+
+options:
+  -h, --help            show this help message and exit
+  --mask MASK           Path to a binary mask.
+                        Only data inside the mask will be used for computations and reconstruction.
+                        [Default: None]
+  --tolerance tol       The tolerated gap between the b-values to extract and the current b-value.
+                        [Default: 20]
+                        * Note. We would expect to find at least one b-value in the
+                        range [0, tolerance]. To skip this check, use --skip_b0_check.
+  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
+                        (i.e. b-values below the default --tolerance). Use this option to
+                        allow continuing even if the minimum b-value is suspiciously high.
+                        Use with care, and only if you understand your data.
+  --min_k MIN_K         Minimum kurtosis value in the output maps
+                        (ak, mk, rk). In theory, -3/7 is the min kurtosis
+                        limit for regions that consist of water confined
+                        to spherical pores (see DIPY example and
+                        documentation) [Default: 0.0].
+  --max_k MAX_K         Maximum kurtosis value in the output maps
+                        (ak, mk, rk). In theory, 10 is the max kurtosis
+                        limit for regions that consist of water confined
+                        to spherical pores (see DIPY example and
+                        documentation) [Default: 3.0].
+  --smooth SMOOTH       Smooth input DWI with a 3D Gaussian filter with
+                        full-width-half-max (fwhm). Kurtosis fitting is
+                        sensitive and outliers occur easily.
According to
+                        tests on HCP, CB_Brain, Penthera3T, this smoothing
+                        is thus turned ON by default with fwhm=2.5.
+                        [Default: 2.5].
+  --not_all             If set, will only save the metrics explicitly
+                        specified using the other metrics flags.
+                        [Default: not set].
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Metrics files flags:
+  --ak file             Output filename for the axial kurtosis.
+  --mk file             Output filename for the mean kurtosis.
+  --rk file             Output filename for the radial kurtosis.
+  --msk file            Output filename for the mean signal kurtosis.
+  --dki_fa file         Output filename for the fractional anisotropy from DKI.
+  --dki_md file         Output filename for the mean diffusivity from DKI.
+  --dki_ad file         Output filename for the axial diffusivity from DKI.
+  --dki_rd file         Output filename for the radial diffusivity from DKI.
+
+Quality control files flags:
+  --dki_residual file   Output filename for the map of the residual of the tensor fit.
+                        Note. In previous versions, the resulting map was normalized;
+                        it no longer is.
+  --msd file            Output filename for the mean signal diffusion (powder-average).
diff --git a/scripts/.hidden/scil_dti_convert_tensors.py.help b/scripts/.hidden/scil_dti_convert_tensors.py.help
new file mode 100644
index 000000000..fff01c191
--- /dev/null
+++ b/scripts/.hidden/scil_dti_convert_tensors.py.help
@@ -0,0 +1,37 @@
+usage: scil_dti_convert_tensors.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                   in_file out_file in_format out_format
+
+Conversion of tensors (the 6 values from the triangular matrix) between various
+software standards. We cannot discover the input format type; the user must
+know how the tensors were created.
+
+    Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]
+    Shape: [i, j, k, 6].
+    Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639
+
+    MRTRIX's order is: [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]
+    Shape: [i, j, k, 6].
+    Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html
+
+    ANTS's order ('nifti format') is: [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].
+    Shape: [i, j, k, 1, 6] (Careful, file is 5D).
+    Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software
+
+    FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]
+    Shape: [i, j, k, 6].
+    Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide
+    (Also used for the Fibernavigator)
+
+
+positional arguments:
+  in_file               Input tensors filename.
+  out_file              Output tensors filename.
+  in_format             Input format. Choices: ['fsl', 'nifti', 'mrtrix', 'dipy']
+  out_format            Output format. Choices: ['fsl', 'nifti', 'mrtrix', 'dipy']
+
+options:
+  -h, --help            show this help message and exit
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
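+
+For instance, a hypothetical conversion of a tensor file saved in the Dipy
+convention to the MRtrix convention (filenames are illustrative):
+
+    scil_dti_convert_tensors.py tensors_dipy.nii.gz tensors_mrtrix.nii.gz \
+        dipy mrtrix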
diff --git a/scripts/.hidden/scil_dti_metrics.py.help b/scripts/.hidden/scil_dti_metrics.py.help
new file mode 100644
index 000000000..098e0891d
--- /dev/null
+++ b/scripts/.hidden/scil_dti_metrics.py.help
@@ -0,0 +1,101 @@
+usage: scil_dti_metrics.py [-h] [-f] [--mask MASK] [--method method_name]
+                           [--not_all] [--ad file] [--evecs file]
+                           [--evals file] [--fa file] [--ga file] [--md file]
+                           [--mode file] [--norm file] [--rgb file]
+                           [--rd file] [--tensor file]
+                           [--tensor_format {fsl,nifti,mrtrix,dipy}]
+                           [--non-physical file] [--pulsation string]
+                           [--residual file] [--b0_threshold thr]
+                           [--skip_b0_check] [-v [{DEBUG,INFO,WARNING}]]
+                           in_dwi in_bval in_bvec
+
+Script to compute all of the Diffusion Tensor Imaging (DTI) metrics.
+
+By default, will output all available metrics, using default names. Specific
+names can be specified using the metrics flags that are listed in the "Metrics
+files flags" section.
+
+If --not_all is set, only the metrics specified explicitly by the flags
+will be output. The available metrics are:
+
+fractional anisotropy (FA), geodesic anisotropy (GA), axial diffusivity (AD),
+radial diffusivity (RD), mean diffusivity (MD), mode, red-green-blue colored
+FA (rgb), principal tensor e-vector and tensor coefficients (dxx, dxy, dxz,
+dyy, dyz, dzz).
+
+For all the quality control metrics such as residual, physically implausible
+signals, pulsation and misalignment artifacts, see
+[J-D Tournier, S. Mori, A. Leemans. Diffusion Tensor Imaging and Beyond.
+MRM 2011].
+
+Formerly: scil_compute_dti_metrics.py
+
+positional arguments:
+  in_dwi                Path of the input diffusion volume.
+  in_bval               Path of the bvals file, in FSL format.
+  in_bvec               Path of the bvecs file, in FSL format.
+
+options:
+  -h, --help            show this help message and exit
+  -f                    Force overwriting of the output files.
+  --mask MASK           Path to a binary mask.
+                        Only data inside the mask will be used for computations and reconstruction. (Default: None)
+  --method method_name  Tensor fit method.
+                        WLS for weighted least squares
+                        LS for ordinary least squares
+                        NLLS for non-linear least-squares
+                        restore for RESTORE robust tensor fitting. (Default: WLS)
+  --not_all             If set, will only save the metrics explicitly specified using the other metrics flags. (Default: not set).
+  --b0_threshold thr    Threshold under which b-values are considered to be b0s.
+                        [Default: 20]
+                        * Note. We would expect to find at least one b-value in the
+                        range [0, b0_threshold]. To skip this check, use --skip_b0_check.
+  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
+                        (i.e. b-values below the default --b0_threshold). Use this option to
+                        allow continuing even if the minimum b-value is suspiciously high.
+                        If no b-value is found below the threshold, the script will continue
+                        with your minimal b-value as new --b0_threshold.
+                        Use with care, and only if you understand your data.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+
+Metrics files flags:
+  --ad file             Output filename for the axial diffusivity.
+  --evecs file          Output filename for the eigenvectors of the tensor.
+  --evals file          Output filename for the eigenvalues of the tensor.
+  --fa file             Output filename for the fractional anisotropy.
+  --ga file             Output filename for the geodesic anisotropy.
+  --md file             Output filename for the mean diffusivity.
+  --mode file           Output filename for the mode.
+  --norm file           Output filename for the tensor norm.
+  --rgb file            Output filename for the colored fractional anisotropy.
+  --rd file             Output filename for the radial diffusivity.
+  --tensor file         Output filename for the tensor coefficients.
+  --tensor_format {fsl,nifti,mrtrix,dipy}
+                        Format used for the tensors saved in --tensor file. (default: fsl)
+
+                        Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]
+                        Shape: [i, j, k, 6].
+                        Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639
+
+                        MRTRIX's order is: [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]
+                        Shape: [i, j, k, 6].
+                        Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html
+
+                        ANTS's order ('nifti format') is: [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].
+                        Shape: [i, j, k, 1, 6] (Careful, file is 5D).
+                        Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software
+
+                        FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]
+                        Shape: [i, j, k, 6].
+                        Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide
+                        (Also used for the Fibernavigator)
+
+
+Quality control files flags:
+  --non-physical file   Output filename for the voxels with physically implausible signals
+                        where the mean of b=0 images is below one or more diffusion-weighted images.
+  --pulsation string    Standard deviation map across all diffusion-weighted images and across b=0 images if more than one is available.
+                        Shows pulsation and misalignment artifacts.
+  --residual file       Output filename for the map of the residual of the tensor fit.
diff --git a/scripts/.hidden/scil_dwi_apply_bias_field.py.help b/scripts/.hidden/scil_dwi_apply_bias_field.py.help
new file mode 100644
index 000000000..777f5a146
--- /dev/null
+++ b/scripts/.hidden/scil_dwi_apply_bias_field.py.help
@@ -0,0 +1,24 @@
+usage: scil_dwi_apply_bias_field.py [-h] [--mask MASK]
+                                    [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                    in_dwi in_bias_field out_name
+
+Apply bias field correction to DWI. This script doesn't compute the bias
+field itself. It ONLY applies an existing bias field. Please use the ANTs
+N4BiasFieldCorrection executable to compute the bias field.
+
+Formerly: scil_apply_bias_field_on_dwi.py
+
+positional arguments:
+  in_dwi                DWI Nifti image.
+  in_bias_field         Bias field Nifti image.
+  out_name              Corrected DWI Nifti image.
+
+options:
+  -h, --help            show this help message and exit
+  --mask MASK           Apply bias field correction only in the region defined by the mask.
+                        If this is not given, the bias field is still applied only in non-background data
+                        (i.e. where the dwi is not 0).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dwi_compute_snr.py.help b/scripts/.hidden/scil_dwi_compute_snr.py.help
new file mode 100644
index 000000000..be4e1a6fb
--- /dev/null
+++ b/scripts/.hidden/scil_dwi_compute_snr.py.help
@@ -0,0 +1,59 @@
+usage: scil_dwi_compute_snr.py [-h]
+                               [--noise_mask NOISE_MASK | --noise_map NOISE_MAP]
+                               [--b0_thr B0_THR] [--out_basename OUT_BASENAME]
+                               [--split_shells] [--indent INDENT]
+                               [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f]
+                               in_dwi in_bval in_bvec in_mask
+
+Script to compute signal to noise ratio (SNR) in a region of interest (ROI)
+of a DWI volume.
+
+It will compute the SNR for all DWI volumes of the input image separately.
+The output will contain the SNR, which is the ratio of
+mean(signal) / std(noise).
+The mean of the signal is computed inside the mask.
+The standard deviation of the noise is estimated inside the noise_mask
+or inside the same mask if a noise_map is provided.
+If neither is supplied, it will be estimated using the data outside the brain,
+computed with Dipy's median_otsu.
+
+If verbose is True, the SNR for every DWI volume will be output.
+
+This works best in a well-defined ROI such as the corpus callosum.
+It is heavily dependent on the ROI and its quality.
+
+We highly recommend using a noise_map if you can acquire one.
+See refs [1, 2] that describe the noise map acquisition.
+[1] St-Jean, et al (2016). Non Local Spatial and Angular Matching...
+    https://doi.org/10.1016/j.media.2016.02.010
+[2] Reymbaut, et al (2021). Magic DIAMOND...
+    https://doi.org/10.1016/j.media.2021.101988
+
+Formerly: scil_snr_in_roi.py
+
+positional arguments:
+  in_dwi                Path of the input diffusion volume.
+  in_bval               Path of the bvals file, in FSL format.
+  in_bvec               Path of the bvecs file, in FSL format.
+  in_mask               Binary mask of the region used to estimate SNR.
+
+options:
+  -h, --help            show this help message and exit
+  --b0_thr B0_THR       All b-values with values less than or equal to b0_thr are considered as b0s, i.e. without diffusion weighting. [0.0]
+  --out_basename OUT_BASENAME
+                        Path and prefix for the various saved files.
+  --split_shells        SNR will be split into shells.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Masks options:
+  --noise_mask NOISE_MASK
+                        Binary mask used to estimate the noise from the DWI.
+  --noise_map NOISE_MAP
+                        Noise map.
+
+Json options:
+  --indent INDENT       Indent for json pretty print.
+  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_dwi_concatenate.py.help b/scripts/.hidden/scil_dwi_concatenate.py.help
new file mode 100644
index 000000000..a63cde9d5
--- /dev/null
+++ b/scripts/.hidden/scil_dwi_concatenate.py.help
@@ -0,0 +1,31 @@
+usage: scil_dwi_concatenate.py [-h] [--in_dwis IN_DWIS [IN_DWIS ...]]
+                               [--in_bvals IN_BVALS [IN_BVALS ...]]
+                               [--in_bvecs IN_BVECS [IN_BVECS ...]]
+                               [--data_type DATA_TYPE]
+                               [-v [{DEBUG,INFO,WARNING}]] [-f]
+                               out_dwi out_bval out_bvec
+
+Concatenate DWI, bval and bvec files together. Files must be specified in
+matching order. Default data type will be the same as the first input DWI.
+
+Formerly: scil_concatenate_dwi.py
+
+positional arguments:
+  out_dwi               The name of the output DWI file.
+  out_bval              The name of the output b-values file (.bval).
+  out_bvec              The name of the output b-vectors file (.bvec).
+
+options:
+  -h, --help            show this help message and exit
+  --in_dwis IN_DWIS [IN_DWIS ...]
+                        The DWI files (.nii) to concatenate.
+  --in_bvals IN_BVALS [IN_BVALS ...]
+                        The b-values files in FSL format (.bval).
+  --in_bvecs IN_BVECS [IN_BVECS ...]
+                        The b-vectors files in FSL format (.bvec).
+  --data_type DATA_TYPE
+                        Data type of the output image. Use the format: uint8, int16, int/float32, int/float64.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
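+
+For instance, a hypothetical concatenation of two acquisitions into a single
+series (filenames are illustrative):
+
+    scil_dwi_concatenate.py dwi_all.nii.gz dwi_all.bval dwi_all.bvec \
+        --in_dwis dwi_1.nii.gz dwi_2.nii.gz \
+        --in_bvals dwi_1.bval dwi_2.bval \
+        --in_bvecs dwi_1.bvec dwi_2.bvec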
diff --git a/scripts/.hidden/scil_dwi_convert_FDF.py.help b/scripts/.hidden/scil_dwi_convert_FDF.py.help
new file mode 100644
index 000000000..19e401845
--- /dev/null
+++ b/scripts/.hidden/scil_dwi_convert_FDF.py.help
@@ -0,0 +1,31 @@
+usage: scil_dwi_convert_FDF.py [-h] [--bval BVAL] [--bvec BVEC]
+                               [--flip dimension [dimension ...]]
+                               [--swap dimension [dimension ...]]
+                               [-v [{DEBUG,INFO,WARNING}]] [-f]
+                               in_b0_path in_dwi_path out_path
+
+Converts a Varian FDF file or directory to a nifti file.
+If the procpar contains diffusion information, it will be saved as bval and
+bvec in the same folder as the output file.
+
+ex: scil_dwi_convert_FDF.py semsdw/b0_folder/ semsdw/dwi_folder/ dwi.nii.gz --bval dwi.bval --bvec dwi.bvec -f
+
+Formerly: scil_convert_fdf.py
+
+positional arguments:
+  in_b0_path            Path to the b0 FDF file or folder to convert.
+  in_dwi_path           Path to the DWI FDF file or folder to convert.
+  out_path              Path to the nifti file to write on disk.
+
+options:
+  -h, --help            show this help message and exit
+  --bval BVAL           Path to the bval file to write on disk.
+  --bvec BVEC           Path to the bvec file to write on disk.
+  --flip dimension [dimension ...]
+                        The axes you want to flip. e.g.: to flip the x and y axes use: x y. [None]
+  --swap dimension [dimension ...]
+                        The axes you want to swap. e.g.: to swap the x and y axes use: x y. [None]
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dwi_detect_volume_outliers.py.help b/scripts/.hidden/scil_dwi_detect_volume_outliers.py.help
new file mode 100644
index 000000000..8e55b96e5
--- /dev/null
+++ b/scripts/.hidden/scil_dwi_detect_volume_outliers.py.help
@@ -0,0 +1,39 @@
+usage: scil_dwi_detect_volume_outliers.py [-h] [--std_scale STD_SCALE]
+                                          [--b0_threshold thr]
+                                          [--skip_b0_check]
+                                          [-v [{DEBUG,INFO,WARNING}]]
+                                          in_dwi in_bval in_bvec
+
+This script simply finds the 3 closest angular neighbors of each direction
+(per shell) and computes the voxel-wise correlation.
+If the angles or correlations to neighbors are below the shell average (by
+args.std_scale x STD), it will flag the volume as a potential outlier.
+
+This script supports multi-shell data, but each shell is independent and
+detected using the --b0_threshold parameter.
+
+This script can be run before any processing to identify potential problems
+before launching pre-processing.
+
+positional arguments:
+  in_dwi                The DWI file (.nii).
+  in_bval               The b-values file in FSL format (.bval).
+  in_bvec               The b-vectors file in FSL format (.bvec).
+
+options:
+  -h, --help            show this help message and exit
+  --std_scale STD_SCALE
+                        How many standard deviations from the mean are required to be considered an outlier. [2.0]
+  --b0_threshold thr    Threshold under which b-values are considered to be b0s.
+                        [Default: 20]
+                        * Note. We would expect to find at least one b-value in the
+                        range [0, b0_threshold]. To skip this check, use --skip_b0_check.
+  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
+                        (i.e. b-values below the default --b0_threshold). Use this option to
+                        allow continuing even if the minimum b-value is suspiciously high.
+                        If no b-value is found below the threshold, the script will continue
+                        with your minimal b-value as new --b0_threshold.
+                        Use with care, and only if you understand your data.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
diff --git a/scripts/.hidden/scil_dwi_extract_b0.py.help b/scripts/.hidden/scil_dwi_extract_b0.py.help
new file mode 100644
index 000000000..35b655d14
--- /dev/null
+++ b/scripts/.hidden/scil_dwi_extract_b0.py.help
@@ -0,0 +1,46 @@
+usage: scil_dwi_extract_b0.py [-h]
+                              [--all | --mean | --cluster-mean | --cluster-first]
+                              [--block-size INT] [--single-image]
+                              [--b0_threshold thr] [--skip_b0_check]
+                              [-v [{DEBUG,INFO,WARNING}]] [-f]
+                              in_dwi in_bval in_bvec out_b0
+
+Extract B0s from DWI, based on the bval and bvec information.
+
+The default behavior is to save the first b0 of the series.
+
+Formerly: scil_extract_b0.py
+
+positional arguments:
+  in_dwi                DWI Nifti image.
+  in_bval               b-values filename, in FSL format (.bval).
+  in_bvec               b-vectors filename, in FSL format (.bvec).
+  out_b0                Output b0 file(s).
+
+options:
+  -h, --help            show this help message and exit
+  --block-size INT, -s INT
+                        Load the data using this block size. Useful
+                        when the data is too large to be loaded in memory.
+  --single-image        If output b0 volume has multiple time points, only outputs a single
+                        image instead of a numbered series of images.
+  --b0_threshold thr    Threshold under which b-values are considered to be b0s.
+                        [Default: 20]
+                        * Note. We would expect to find at least one b-value in the
+                        range [0, b0_threshold]. To skip this check, use --skip_b0_check.
+  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
+                        (i.e. b-values below the default --b0_threshold). Use this option to
+                        allow continuing even if the minimum b-value is suspiciously high.
+                        If no b-value is found below the threshold, the script will continue
+                        with your minimal b-value as new --b0_threshold.
+                        Use with care, and only if you understand your data.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Options in the case of multiple b0s:
+  --all                 Extract all b0s. Index number will be appended to the output file.
+  --mean                Extract mean b0.
+  --cluster-mean        Extract mean of each continuous cluster of b0s.
+  --cluster-first       Extract first b0 of each continuous cluster of b0s.
diff --git a/scripts/.hidden/scil_dwi_extract_shell.py.help b/scripts/.hidden/scil_dwi_extract_shell.py.help
new file mode 100644
index 000000000..25edd5c89
--- /dev/null
+++ b/scripts/.hidden/scil_dwi_extract_shell.py.help
@@ -0,0 +1,45 @@
+usage: scil_dwi_extract_shell.py [-h] [--out_indices OUT_INDICES]
+                                 [--block-size INT] [--tolerance INT]
+                                 [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                 in_dwi in_bval in_bvec in_bvals_to_extract
+                                 [in_bvals_to_extract ...] out_dwi out_bval
+                                 out_bvec
+
+Extracts the DWI volumes that are on specific b-value shells. Many shells
+can be extracted at once by specifying multiple b-values. The extracted
+volumes are in the same order as in the original file.
+
+If the b-values of a shell are not all identical, use the --tolerance
+argument to adjust the accepted interval. For example, a b-value of 2000
+and a tolerance of 20 will extract all volumes with b-values from 1980 to
+2020.
+
+Files that are too large to be loaded in memory can still be processed by
+setting the --block-size argument. A block size of X means that X DWI volumes
+are loaded at a time for processing.
+
+Formerly: scil_extract_dwi_shell.py
+
+positional arguments:
+  in_dwi                The DW image file to split.
+  in_bval               The b-values file in FSL format (.bval).
+  in_bvec               The b-vectors file in FSL format (.bvec).
+  in_bvals_to_extract   The list of b-values to extract. For example 0 2000.
+  out_dwi               The name of the output DWI file.
+  out_bval              The name of the output b-value file (.bval).
+  out_bvec              The name of the output b-vector file (.bvec).
+
+options:
+  -h, --help            show this help message and exit
+  --out_indices OUT_INDICES
+                        Optional filename for valid indices in the input dwi volume.
+  --block-size INT, -s INT
+                        Loads the data using this block size. Useful
+                        when the data is too large to be loaded in memory.
+  --tolerance INT, -t INT
+                        The tolerated gap between the b-values to extract
+                        and the actual b-values. [20]
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dwi_powder_average.py.help b/scripts/.hidden/scil_dwi_powder_average.py.help
new file mode 100644
index 000000000..213159c11
--- /dev/null
+++ b/scripts/.hidden/scil_dwi_powder_average.py.help
@@ -0,0 +1,40 @@
+usage: scil_dwi_powder_average.py [-h] [-f] [--mask file] [--b0_thr B0_THR]
+                                  [--shells SHELLS [SHELLS ...]]
+                                  [--shell_thr SHELL_THR]
+                                  [-v [{DEBUG,INFO,WARNING}]]
+                                  in_dwi in_bval out_avg
+
+Script to compute powder average (mean diffusion weighted image) from set of
+diffusion images.
+
+By default, will output an average image calculated from all images with
+non-zero bvalue.
+
+Specify --shells to output an image for a single shell.
+
+Script currently does not take into account the diffusion gradient directions
+being averaged.
+
+Formerly: scil_compute_powder_average.py
+
+positional arguments:
+  in_dwi                Path of the input diffusion volume.
+  in_bval               Path of the bvals file, in FSL format.
+  out_avg               Path of the output file.
+
+options:
+  -h, --help            show this help message and exit
+  -f                    Force overwriting of the output files.
+  --mask file           Path to a binary mask.
+                        Only data inside the mask will be used for powder avg. (Default: None)
+  --b0_thr B0_THR       Exclude b0 volumes from powder average with bvalue less than specified threshold.
+                        (Default: remove volumes with bvalue < 50)
+  --shells SHELLS [SHELLS ...]
+                        b-values (shells) to include in the powder average, passed as a list
+                        (e.g. --shells 1000 2000). If not specified, will include all volumes with a non-zero bvalue.
+  --shell_thr SHELL_THR
+                        Include volumes with bvalue +- the specified threshold.
+                        (Default: [50])
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
diff --git a/scripts/.hidden/scil_dwi_prepare_eddy_command.py.help b/scripts/.hidden/scil_dwi_prepare_eddy_command.py.help
new file mode 100644
index 000000000..10d0c9580
--- /dev/null
+++ b/scripts/.hidden/scil_dwi_prepare_eddy_command.py.help
@@ -0,0 +1,64 @@
+usage: scil_dwi_prepare_eddy_command.py [-h] [--n_reverse N_REVERSE]
+                                        [--topup TOPUP]
+                                        [--topup_params TOPUP_PARAMS]
+                                        [--eddy_cmd {eddy_openmp,eddy_cuda,eddy_cuda8.0,eddy_cuda9.1,eddy_cuda10.2,eddy,eddy_cpu}]
+                                        [--b0_thr B0_THR]
+                                        [--encoding_direction {x,y,z}]
+                                        [--readout READOUT]
+                                        [--slice_drop_correction]
+                                        [--lsr_resampling]
+                                        [--out_directory OUT_DIRECTORY]
+                                        [--out_prefix OUT_PREFIX]
+                                        [--out_script] [--fix_seed]
+                                        [--eddy_options EDDY_OPTIONS]
+                                        [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                        in_dwi in_bvals in_bvecs in_mask
+
+Prepare a typical command for eddy and create the necessary files.
When using
+multiple acquisitions and/or opposite phase directions, images, b-values and
+b-vectors should be merged together using scil_dwi_concatenate.py. If using
+topup prior to calling this script, images should be concatenated in the same
+order as the b0s used with prepare_topup.
+
+Formerly: scil_prepare_eddy_command.py
+
+positional arguments:
+  in_dwi                Input DWI Nifti image. If using multiple acquisitions and/or opposite phase directions, please merge in the same order as for prepare_topup using scil_dwi_concatenate.py.
+  in_bvals              Input b-values file in FSL format.
+  in_bvecs              Input b-vectors file in FSL format.
+  in_mask               Binary brain mask.
+
+options:
+  -h, --help            show this help message and exit
+  --n_reverse N_REVERSE
+                        Number of reverse phase volumes included in the DWI image [0].
+  --topup TOPUP         Topup output name. If given, apply topup during eddy.
+                        Should be the same as --out_prefix from scil_dwi_prepare_topup_command.py.
+  --topup_params TOPUP_PARAMS
+                        Parameters file (typically named acqparams) used to run topup.
+  --eddy_cmd {eddy_openmp,eddy_cuda,eddy_cuda8.0,eddy_cuda9.1,eddy_cuda10.2,eddy,eddy_cpu}
+                        Eddy command [eddy_openmp].
+  --b0_thr B0_THR       All b-values with values less than or equal to b0_thr are considered
+                        as b0s, i.e. without diffusion weighting [20].
+  --encoding_direction {x,y,z}
+                        Acquisition direction, default is AP-PA [y].
+  --readout READOUT     Total readout time from the DICOM metadata [0.062].
+  --slice_drop_correction
+                        If set, will activate eddy's outlier correction,
+                        which includes slice drop correction.
+  --lsr_resampling      Perform least-square resampling, allowing eddy to combine forward and reverse phase acquisitions for better reconstruction. Only works if directions and b-values are identical in both phase directions.
+  --out_directory OUT_DIRECTORY
+                        Output directory for eddy files [.].
+  --out_prefix OUT_PREFIX
+                        Prefix of the eddy-corrected DWI [dwi_eddy_corrected].
+  --out_script          If set, will output a .sh script (eddy.sh).
+                        else, will output the lines to the terminal [False].
+  --fix_seed            If set, will use the fixed seed strategy for eddy.
+                        Enhances reproducibility.
+  --eddy_options EDDY_OPTIONS
+                        Additional options you want to use to run eddy.
+                        Add these options using quotes (e.g. "--ol_nstd=6 --mb=4").
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dwi_prepare_topup_command.py.help b/scripts/.hidden/scil_dwi_prepare_topup_command.py.help
new file mode 100644
index 000000000..b23857556
--- /dev/null
+++ b/scripts/.hidden/scil_dwi_prepare_topup_command.py.help
@@ -0,0 +1,44 @@
+usage: scil_dwi_prepare_topup_command.py [-h] [--config CONFIG] [--synb0]
+                                         [--encoding_direction {x,y,z}]
+                                         [--readout READOUT]
+                                         [--out_b0s OUT_B0S]
+                                         [--out_directory OUT_DIRECTORY]
+                                         [--out_prefix OUT_PREFIX]
+                                         [--out_params OUT_PARAMS]
+                                         [--out_script]
+                                         [--topup_options TOPUP_OPTIONS]
+                                         [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                         in_forward_b0 in_reverse_b0
+
+Prepare a typical command for topup and create the necessary files.
+The reversed b0 must be in a different file.
+
+Formerly: scil_prepare_topup_command.py
+
+positional arguments:
+  in_forward_b0         Input b0 Nifti image with forward phase encoding.
+  in_reverse_b0         Input b0 Nifti image with reversed phase encoding.
+
+options:
+  -h, --help            show this help message and exit
+  --config CONFIG       Topup config file [b02b0.cnf].
+  --synb0               If set, will use SyNb0 custom acqparams file.
+  --encoding_direction {x,y,z}
+                        Acquisition direction of the forward b0 image, default is AP [y].
+  --readout READOUT     Total readout time from the DICOM metadata [0.062].
+  --out_b0s OUT_B0S     Output fused b0 file [fused_b0s.nii.gz].
+  --out_directory OUT_DIRECTORY
+                        Output directory for topup files [.].
+  --out_prefix OUT_PREFIX
+                        Prefix of the topup results [topup_results].
+  --out_params OUT_PARAMS
+                        Filename for the acquisition parameters file [acqparams.txt].
+  --out_script          If set, will output a .sh script (topup.sh).
+                        else, will output the lines to the terminal [False].
+  --topup_options TOPUP_OPTIONS
+                        Additional options you want to use to run topup.
+                        Add these options using quotes (e.g. "--fwhm=6 --miter=4").
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dwi_reorder_philips.py.help b/scripts/.hidden/scil_dwi_reorder_philips.py.help
new file mode 100644
index 000000000..8c01e8740
--- /dev/null
+++ b/scripts/.hidden/scil_dwi_reorder_philips.py.help
@@ -0,0 +1,24 @@
+usage: scil_dwi_reorder_philips.py [-h] [--json JSON]
+                                   [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                   in_dwi in_bval in_bvec in_table
+                                   out_basename
+
+Re-order gradients according to the original table (Philips).
+This script is not needed for version 5.6 and higher.
+
+Formerly: scil_reorder_dwi_philips.py
+
+positional arguments:
+  in_dwi                Input dwi file.
+  in_bval               Input bval FSL format.
+  in_bvec               Input bvec FSL format.
+  in_table              Original Philips table - first line is skipped.
+  out_basename          Basename for the output files.
+
+options:
+  -h, --help            show this help message and exit
+  --json JSON           If you give a json file, it will check if you need to reorder your Philips dwi.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dwi_split_by_indices.py.help b/scripts/.hidden/scil_dwi_split_by_indices.py.help
new file mode 100644
index 000000000..562f2d0aa
--- /dev/null
+++ b/scripts/.hidden/scil_dwi_split_by_indices.py.help
@@ -0,0 +1,28 @@
+usage: scil_dwi_split_by_indices.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                    in_dwi in_bval in_bvec out_basename
+                                    split_indices [split_indices ...]
+
+Splits the DWI image at certain indices along the last dimension (b-values).
+Many indices can be given at once by specifying multiple values. The split
+volumes are in the same order as in the original file. Also outputs the
+corresponding .bval and .bvec files.
+
+This script can be useful for splitting images at places where a b-value
+extraction does not work. For instance, if one wants to split the first x
+b-1500s from the rest of the b-1500s in an image, simply put x as an index.
+
+Formerly: scil_split_image.py
+
+positional arguments:
+  in_dwi                The DW image file to split.
+  in_bval               The b-values file in FSL format (.bval).
+  in_bvec               The b-vectors file in FSL format (.bvec).
+  out_basename          The basename of the output files. Index numbers will be appended to out_basename. For example, if split_indices were 3 10, the files would be saved as out_basename_0_2, out_basename_3_10, out_basename_11_20, where the size of the last dimension is 21 in this example.
+  split_indices         The list of indices where to split the image. For example 3 10.
This would split the image into three parts, such as [:3], [3:10], [10:]. Indices must be in increasing order.
+
+options:
+  -h, --help            show this help message and exit
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dwi_to_sh.py.help b/scripts/.hidden/scil_dwi_to_sh.py.help
new file mode 100644
index 000000000..d5f63057a
--- /dev/null
+++ b/scripts/.hidden/scil_dwi_to_sh.py.help
@@ -0,0 +1,50 @@
+usage: scil_dwi_to_sh.py [-h] [--sh_order SH_ORDER]
+                         [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
+                         [--smooth SMOOTH] [--use_attenuation] [--mask MASK]
+                         [--b0_threshold thr] [--skip_b0_check]
+                         [-v [{DEBUG,INFO,WARNING}]] [-f]
+                         in_dwi in_bval in_bvec out_sh
+
+Script to compute the SH coefficient directly on the raw DWI signal.
+
+Formerly: scil_compute_sh_from_signal.py
+
+positional arguments:
+  in_dwi                Path of the dwi volume.
+  in_bval               Path of the b-value file, in FSL format.
+  in_bvec               Path of the b-vector file, in FSL format.
+  out_sh                Name of the output SH file to save.
+
+options:
+  -h, --help            show this help message and exit
+  --sh_order SH_ORDER   SH order to fit (int). [4]
+  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+                        Spherical harmonics basis used for the SH coefficients.
+                        Must be either 'descoteaux07', 'tournier07',
+                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
+                        'descoteaux07'       : SH basis from the Descoteaux et al.
+                                               MRM 2007 paper
+                        'tournier07'         : SH basis from the new Tournier et al.
+                                               NeuroImage 2019 paper, as in MRtrix 3.
+                        'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+                                               of the Descoteaux et al. MRM 2007 paper
+                        'tournier07_legacy'  : SH basis from the legacy Tournier et al.
+                                               NeuroImage 2007 paper.
+  --smooth SMOOTH       Lambda-regularization coefficient in the SH fit (float). [0.006]
+  --use_attenuation     If set, will use signal attenuation before fitting the SH (i.e. divide by the b0).
+  --mask MASK           Path to a binary mask.
+                        Only data inside the mask will be used for computations and reconstruction.
+  --b0_threshold thr    Threshold under which b-values are considered to be b0s.
+                        [Default: 20]
+                        * Note. We would expect to find at least one b-value in the
+                        range [0, b0_threshold]. To skip this check, use --skip_b0_check.
+  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
+                        (i.e. b-values below the default --b0_threshold). Use this option to
+                        allow continuing even if the minimum b-value is suspiciously high.
+                        If no b-value is found below the threshold, the script will continue
+                        with your minimal b-value as new --b0_threshold.
+                        Use with care, and only if you understand your data.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
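+
+For instance, a hypothetical order-6 fit on the signal attenuation, restricted
+to a brain mask (filenames are illustrative):
+
+    scil_dwi_to_sh.py dwi.nii.gz dwi.bval dwi.bvec sh.nii.gz \
+        --sh_order 6 --use_attenuation --mask brain_mask.nii.gz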
diff --git a/scripts/.hidden/scil_fodf_max_in_ventricles.py.help b/scripts/.hidden/scil_fodf_max_in_ventricles.py.help
new file mode 100644
index 000000000..b439b0690
--- /dev/null
+++ b/scripts/.hidden/scil_fodf_max_in_ventricles.py.help
@@ -0,0 +1,56 @@
+usage: scil_fodf_max_in_ventricles.py [-h] [--fa_threshold FA_THRESHOLD]
+                                      [--md_threshold MD_THRESHOLD]
+                                      [--max_value_output file]
+                                      [--mask_output file] [--small_dims]
+                                      [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
+                                      [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                      fODFs FA MD
+
+Script to compute the maximum fODF in the ventricles. The ventricles are
+estimated from an MD and FA threshold.
+
+This allows clipping the noise of the fODF using an absolute threshold.
+
+Formerly: scil_compute_fodf_max_in_ventricles.py
+
+positional arguments:
+  fODFs                 Path of the fODF volume in spherical harmonics (SH).
+  FA                    Path to the FA volume.
+  MD                    Path to the mean diffusivity (MD) volume.
+
+options:
+  -h, --help            show this help message and exit
+  --fa_threshold FA_THRESHOLD
+                        Maximal threshold of FA (voxels under that threshold are considered
+                        for evaluation). [0.1]
+  --md_threshold MD_THRESHOLD
+                        Minimal threshold of MD in mm2/s (voxels above that threshold are
+                        considered for evaluation). [0.003]
+  --max_value_output file
+                        Output path for the text file containing the value. If not set, the
+                        file will not be saved.
+  --mask_output file    Output path for the ventricle mask. If not set, the mask
+                        will not be saved.
+  --small_dims          If set, takes the full range of data to search the max fodf amplitude
+                        in ventricles. Useful when the data has small dimensions.
+  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+                        Spherical harmonics basis used for the SH coefficients.
+                        Must be either 'descoteaux07', 'tournier07',
+                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
+                        'descoteaux07'       : SH basis from the Descoteaux et al.
+                                               MRM 2007 paper
+                        'tournier07'         : SH basis from the new Tournier et al.
+                                               NeuroImage 2019 paper, as in MRtrix 3.
+                        'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+                                               of the Descoteaux et al. MRM 2007 paper
+                        'tournier07_legacy'  : SH basis from the legacy Tournier et al.
+                                               NeuroImage 2007 paper.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+[1] Dell'Acqua, Flavio, et al. "Can spherical deconvolution provide more
+    information than fiber orientations? Hindrance modulated orientational
+    anisotropy, a true-tract specific index to characterize white matter
+    diffusion." Human brain mapping 34.10 (2013): 2464-2483.
diff --git a/scripts/.hidden/scil_fodf_memsmt.py.help b/scripts/.hidden/scil_fodf_memsmt.py.help
new file mode 100644
index 000000000..a83299d57
--- /dev/null
+++ b/scripts/.hidden/scil_fodf_memsmt.py.help
@@ -0,0 +1,99 @@
+usage: scil_fodf_memsmt.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals
+                           IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS
+                           [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}
+                           [{0,1,-0.5,0.5} ...]
[--sh_order int] [--mask MASK]
+                           [--tolerance tol] [--skip_b0_check]
+                           [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
+                           [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f]
+                           [--not_all] [--wm_out_fODF file]
+                           [--gm_out_fODF file] [--csf_out_fODF file]
+                           [--vf file] [--vf_rgb file]
+                           in_wm_frf in_gm_frf in_csf_frf
+
+Script to compute multi-encoding multi-shell multi-tissue (memsmt)
+Constrained Spherical Deconvolution ODFs.
+
+In order to operate, the script only needs the data from one type of b-tensor
+encoding. However, giving only a spherical one will not produce good fODFs, as
+it only probes spherical shapes. As for planar encoding, it should technically
+work alone, but seems to be very sensitive to noise and is yet to be properly
+documented. We thus suggest always using at least the linear encoding, which
+will be equivalent to standard multi-shell multi-tissue if used alone, in
+combination with other encodings. Note that custom encodings are not yet
+supported, so that only the linear tensor encoding (LTE, b_delta = 1), the
+planar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding
+(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are
+available.
+
+All of `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the
+same number of arguments. Be sure to keep the same order of encodings
+throughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).
+
+By default, will output all possible files, using default names.
+Specific names can be specified using the file flags specified in the
+"File flags" section.
+
+If --not_all is set, only the files specified explicitly by the flags
+will be output.
+
+>>> scil_fodf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz
+    PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs
+    LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz
+
+Based on P. Karan et al., Bridging the gap between constrained spherical
+deconvolution and diffusional variance decomposition via tensor-valued
+diffusion MRI. Medical Image Analysis (2022)
+
+Formerly: scil_compute_memsmt_fodf.py
+
+positional arguments:
+  in_wm_frf             Text file of WM response function.
+  in_gm_frf             Text file of GM response function.
+  in_csf_frf            Text file of CSF response function.
+
+options:
+  -h, --help            show this help message and exit
+  --in_dwis IN_DWIS [IN_DWIS ...]
+                        Path to the input diffusion volume for each b-tensor encoding type.
+  --in_bvals IN_BVALS [IN_BVALS ...]
+                        Path to the bval file, in FSL format, for each b-tensor encoding type.
+  --in_bvecs IN_BVECS [IN_BVECS ...]
+                        Path to the bvec file, in FSL format, for each b-tensor encoding type.
+  --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]
+                        Value of b_delta for each b-tensor encoding type, in the same order as dwi, bval and bvec inputs.
+  --sh_order int        SH order used for the CSD. (Default: 8)
+  --mask MASK           Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.
+  --tolerance tol       The tolerated gap between the b-values to extract and the current b-value.
+                        [Default: 20]
+                        * Note. We would expect to find at least one b-value in the
+                        range [0, tolerance]. To skip this check, use --skip_b0_check.
+  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
+                        (i.e. b-values below the default --tolerance). Use this option to
+                        allow continuing even if the minimum b-value is suspiciously high.
+                        Use with care, and only if you understand your data.
+  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+                        Spherical harmonics basis used for the SH coefficients.
+                        Must be either 'descoteaux07', 'tournier07',
+                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
+                        'descoteaux07'       : SH basis from the Descoteaux et al.
+                                               MRM 2007 paper
+                        'tournier07'         : SH basis from the new Tournier et al.
+                                               NeuroImage 2019 paper, as in MRtrix 3.
+                        'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+                                               of the Descoteaux et al. MRM 2007 paper
+                        'tournier07_legacy'  : SH basis from the legacy Tournier et al.
+                                               NeuroImage 2007 paper.
+  --processes NBR       Number of sub-processes to start.
+                        Default: [1]
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+  --not_all             If set, only saves the files specified using the file flags. (Default: False)
+
+File flags:
+  --wm_out_fODF file    Output filename for the WM fODF coefficients.
+  --gm_out_fODF file    Output filename for the GM fODF coefficients.
+  --csf_out_fODF file   Output filename for the CSF fODF coefficients.
+  --vf file             Output filename for the volume fractions map.
+  --vf_rgb file         Output filename for the volume fractions map in rgb.
diff --git a/scripts/.hidden/scil_fodf_metrics.py.help b/scripts/.hidden/scil_fodf_metrics.py.help
new file mode 100644
index 000000000..17b3aa757
--- /dev/null
+++ b/scripts/.hidden/scil_fodf_metrics.py.help
@@ -0,0 +1,88 @@
+usage: scil_fodf_metrics.py [-h] [--sphere string] [--mask] [--at A_THRESHOLD]
+                            [--rt R_THRESHOLD] [--abs_peaks_and_values]
+                            [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
+                            [-v [{DEBUG,INFO,WARNING}]] [--processes NBR] [-f]
+                            [--not_all] [--afd_max file] [--afd_total file]
+                            [--afd_sum file] [--nufo file] [--rgb file]
+                            [--peaks file] [--peak_values file]
+                            [--peak_indices file]
+                            in_fODF
+
+Script to compute the maximum Apparent Fiber Density (AFD), the fiber ODFs
+orientations, values and indices (peaks, peak_values, peak_indices), the Number
+of Fiber Orientations (NuFO) maps from fiber ODFs and the RGB map.
+
+AFD_max map is the maximal fODF amplitude for each voxel.
+
+NuFO is the number of maxima of the fODF with an ABSOLUTE amplitude above
+the threshold set using --at, AND an amplitude above the RELATIVE threshold
+set using --rt.
+
+The --at argument should be set to a value which is 1.5 times the maximal
+value of the fODF in the ventricles. This can be obtained with the
+scil_fodf_max_in_ventricles.py script.
+
+If the --abs_peaks_and_values argument is set, the peaks are all normalized
+and the peak_values are equal to the actual fODF amplitude of the peaks. By
+default, the script max-normalizes the peak_values for each voxel and
+multiplies the peaks by peak_values.
+
+By default, will output all possible files, using default names. Specific names
+can be specified using the file flags specified in the "File flags" section.
+
+If --not_all is set, only the files specified explicitly by the flags will be
+output.
+
+See [Raffelt et al. NeuroImage 2012] and [Dell'Acqua et al HBM 2013] for the
+definitions.
+
+Formerly: scil_compute_fodf_metrics.py
+
+positional arguments:
+  in_fODF               Path of the fODF volume in spherical harmonics (SH).
+
+options:
+  -h, --help            show this help message and exit
+  --sphere string       Discrete sphere to use in the processing [repulsion724].
+  --mask                Path to a binary mask. Only the data inside the mask
+                        will be used for computations and reconstruction [None].
+  --at A_THRESHOLD      Absolute threshold on fODF amplitude. This value should be set to
+                        approximately 1.5 to 2 times the maximum fODF amplitude in isotropic voxels
+                        (i.e. ventricles).
+                        Use scil_fodf_max_in_ventricles.py to find the maximal value.
+                        See [Dell'Acqua et al HBM 2013] [0.0].
+  --rt R_THRESHOLD      Relative threshold on fODF amplitude in percentage [0.1].
+  --abs_peaks_and_values
+                        If set, the peak_values are not max-normalized for each voxel,
+                        but rather they keep the actual fODF amplitude of the peaks.
+                        Also, the peaks are given as unit directions instead of being proportional to peak_values. [False]
+  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+                        Spherical harmonics basis used for the SH coefficients.
+                        Must be either 'descoteaux07', 'tournier07',
+                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
+                        'descoteaux07'       : SH basis from the Descoteaux et al.
+                                               MRM 2007 paper
+                        'tournier07'         : SH basis from the new Tournier et al.
+                                               NeuroImage 2019 paper, as in MRtrix 3.
+                        'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+                                               of the Descoteaux et al. MRM 2007 paper
+                        'tournier07_legacy'  : SH basis from the legacy Tournier et al.
+                                               NeuroImage 2007 paper.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  --processes NBR       Number of sub-processes to start.
+                        Default: [1]
+  -f                    Force overwriting of the output files.
+  --not_all             If set, only saves the files specified using the file flags [False].
+
+File flags:
+  --afd_max file        Output filename for the AFD_max map.
+  --afd_total file      Output filename for the AFD_total map (SH coeff = 0).
+  --afd_sum file        Output filename for the sum of all peak contributions
+                        (sum of fODF lobes on the sphere).
+  --nufo file           Output filename for the NuFO map.
+  --rgb file            Output filename for the RGB map.
+  --peaks file          Output filename for the extracted peaks.
+  --peak_values file    Output filename for the extracted peaks values.
+  --peak_indices file   Output filename for the generated peaks indices on the sphere.
diff --git a/scripts/.hidden/scil_fodf_msmt.py.help b/scripts/.hidden/scil_fodf_msmt.py.help
new file mode 100644
index 000000000..a1b20c06c
--- /dev/null
+++ b/scripts/.hidden/scil_fodf_msmt.py.help
@@ -0,0 +1,71 @@
+usage: scil_fodf_msmt.py [-h] [--sh_order int] [--mask] [--tolerance tol]
+                         [--skip_b0_check]
+                         [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
+                         [--processes NBR] [--not_all] [--wm_out_fODF file]
+                         [--gm_out_fODF file] [--csf_out_fODF file]
+                         [--vf file] [--vf_rgb file]
+                         [-v [{DEBUG,INFO,WARNING}]] [-f]
+                         in_dwi in_bval in_bvec in_wm_frf in_gm_frf in_csf_frf
+
+Script to compute Multi-shell Multi-tissue Constrained Spherical Deconvolution
+ODFs.
+
+By default, will output all possible files, using default names.
+Specific names can be specified using the file flags specified in the
+"File flags" section.
+
+If --not_all is set, only the files specified explicitly by the flags
+will be output.
+
+Based on B. Jeurissen et al., Multi-tissue constrained spherical
+deconvolution for improved analysis of multi-shell diffusion
+MRI data. Neuroimage (2014)
+
+Formerly: scil_compute_msmt_fodf.py
+
+positional arguments:
+  in_dwi                Path of the input diffusion volume.
+  in_bval               Path of the bval file, in FSL format.
+  in_bvec               Path of the bvec file, in FSL format.
+  in_wm_frf             Text file of WM response function.
+  in_gm_frf             Text file of GM response function.
+  in_csf_frf            Text file of CSF response function.
+
+options:
+  -h, --help            show this help message and exit
+  --sh_order int        SH order used for the CSD. (Default: 8)
+  --mask                Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.
+  --tolerance tol       The tolerated gap between the b-values to extract and the current b-value.
+                        [Default: 20]
+                        * Note. We would expect to find at least one b-value in the
+                        range [0, tolerance]. To skip this check, use --skip_b0_check.
+  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
+                        (i.e. b-values below the default --tolerance). Use this option to
+                        allow continuing even if the minimum b-value is suspiciously high.
+                        Use with care, and only if you understand your data.
+  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+                        Spherical harmonics basis used for the SH coefficients.
+                        Must be either 'descoteaux07', 'tournier07',
+                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
+                        'descoteaux07'       : SH basis from the Descoteaux et al.
+                                               MRM 2007 paper
+                        'tournier07'         : SH basis from the new Tournier et al.
+                                               NeuroImage 2019 paper, as in MRtrix 3.
+                        'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+                                               of the Descoteaux et al. MRM 2007 paper
+                        'tournier07_legacy'  : SH basis from the legacy Tournier et al.
+                                               NeuroImage 2007 paper.
+  --processes NBR       Number of sub-processes to start.
+                        Default: [1]
+  --not_all             If set, only saves the files specified using the file flags. (Default: False)
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+File flags:
+  --wm_out_fODF file    Output filename for the WM fODF coefficients.
+  --gm_out_fODF file    Output filename for the GM fODF coefficients.
+  --csf_out_fODF file   Output filename for the CSF fODF coefficients.
+  --vf file             Output filename for the volume fractions map.
+  --vf_rgb file         Output filename for the volume fractions map in rgb.
diff --git a/scripts/.hidden/scil_fodf_ssst.py.help b/scripts/.hidden/scil_fodf_ssst.py.help
new file mode 100644
index 000000000..6542f859f
--- /dev/null
+++ b/scripts/.hidden/scil_fodf_ssst.py.help
@@ -0,0 +1,52 @@
+usage: scil_fodf_ssst.py [-h] [--sh_order int] [--mask] [--b0_threshold thr]
+                         [--skip_b0_check]
+                         [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
+                         [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f]
+                         in_dwi in_bval in_bvec frf_file out_fODF
+
+Script to compute Constrained Spherical Deconvolution (CSD) fiber ODFs.
+
+See [Tournier et al. NeuroImage 2007]
+
+Formerly: scil_compute_ssst_fodf.py
+
+positional arguments:
+  in_dwi                Path of the input diffusion volume.
+  in_bval               Path of the bvals file, in FSL format.
+  in_bvec               Path of the bvecs file, in FSL format.
+  frf_file              Path of the FRF file.
+  out_fODF              Output path for the fiber ODF coefficients.
+
+options:
+  -h, --help            show this help message and exit
+  --sh_order int        SH order used for the CSD. (Default: 8)
+  --mask                Path to a binary mask. Only the data inside the mask will be used
+                        for computations and reconstruction.
+  --b0_threshold thr    Threshold under which b-values are considered to be b0s.
+                        [Default: 20]
+                        * Note.
+                       range [0, b0_threshold]. To skip this check, use --skip_b0_check.
+  --skip_b0_check      By default, we supervise that at least one b0 exists in your data
+                       (i.e. b-values below the default --b0_threshold). Use this option to
+                       allow continuing even if the minimum b-value is suspiciously high.
+                       If no b-value is found below the threshold, the script will continue
+                       with your minimal b-value as new --b0_threshold.
+                       Use with care, and only if you understand your data.
+  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+                       Spherical harmonics basis used for the SH coefficients.
+                       Must be either 'descoteaux07', 'tournier07',
+                       'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
+                       'descoteaux07'       : SH basis from the Descoteaux et al.
+                                              MRM 2007 paper
+                       'tournier07'         : SH basis from the new Tournier et al.
+                                              NeuroImage 2019 paper, as in MRtrix 3.
+                       'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+                                              of the Descoteaux et al. MRM 2007 paper
+                       'tournier07_legacy'  : SH basis from the legacy Tournier et al.
+                                              NeuroImage 2007 paper.
+  --processes NBR      Number of sub-processes to start.
+                       Default: [1]
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_fodf_to_bingham.py.help b/scripts/.hidden/scil_fodf_to_bingham.py.help
new file mode 100644
index 000000000..9e16278d1
--- /dev/null
+++ b/scripts/.hidden/scil_fodf_to_bingham.py.help
@@ -0,0 +1,51 @@
+usage: scil_fodf_to_bingham.py [-h] [--max_lobes MAX_LOBES] [--at AT]
+                               [--rt RT] [--min_sep_angle MIN_SEP_ANGLE]
+                               [--max_fit_angle MAX_FIT_ANGLE] [--mask MASK]
+                               [-v [{DEBUG,INFO,WARNING}]] [--processes NBR]
+                               [-f]
+                               in_sh out_bingham
+
+Script for fitting a Bingham distribution to each fODF lobe, as described
+in [1].
+
+The Bingham fit is saved, with each Bingham distribution described by 7
+coefficients (for example, for a maximum number of lobes of 5, the number
+of coefficients is 7 x 5 = 35 -- less than the number of coefficients for
+SH of maximum order 8).
+
+Using 12 threads, the execution takes approximately 30 minutes for a brain with
+1mm isotropic resolution.
+
+Formerly: scil_fit_bingham_to_fodf.py
+
+positional arguments:
+  in_sh                Input SH image.
+  out_bingham          Output Bingham functions image.
+
+options:
+  -h, --help           show this help message and exit
+  --max_lobes MAX_LOBES
+                       Maximum number of lobes per voxel to extract. [5]
+  --at AT              Absolute threshold for peaks extraction. [0.0]
+  --rt RT              Relative threshold for peaks extraction. [0.1]
+  --min_sep_angle MIN_SEP_ANGLE
+                       Minimum separation angle between two peaks. [25.0]
+  --max_fit_angle MAX_FIT_ANGLE
+                       Maximum distance in degrees around a peak direction for fitting the Bingham function. [15.0]
+  --mask MASK          Optional mask file. Only SH inside the mask are fitted.
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  --processes NBR      Number of sub-processes to start.
+                       Default: [1]
+  -f                   Force overwriting of the output files.
+
+[1] T. W. Riffert, J. Schreiber, A. Anwander, and T. R. Knösche, “Beyond
+    fractional anisotropy: Extraction of bundle-specific structural metrics
+    from crossing fiber models,” NeuroImage, vol. 100, pp. 176-191, Oct. 2014,
+    doi: 10.1016/j.neuroimage.2014.06.015.
+
+[2] J. Schreiber, T. Riffert, A. Anwander, and T. R. Knösche, “Plausibility
+    Tracking: A method to evaluate anatomical connectivity and microstructural
+    properties along fiber pathways,” NeuroImage, vol. 90, pp. 163-178, Apr.
+    2014, doi: 10.1016/j.neuroimage.2014.01.002.
diff --git a/scripts/.hidden/scil_freewater_maps.py.help b/scripts/.hidden/scil_freewater_maps.py.help
new file mode 100644
index 000000000..5c51f8740
--- /dev/null
+++ b/scripts/.hidden/scil_freewater_maps.py.help
@@ -0,0 +1,58 @@
+usage: scil_freewater_maps.py [-h] [--mask MASK] [--out_dir OUT_DIR]
+                              [--b_thr B_THR] [--para_diff PARA_DIFF]
+                              [--iso_diff ISO_DIFF]
+                              [--perp_diff_min PERP_DIFF_MIN]
+                              [--perp_diff_max PERP_DIFF_MAX]
+                              [--lambda1 LAMBDA1] [--lambda2 LAMBDA2]
+                              [--save_kernels DIRECTORY | --load_kernels DIRECTORY]
+                              [--compute_only] [--mouse] [--processes NBR]
+                              [-v [{DEBUG,INFO,WARNING}]] [-f]
+                              in_dwi in_bval in_bvec
+
+Compute Free Water maps [1] using AMICO.
+This script supports both single and multi-shell data.
+
+Formerly: scil_compute_freewater.py
+
+positional arguments:
+  in_dwi               DWI file.
+  in_bval              b-values filename, in FSL format (.bval).
+  in_bvec              b-vectors filename, in FSL format (.bvec).
+
+options:
+  -h, --help           show this help message and exit
+  --mask MASK          Brain mask filename.
+  --out_dir OUT_DIR    Output directory for the Free Water results. [results]
+  --b_thr B_THR        Limit value to consider that a b-value is on an
+                       existing shell. Above this limit, the b-value is
+                       placed on a new shell. This includes b0 values.
+  --mouse              If set, use mouse fitting profile.
+  --processes NBR      Number of sub-processes to start. Default: [1]
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided
+                       level. Default level is warning, default when using -v
+                       is info.
+  -f                   Force overwriting of the output files.
+
+Model options:
+  --para_diff PARA_DIFF
+                       Axial diffusivity (AD) in the CC. [0.0015]
+  --iso_diff ISO_DIFF  Mean diffusivity (MD) in ventricles. [0.003]
+  --perp_diff_min PERP_DIFF_MIN
+                       Radial diffusivity (RD) minimum. [0.0001]
+  --perp_diff_max PERP_DIFF_MAX
+                       Radial diffusivity (RD) maximum. [0.0007]
+  --lambda1 LAMBDA1    First regularization parameter. [0.0]
+  --lambda2 LAMBDA2    Second regularization parameter. [0.25]
+
+Kernels options:
+  --save_kernels DIRECTORY
+                       Output directory for the COMMIT kernels.
+  --load_kernels DIRECTORY
+                       Input directory where the COMMIT kernels are located.
+  --compute_only       Compute kernels only; --save_kernels must be used.
+
+Reference:
+    [1] Pasternak O, Sochen N, Gur Y, Intrator N, Assaf Y.
+        Free water elimination and mapping from diffusion mri.
+        Magn Reson Med. 62 (3) (2009) 717-730.
diff --git a/scripts/.hidden/scil_freewater_priors.py.help b/scripts/.hidden/scil_freewater_priors.py.help
new file mode 100644
index 000000000..fce34c0e3
--- /dev/null
+++ b/scripts/.hidden/scil_freewater_priors.py.help
@@ -0,0 +1,71 @@
+usage: scil_freewater_priors.py [-h]
+                                [--fa_min_single_fiber FA_MIN_SINGLE_FIBER]
+                                [--fa_max_ventricles FA_MAX_VENTRICLES]
+                                [--md_min_ventricles MD_MIN_VENTRICLES]
+                                [--roi_radius ROI_RADIUS]
+                                [--roi_center pos pos pos]
+                                [--out_txt_1fiber_para FILE]
+                                [--out_txt_1fiber_perp FILE]
+                                [--out_mask_1fiber FILE]
+                                [--out_txt_ventricles FILE]
+                                [--out_mask_ventricles FILE]
+                                [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                in_FA in_AD in_RD in_MD
+
+Compute the axial (para_diff), radial (perp_diff), and mean (iso_diff)
+diffusivity priors for NODDI.
+
+Formerly: scil_compute_NODDI_priors.py
+
+positional arguments:
+  in_FA                Path to the FA volume.
+  in_AD                Path to the axial diffusivity (AD) volume.
+  in_RD                Path to the radial diffusivity (RD) volume.
+  in_MD                Path to the mean diffusivity (MD) volume.
+
+options:
+  -h, --help           show this help message and exit
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
+
+Metrics options:
+  --fa_min_single_fiber FA_MIN_SINGLE_FIBER
+                       Minimal threshold of FA (voxels above that threshold are considered in
+                       the single fiber mask). [0.7]
+  --fa_max_ventricles FA_MAX_VENTRICLES
+                       Maximal threshold of FA (voxels under that threshold are considered in
+                       the ventricles). [0.1]
+  --md_min_ventricles MD_MIN_VENTRICLES
+                       Minimal threshold of MD in mm2/s (voxels above that threshold are
+                       considered in the ventricles). [0.003]
+
+Regions options:
+  --roi_radius ROI_RADIUS
+                       Radius of the region used to estimate the priors. The roi will be a cube spanning
+                       from ROI_CENTER in each direction. [20]
+  --roi_center pos pos pos
+                       Center of the roi of size roi_radius used to estimate the priors; a 3-value coordinate.
+                       If not set, uses the center of the 3D volume.
+
+Outputs:
+  --out_txt_1fiber_para FILE
+                       Output path for the text file containing the single fiber average value of AD.
+                       If not set, the file will not be saved.
+  --out_txt_1fiber_perp FILE
+                       Output path for the text file containing the single fiber average value of RD.
+                       If not set, the file will not be saved.
+  --out_mask_1fiber FILE
+                       Output path for the single fiber mask. If not set, the mask will not be saved.
+  --out_txt_ventricles FILE
+                       Output path for the text file containing the ventricles average value of MD.
+                       If not set, the file will not be saved.
+  --out_mask_ventricles FILE
+                       Output path for the ventricle mask.
+                       If not set, the mask will not be saved.
+
+Reference:
+    [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.
+        NODDI: practical in vivo neurite orientation dispersion and density
+        imaging of the human brain. NeuroImage. 2012 Jul 16;61:1000-16.
diff --git a/scripts/.hidden/scil_frf_mean.py.help b/scripts/.hidden/scil_frf_mean.py.help
new file mode 100644
index 000000000..f2c6d410b
--- /dev/null
+++ b/scripts/.hidden/scil_frf_mean.py.help
@@ -0,0 +1,22 @@
+usage: scil_frf_mean.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
+                        list [list ...] file
+
+Compute the mean Fiber Response Function from a set of individually
+computed Response Functions.
+
+The FRF files are obtained from scil_frf_ssst.py, scil_frf_msmt.py in the
+case of multi-shell data or scil_frf_memsmt.py in the case of multi-encoding
+multi-shell data.
+
+Formerly: scil_compute_mean_frf.py
+
+positional arguments:
+  list                 List of FRF filepaths.
+  file                 Path of the output mean FRF file.
+
+options:
+  -h, --help           show this help message and exit
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_frf_memsmt.py.help b/scripts/.hidden/scil_frf_memsmt.py.help
new file mode 100644
index 000000000..070ad5f23
--- /dev/null
+++ b/scripts/.hidden/scil_frf_memsmt.py.help
@@ -0,0 +1,122 @@
+usage: scil_frf_memsmt.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals
+                          IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS
+                          [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}
+                          [{0,1,-0.5,0.5} ...] [--mask MASK]
+                          [--mask_wm MASK_WM] [--mask_gm MASK_GM]
+                          [--mask_csf MASK_CSF] [--fa_thr_wm FA_THR_WM]
+                          [--fa_thr_gm FA_THR_GM] [--fa_thr_csf FA_THR_CSF]
+                          [--md_thr_gm MD_THR_GM] [--md_thr_csf MD_THR_CSF]
+                          [--min_nvox MIN_NVOX] [--tolerance tol]
+                          [--skip_b0_check] [--dti_bval_limit DTI_BVAL_LIMIT]
+                          [--roi_radii ROI_RADII [ROI_RADII ...]]
+                          [--roi_center tuple(3) tuple(3) tuple(3)]
+                          [--wm_frf_mask file] [--gm_frf_mask file]
+                          [--csf_frf_mask file] [-v [{DEBUG,INFO,WARNING}]]
+                          [-f]
+                          out_wm_frf out_gm_frf out_csf_frf
+
+Script to estimate response functions for multi-encoding multi-shell
+multi-tissue (memsmt) constrained spherical deconvolution. In order to operate,
+the script only needs the data from one type of b-tensor encoding. However,
+giving only a spherical one will not produce good fiber response functions, as
+it only probes spherical shapes. As for planar encoding, it should technically
+work alone, but seems to be very sensitive to noise and is yet to be properly
+documented. We thus suggest always using at least the linear encoding (which,
+used alone, is equivalent to standard multi-shell multi-tissue), in
+combination with other encodings. Note that custom encodings are not yet
+supported, so that only the linear tensor encoding (LTE, b_delta = 1), the
+planar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding
+(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are
+available. Moreover, all of `--in_dwis`, `--in_bvals`, `--in_bvecs` and
+`--in_bdeltas` must have the same number of arguments. Be sure to keep the
+same order of encodings throughout all these inputs and to set `--in_bdeltas`
+accordingly (IMPORTANT).
+
+The script computes a response function for white-matter (wm),
+gray-matter (gm), csf and the mean b=0.
+
+In the wm, we compute the response function in each voxel where
+the FA is above threshold_fa_wm.
+
+In the gm (or csf), we compute the response function in each voxel where
+the FA is below threshold_fa_gm (or threshold_fa_csf) and where
+the MD is below threshold_md_gm (or threshold_md_csf).
+
+>>> scil_frf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz
+    PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs
+    LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz
+
+Based on P. Karan et al., Bridging the gap between constrained spherical
+deconvolution and diffusional variance decomposition via tensor-valued
+diffusion MRI. Medical Image Analysis (2022)
+
+Formerly: scil_compute_memsmt_frf.py
+
+positional arguments:
+  out_wm_frf           Path to the output WM frf file, in .txt format.
+  out_gm_frf           Path to the output GM frf file, in .txt format.
+  out_csf_frf          Path to the output CSF frf file, in .txt format.
+
+options:
+  -h, --help           show this help message and exit
+  --in_dwis IN_DWIS [IN_DWIS ...]
+                       Path to the input diffusion volume for each b-tensor encoding type.
+  --in_bvals IN_BVALS [IN_BVALS ...]
+                       Path to the bval file, in FSL format, for each b-tensor encoding type.
+  --in_bvecs IN_BVECS [IN_BVECS ...]
+                       Path to the bvec file, in FSL format, for each b-tensor encoding type.
+  --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]
+                       Value of b_delta for each b-tensor encoding type, in the same order as
+                       dwi, bval and bvec inputs.
+  --mask MASK          Path to a binary mask. Only the data inside the mask will be used for
+                       computations and reconstruction. Useful if no tissue masks are available.
+  --mask_wm MASK_WM    Path to the input WM mask file, used to improve the final WM frf mask.
+  --mask_gm MASK_GM    Path to the input GM mask file, used to improve the final GM frf mask.
+  --mask_csf MASK_CSF  Path to the input CSF mask file, used to improve the final CSF frf mask.
+  --fa_thr_wm FA_THR_WM
+                       If supplied, use this threshold to select single WM fiber voxels from
+                       the FA inside the WM mask defined by mask_wm.
+                       Each voxel above this threshold will be selected. [0.7]
+  --fa_thr_gm FA_THR_GM
+                       If supplied, use this threshold to select GM voxels from the FA inside
+                       the GM mask defined by mask_gm.
+                       Each voxel below this threshold will be selected. [0.2]
+  --fa_thr_csf FA_THR_CSF
+                       If supplied, use this threshold to select CSF voxels from the FA inside
+                       the CSF mask defined by mask_csf.
+                       Each voxel below this threshold will be selected. [0.1]
+  --md_thr_gm MD_THR_GM
+                       If supplied, use this threshold to select GM voxels from the MD inside
+                       the GM mask defined by mask_gm.
+                       Each voxel below this threshold will be selected. [0.0007]
+  --md_thr_csf MD_THR_CSF
+                       If supplied, use this threshold to select CSF voxels from the MD inside
+                       the CSF mask defined by mask_csf.
+                       Each voxel below this threshold will be selected. [0.003]
+  --min_nvox MIN_NVOX  Minimal number of voxels needed for each tissue mask in order to
+                       proceed to frf estimation. [100]
+  --tolerance tol      The tolerated gap between the b-values to extract and the current b-value.
+                       [Default: 20]
+                       * Note. We would expect to find at least one b-value in the
+                       range [0, tolerance]. To skip this check, use --skip_b0_check.
+  --skip_b0_check      By default, we supervise that at least one b0 exists in your data
+                       (i.e. b-values below the default --tolerance). Use this option to
+                       allow continuing even if the minimum b-value is suspiciously high.
+                       Use with care, and only if you understand your data.
+  --dti_bval_limit DTI_BVAL_LIMIT
+                       The highest b-value taken for the DTI model. [1200]
+  --roi_radii ROI_RADII [ROI_RADII ...]
+                       If supplied, use those radii to select a cuboid roi to estimate the
+                       response functions. The roi will be a cuboid spanning from the middle of
+                       the volume in each direction with the different radii. The type is either
+                       an int (e.g. --roi_radii 10) or an array-like (3,) (e.g. --roi_radii 20 30 10). [[20]]
+  --roi_center tuple(3) tuple(3) tuple(3)
+                       If supplied, use this center to span the cuboid roi using roi_radii.
+                       [center of the 3D volume] (e.g. --roi_center 66 79 79)
+  --wm_frf_mask file   Path to the output WM frf mask file, the voxels used to compute the WM frf.
+  --gm_frf_mask file   Path to the output GM frf mask file, the voxels used to compute the GM frf.
+  --csf_frf_mask file  Path to the output CSF frf mask file, the voxels used to compute the CSF frf.
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
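+
+Another example, a minimal sketch with hypothetical filenames (the tissue
+masks are optional, as described above; LTE and STE are b_delta 1 and 0):
+>>> scil_frf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz
+    STE.nii.gz --in_bvals LTE.bval STE.bval --in_bvecs LTE.bvec STE.bvec
+    --in_bdeltas 1 0 --mask_wm wm.nii.gz --mask_gm gm.nii.gz --mask_csf csf.nii.gz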
diff --git a/scripts/.hidden/scil_frf_msmt.py.help b/scripts/.hidden/scil_frf_msmt.py.help
new file mode 100644
index 000000000..300813235
--- /dev/null
+++ b/scripts/.hidden/scil_frf_msmt.py.help
@@ -0,0 +1,114 @@
+usage: scil_frf_msmt.py [-h] [--mask MASK] [--mask_wm MASK_WM]
+                        [--mask_gm MASK_GM] [--mask_csf MASK_CSF]
+                        [--fa_thr_wm FA_THR_WM] [--fa_thr_gm FA_THR_GM]
+                        [--fa_thr_csf FA_THR_CSF] [--md_thr_gm MD_THR_GM]
+                        [--md_thr_csf MD_THR_CSF] [--min_nvox MIN_NVOX]
+                        [--tolerance TOLERANCE] [--skip_b0_check]
+                        [--dti_bval_limit DTI_BVAL_LIMIT]
+                        [--roi_radii ROI_RADII [ROI_RADII ...]]
+                        [--roi_center tuple(3) tuple(3) tuple(3)]
+                        [--wm_frf_mask file] [--gm_frf_mask file]
+                        [--csf_frf_mask file] [-v [{DEBUG,INFO,WARNING}]] [-f]
+                        in_dwi in_bval in_bvec out_wm_frf out_gm_frf
+                        out_csf_frf
+
+Compute response functions for multi-shell multi-tissue (MSMT) constrained
+spherical deconvolution from DWI data.
+
+The script computes a response function for white-matter (wm),
+gray-matter (gm), csf and the mean b=0.
+  - In the wm, we compute the response function in each voxel where the FA is
+    above threshold_fa_wm.
+  - In the gm (or csf), we compute the response function in each voxel where
+    the FA is below threshold_fa_gm (or threshold_fa_csf) and where the MD
+    is below threshold_md_gm (or threshold_md_csf).
+
+We output one response function file for each tissue, containing the response
+function for each b-value (arranged by lines). These are saved as the diagonal
+of the axis-symmetric diffusion tensor (3 e-values) and a mean b0 value.
+For example, a typical wm_frf is [15e-4, 4e-4, 4e-4, 700], where the tensor
+e-values are (15,4,4)x10^-4 mm^2/s and the mean b0 is 700.
+
+Based on B. Jeurissen et al., Multi-tissue constrained spherical deconvolution
+for improved analysis of multi-shell diffusion MRI data. Neuroimage (2014)
+
+Formerly: scil_compute_msmt_frf.py
+
+positional arguments:
+  in_dwi               Path to the input diffusion volume.
+  in_bval              Path to the bval file, in FSL format.
+  in_bvec              Path to the bvec file, in FSL format.
+  out_wm_frf           Path to the output WM frf file, in .txt format.
+  out_gm_frf           Path to the output GM frf file, in .txt format.
+  out_csf_frf          Path to the output CSF frf file, in .txt format.
+
+options:
+  -h, --help           show this help message and exit
+  --mask MASK          Path to a binary mask. Only the data inside the mask
+                       will be used for computations and reconstruction.
+                       Useful if no tissue masks are available.
+  --mask_wm MASK_WM    Path to the input WM mask file, used to improve the
+                       final WM frf mask.
+  --mask_gm MASK_GM    Path to the input GM mask file, used to improve the
+                       final GM frf mask.
+  --mask_csf MASK_CSF  Path to the input CSF mask file, used to improve the
+                       final CSF frf mask.
+  --fa_thr_wm FA_THR_WM
+                       If supplied, use this threshold to select single WM
+                       fiber voxels from the FA inside the WM mask defined by
+                       mask_wm. Each voxel above this threshold will be
+                       selected. [0.7]
+  --fa_thr_gm FA_THR_GM
+                       If supplied, use this threshold to select GM voxels
+                       from the FA inside the GM mask defined by mask_gm.
+                       Each voxel below this threshold will be selected.
+                       [0.2]
+  --fa_thr_csf FA_THR_CSF
+                       If supplied, use this threshold to select CSF voxels
+                       from the FA inside the CSF mask defined by mask_csf.
+                       Each voxel below this threshold will be selected.
+                       [0.1]
+  --md_thr_gm MD_THR_GM
+                       If supplied, use this threshold to select GM voxels
+                       from the MD inside the GM mask defined by mask_gm.
+                       Each voxel below this threshold will be selected.
+                       [0.0007]
+  --md_thr_csf MD_THR_CSF
+                       If supplied, use this threshold to select CSF voxels
+                       from the MD inside the CSF mask defined by mask_csf.
+                       Each voxel below this threshold will be selected.
+                       [0.003]
+  --min_nvox MIN_NVOX  Minimal number of voxels needed for each tissue mask
+                       in order to proceed to frf estimation. [100]
+  --tolerance TOLERANCE
+                       The tolerated gap between the b-values to extract and
+                       the current b-value. [20]
+  --skip_b0_check      By default, we supervise that at least one b0 exists
+                       in your data (i.e. b-values below the default
+                       --tolerance). Use this option to allow continuing even
+                       if the minimum b-value is suspiciously high. Use with
+                       care, and only if you understand your data.
+  --dti_bval_limit DTI_BVAL_LIMIT
+                       The highest b-value taken for the DTI model. [1200]
+  --roi_radii ROI_RADII [ROI_RADII ...]
+                       If supplied, use those radii to select a cuboid roi to
+                       estimate the response functions. The roi will be a
+                       cuboid spanning from the middle of the volume in each
+                       direction with the different radii. The type is either
+                       an int (e.g. --roi_radii 10) or an array-like (3,)
+                       (e.g. --roi_radii 20 30 10). [[20]]
+  --roi_center tuple(3) tuple(3) tuple(3)
+                       If supplied, use this center to span the cuboid roi
+                       using roi_radii. [center of the 3D volume] (e.g.
+                       --roi_center 66 79 79)
+  --wm_frf_mask file   Path to the output WM frf mask file, the voxels used
+                       to compute the WM frf.
+  --gm_frf_mask file   Path to the output GM frf mask file, the voxels used
+                       to compute the GM frf.
+  --csf_frf_mask file  Path to the output CSF frf mask file, the voxels used
+                       to compute the CSF frf.
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided
+                       level. Default level is warning, default when using -v
+                       is info.
+  -f                   Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_frf_set_diffusivities.py.help b/scripts/.hidden/scil_frf_set_diffusivities.py.help
new file mode 100644
index 000000000..cd534bf52
--- /dev/null
+++ b/scripts/.hidden/scil_frf_set_diffusivities.py.help
@@ -0,0 +1,30 @@
+usage: scil_frf_set_diffusivities.py [-h] [--no_factor]
+                                     [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                     input new_frf output
+
+Replace the fiber response function in the FRF file.
+Use this script when you want to use a fixed response function
+and keep the mean b0.
+
+The FRF file is obtained from scil_frf_ssst.py or scil_frf_msmt.py in the case
+of multi-shell data.
+
+Formerly: scil_set_response_function.py
+
+positional arguments:
+  input                Path of the FRF file.
+  new_frf              New response function given as a tuple. We will replace the
+                       response function in frf_file with this fiber response
+                       function x 10**-4 (e.g. 15,4,4).
+                       If multi-shell, write the first shell, then the second shell,
+                       and the third, etc. (e.g. 15,4,4,13,5,5,12,5,5).
+  output               Path of the new FRF file.
+
+options:
+  -h, --help           show this help message and exit
+  --no_factor          If supplied, the fiber response function is
+                       evaluated without the x 10**-4 factor. [False].
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
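+
+Example, a minimal sketch with hypothetical filenames (the tuple follows the
+format described above, i.e. e-values x 10**-4):
+>>> scil_frf_set_diffusivities.py frf.txt 15,4,4 new_frf.txt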
diff --git a/scripts/.hidden/scil_frf_ssst.py.help b/scripts/.hidden/scil_frf_ssst.py.help
new file mode 100644
index 000000000..e71476102
--- /dev/null
+++ b/scripts/.hidden/scil_frf_ssst.py.help
@@ -0,0 +1,61 @@
+usage: scil_frf_ssst.py [-h] [--mask MASK] [--mask_wm MASK_WM]
+                        [--fa_thresh FA_THRESH]
+                        [--min_fa_thresh MIN_FA_THRESH] [--min_nvox MIN_NVOX]
+                        [--roi_radii ROI_RADII [ROI_RADII ...]]
+                        [--roi_center tuple(3) tuple(3) tuple(3)]
+                        [--b0_threshold thr] [--skip_b0_check]
+                        [-v [{DEBUG,INFO,WARNING}]] [-f]
+                        in_dwi in_bval in_bvec frf_file
+
+Compute a single Fiber Response Function from a DWI.
+
+A DTI fit is made, and voxels containing a single fiber population are
+found using a threshold on the FA.
+
+Formerly: scil_compute_ssst_frf.py
+
+positional arguments:
+  in_dwi               Path of the input diffusion volume.
+  in_bval              Path of the bvals file, in FSL format.
+  in_bvec              Path of the bvecs file, in FSL format.
+  frf_file             Path to the output FRF file, in .txt format, saved by Numpy.
+
+options:
+  -h, --help           show this help message and exit
+  --mask MASK          Path to a binary mask. Only the data inside the mask will be used
+                       for computations and reconstruction. Useful if no white matter mask
+                       is available.
+  --mask_wm MASK_WM    Path to a binary white matter mask. Only the data inside this mask
+                       and above the threshold defined by --fa_thresh will be used to estimate the
+                       fiber response function.
+  --fa_thresh FA_THRESH
+                       If supplied, use this threshold as the initial threshold to select
+                       single fiber voxels. [0.7]
+  --min_fa_thresh MIN_FA_THRESH
+                       If supplied, this is the minimal value that will be tried when looking
+                       for single fiber voxels. [0.5]
+  --min_nvox MIN_NVOX  Minimal number of voxels that need to be identified as single fiber voxels
+                       in the automatic estimation. [300]
+  --roi_radii ROI_RADII [ROI_RADII ...]
+                       If supplied, use those radii to select a cuboid roi to estimate the
+                       response functions. The roi will be a cuboid spanning from the middle of
+                       the volume in each direction with the different radii. The type is either
+                       an int (e.g. --roi_radii 10) or an array-like (3,) (e.g. --roi_radii 20 30 10). [[20]]
+  --roi_center tuple(3) tuple(3) tuple(3)
+                       If supplied, use this center to span the roi of size roi_radius. [center of the 3D volume]
+  --b0_threshold thr   Threshold under which b-values are considered to be b0s.
+                       [Default: 20]
+                       * Note. We would expect to find at least one b-value in the
+                       range [0, b0_threshold]. To skip this check, use --skip_b0_check.
+  --skip_b0_check      By default, we supervise that at least one b0 exists in your data
+                       (i.e. b-values below the default --b0_threshold). Use this option to
+                       allow continuing even if the minimum b-value is suspiciously high.
+                       If no b-value is found below the threshold, the script will continue
+                       with your minimal b-value as new --b0_threshold.
+                       Use with care, and only if you understand your data.
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
+
+References: [1] Tournier et al. NeuroImage 2007
diff --git a/scripts/.hidden/scil_get_version.py.help b/scripts/.hidden/scil_get_version.py.help
new file mode 100644
index 000000000..062cc2c1a
--- /dev/null
+++ b/scripts/.hidden/scil_get_version.py.help
@@ -0,0 +1,16 @@
+usage: scil_get_version.py [-h] [--show_dependencies]
+                           [-v [{DEBUG,INFO,WARNING}]]
+
+Gives you information about your current scilpy installation.
+This is useful for non-developers to give you the information
+needed to reproduce your results, or to help debugging.
+
+If you are experiencing a bug, please run this script and
+send the output to the scilpy developers.
+
+options:
+  -h, --help           show this help message and exit
+  --show_dependencies  Show the dependencies of scilpy.
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
diff --git a/scripts/.hidden/scil_gradients_apply_transform.py.help b/scripts/.hidden/scil_gradients_apply_transform.py.help
new file mode 100644
index 000000000..82a99e372
--- /dev/null
+++ b/scripts/.hidden/scil_gradients_apply_transform.py.help
@@ -0,0 +1,21 @@
+usage: scil_gradients_apply_transform.py [-h] [--inverse]
+                                         [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                         in_bvecs in_transfo out_bvecs
+
+Transform bvecs using an affine/rigid transformation.
+
+Formerly: scil_apply_transform_to_bvecs.py.
+
+positional arguments:
+  in_bvecs             Path of the bvec file, in FSL format.
+  in_transfo           Path of the file containing the 4x4
+                       transformation matrix (.txt, .npy or .mat).
+  out_bvecs            Output filename of the transformed bvecs.
+
+options:
+  -h, --help           show this help message and exit
+  --inverse            Apply the inverse transformation.
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_gradients_convert.py.help b/scripts/.hidden/scil_gradients_convert.py.help
new file mode 100644
index 000000000..ffec51279
--- /dev/null
+++ b/scripts/.hidden/scil_gradients_convert.py.help
@@ -0,0 +1,22 @@
+usage: scil_gradients_convert.py [-h] (--input_fsl | --input_mrtrix) [-f]
+                                 [-v [{DEBUG,INFO,WARNING}]]
+                                 GRADIENT_FILE(S) [GRADIENT_FILE(S) ...]
+                                 output
+
+Script to convert gradient tables between FSL and MRtrix formats.
+
+Formerly: scil_convert_gradients_mrtrix_to_fsl.py or
+scil_convert_gradients_fsl_to_mrtrix.py
+
+positional arguments:
+  GRADIENT_FILE(S)     Path(s) to the gradient file(s). Either FSL (.bval, .bvec) or MRtrix (.b).
+  output               Basename of output without extension. Extension(s) will be added
+                       automatically (.b for MRtrix, .bval/.bvec for FSL).
+
+options:
+  -h, --help           show this help message and exit
+  --input_fsl          FSL format.
+  --input_mrtrix       MRtrix format.
+  -f                   Force overwriting of the output files.
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
diff --git a/scripts/.hidden/scil_gradients_generate_sampling.py.help b/scripts/.hidden/scil_gradients_generate_sampling.py.help
new file mode 100644
index 000000000..e079dd95d
--- /dev/null
+++ b/scripts/.hidden/scil_gradients_generate_sampling.py.help
@@ -0,0 +1,67 @@
+usage: scil_gradients_generate_sampling.py [-h] [--eddy] [--duty]
+                                           [--no_b0_start NO_B0_START | --b0_every B0_EVERY]
+                                           [--b0_end] [--b0_value B0_VALUE]
+                                           [--b0_philips]
+                                           (--bvals bvals [bvals ...] | --b_lin_max B_LIN_MAX | --q_lin_max Q_LIN_MAX)
+                                           (--fsl | --mrtrix)
+                                           [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                           nb_samples_per_shell
+                                           [nb_samples_per_shell ...]
+                                           out_basename
+
+Generate multi-shell gradient sampling with various processing options. Helps
+accelerate gradients, optimize duty cycle and avoid artefacts.
+
+Multi-shell gradient sampling is generated as in [1]. The bvecs are then
+flipped to maximize spread for eddy current correction, b0s are interleaved at
+equal spacing and the non-b0 samples are finally shuffled to minimize the total
+diffusion gradient amplitude over a few TR.
+
+Formerly: scil_generate_gradient_sampling.py
+
+positional arguments:
+  nb_samples_per_shell Number of samples on each non-b0 shell.
+                       If multishell, provide a number per shell.
+  out_basename         Gradient sampling output basename (don't include extension).
+                       Please add options --fsl and/or --mrtrix below.
+
+options:
+  -h, --help           show this help message and exit
+  --eddy               If set, we apply eddy optimization.
+                       B-vectors are flipped to be well spread without symmetry.
+  --duty               If set, we apply duty cycle optimization.
+                       B-vectors are shuffled to reduce consecutive collinearity in the samples. [False]
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
+
+b0 acquisitions:
+  If no option is given, the default is to have a b0 at the start.
+
+  --no_b0_start NO_B0_START
+                       If set, do not add a b0 at the beginning.
+  --b0_every B0_EVERY  Interleave a b0 every n=b0_every values. Starts after the first b0
+                       (cannot be used with --no_b0_start). Must be an integer >= 1.
+  --b0_end             If set, adds a b0 as last sample.
+  --b0_value B0_VALUE  b-value of the b0s. [0.0]
+  --b0_philips         If set, replaces values of b0s bvecs by existing bvecs for Philips handling.
+
+Non-b0 acquisitions:
+  --bvals bvals [bvals ...]
+                       bval of each non-b0 shell.
+  --b_lin_max B_LIN_MAX
+                       b-max for linear bval distribution in *b*.
+  --q_lin_max Q_LIN_MAX
+                       b-max for linear bval distribution in *q*;
+                       the square root of b-values will be linearly distributed.
+
+Save as:
+  --fsl                Save in FSL format (.bvec/.bval).
+  --mrtrix             Save in MRtrix format (.b).
+
+References: [1] Emmanuel Caruyer, Christophe Lenglet, Guillermo Sapiro,
+Rachid Deriche. Design of multishell gradient sampling with uniform coverage
+in diffusion MRI. Magnetic Resonance in Medicine, Wiley, 2013, 69 (6),
+pp. 1534-1540.
+
diff --git a/scripts/.hidden/scil_gradients_modify_axes.py.help b/scripts/.hidden/scil_gradients_modify_axes.py.help
new file mode 100644
index 000000000..71b163f23
--- /dev/null
+++ b/scripts/.hidden/scil_gradients_modify_axes.py.help
@@ -0,0 +1,28 @@
+usage: scil_gradients_modify_axes.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                     in_gradient_sampling_file
+                                     out_gradient_sampling_file
+                                     {1,2,3,-1,-2,-3} {1,2,3,-1,-2,-3}
+                                     {1,2,3,-1,-2,-3}
+
+Flip (e.g. x --> -x) or swap (e.g. x <-> y) chosen axes of the gradient
+sampling matrix. The result will be saved in the same format as the input
+gradient sampling file.
+
+Formerly: scil_flip_gradients.py or scil_swap_gradient_axis.py
+
+positional arguments:
+  in_gradient_sampling_file
+                       Path to the gradient sampling file. (.bvec or .b)
+  out_gradient_sampling_file
+                       Where to save the flipped gradient sampling file. Extension (.bvec or .b)
+                       must be the same as in_gradient_sampling_file.
+  {1,2,3,-1,-2,-3}     The final order of the axes, compared to original order: x=1 y=2 z=3.
+                       Ex: to only flip y: 1 -2 3.
+                       Ex: to only swap x and y: 2 1 3.
+                       Ex: to first flip x, then permute all three axes: 3 -1 2.
+
+options:
+  -h, --help           show this help message and exit
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
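+
+Example with hypothetical filenames (this flips the y axis only, as in the
+axis examples above):
+>>> scil_gradients_modify_axes.py in.bvec out.bvec 1 -2 3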
diff --git a/scripts/.hidden/scil_gradients_round_bvals.py.help b/scripts/.hidden/scil_gradients_round_bvals.py.help
new file mode 100644
index 000000000..030942a65
--- /dev/null
+++ b/scripts/.hidden/scil_gradients_round_bvals.py.help
@@ -0,0 +1,33 @@
+usage: scil_gradients_round_bvals.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                     in_bval shells [shells ...] out_bval
+                                     tolerance
+
+Select b-values on specific b-value shells.
+
+With the tolerance argument, this is useful for sampling schemes where
+b-values of a shell are not all identical. Adjust the tolerance to vary the
+accepted interval around the targeted b-value.
+
+For example, a b-value of 2000 and a tolerance of 20 will select all b-values
+between [1980, 2020] and round them to the value of 2000.
+
+>>> scil_gradients_round_bvals.py bvals 0 1000 2000 newbvals 20
+
+Formerly: scil_resample_bvals.py
+
+positional arguments:
+  in_bval              The b-values in FSL format.
+  shells               The list of expected shells. For example 0 1000 2000.
+                       All b-values in the b_val file should correspond to one given shell (up to the tolerance).
+  out_bval             The name of the output b-values.
+  tolerance            The tolerated gap between the b-values to extract and the
+                       actual b-values. Expecting an integer value. Comparison is
+                       strict: a b-value of 1010 with a tolerance of 10 is NOT
+                       included in shell 1000. Suggestion: 20.
+
+options:
+  -h, --help           show this help message and exit
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_gradients_validate_correct.py.help b/scripts/.hidden/scil_gradients_validate_correct.py.help
new file mode 100644
index 000000000..19f2ce9d1
--- /dev/null
+++ b/scripts/.hidden/scil_gradients_validate_correct.py.help
@@ -0,0 +1,48 @@
+usage: scil_gradients_validate_correct.py [-h] [--mask MASK]
+                                          [--fa_threshold FA_THRESHOLD]
+                                          [--column_wise]
+                                          [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                          in_bvec in_peaks in_FA out_bvec
+
+Detect sign flips and/or axes swaps in the gradients table from a fiber
+coherence index [1]. The script takes as input the principal direction(s)
+at each voxel, the b-vectors and the fractional anisotropy map and outputs
+a corrected b-vectors file.
+
+A typical pipeline could be:
+>>> scil_dti_metrics.py dwi.nii.gz bval bvec --not_all --fa fa.nii.gz
+    --evecs peaks.nii.gz
+>>> scil_gradients_validate_correct.py bvec peaks_v1.nii.gz fa.nii.gz bvec_corr
+
+Note that peaks_v1.nii.gz is the file containing the direction associated
+to the highest eigenvalue at each voxel.
+
+It is also possible to use a file containing multiple principal directions per
+voxel, given that they are sorted by decreasing amplitude. In that case, the
+first direction (with the highest amplitude) will be chosen for validation.
+Only 4D data is supported, so the directions must be stored in a single
+dimension. For example, peaks.nii.gz from scil_fodf_metrics.py could be used.
+
+Formerly: scil_validate_and_correct_bvecs.py
+
+positional arguments:
+  in_bvec              Path to bvec file.
+  in_peaks             Path to peaks file.
+  in_FA                Path to the fractional anisotropy file.
+  out_bvec             Path to corrected bvec file (FSL format).
+
+options:
+  -h, --help           show this help message and exit
+  --mask MASK          Path to an optional mask. If set, FA and Peaks will only be used inside the mask.
+  --fa_threshold FA_THRESHOLD
+                       FA threshold. Only voxels with FA higher than fa_threshold will be considered. [0.2]
+  --column_wise        Specify if input peaks are column-wise (..., 3, N) instead of row-wise (..., N, 3).
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
+
+[1] Schilling KG, Yeh FC, Nath V, Hansen C, Williams O, Resnick S, Anderson AW,
+Landman BA. A fiber coherence index for quality control of B-table orientation
+in diffusion MRI scans. Magn Reson Imaging. 2019 May;58:82-89.
+doi: 10.1016/j.mri.2019.01.018.
diff --git a/scripts/.hidden/scil_gradients_validate_correct_eddy.py.help b/scripts/.hidden/scil_gradients_validate_correct_eddy.py.help
new file mode 100644
index 000000000..953b3d527
--- /dev/null
+++ b/scripts/.hidden/scil_gradients_validate_correct_eddy.py.help
@@ -0,0 +1,25 @@
+usage: scil_gradients_validate_correct_eddy.py [-h]
+                                               [-v [{DEBUG,INFO,WARNING}]]
+                                               [-f]
+                                               in_bvec in_bval nb_dirs
+                                               out_bvec out_bval
+
+Validate and correct gradients from eddy outputs.
+With full AP-PA eddy outputs, we get a full bvec/bval pair (2x the number of
+directions) that does not fit with the output dwi (1x the number of
+directions).
+
+Formerly: scil_validate_and_correct_eddy_gradients.py
+
+positional arguments:
+  in_bvec              In bvec file.
+  in_bval              In bval file.
+  nb_dirs              Number of directions per DWI.
+  out_bvec             Out bvec file.
+  out_bval             Out bval file.
+
+options:
+  -h, --help           show this help message and exit
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_header_print_info.py.help b/scripts/.hidden/scil_header_print_info.py.help
new file mode 100644
index 000000000..baa2ca705
--- /dev/null
+++ b/scripts/.hidden/scil_header_print_info.py.help
@@ -0,0 +1,20 @@
+usage: scil_header_print_info.py [-h] [--keys KEYS [KEYS ...]]
+                                 [--print_affine] [-v [{DEBUG,INFO,WARNING}]]
+                                 in_file
+
+Print the raw header from the provided file or only the specified keys.
+Supports trk, nii and mgz files.
+
+Formerly: scil_print_header.py
+
+positional arguments:
+  in_file              Input file (trk, nii or mgz).
+
+options:
+  -h, --help           show this help message and exit
+  --keys KEYS [KEYS ...]
+                       Print only the specified keys.
+  --print_affine       Print nibabel's affine.
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
diff --git a/scripts/.hidden/scil_header_validate_compatibility.py.help b/scripts/.hidden/scil_header_validate_compatibility.py.help
new file mode 100644
index 000000000..28dc2ae4c
--- /dev/null
+++ b/scripts/.hidden/scil_header_validate_compatibility.py.help
@@ -0,0 +1,22 @@
+usage: scil_header_validate_compatibility.py [-h] [--reference REFERENCE]
+                                             [-v [{DEBUG,INFO,WARNING}]]
+                                             in_files [in_files ...]
+
+Will compare all input files against the first one for the compatibility
+of their spatial attributes.
+
+Spatial attributes are: affine, dimensions, voxel sizes and voxel order.
+
+Formerly: scil_verify_space_attributes_compatibility.py
+
+positional arguments:
+  in_files             List of files to compare (trk, tck and nii/nii.gz).
+
+options:
+  -h, --help           show this help message and exit
+  --reference REFERENCE
+                       Reference anatomy for tck/vtk/fib/dpy file
+                       support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
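+
+Example, a minimal sketch with hypothetical filenames (--reference is only
+needed for tck/vtk/fib/dpy inputs):
+>>> scil_header_validate_compatibility.py dwi.nii.gz fa.nii.gz bundle.trk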
diff --git a/scripts/.hidden/scil_json_convert_entries_to_xlsx.py.help b/scripts/.hidden/scil_json_convert_entries_to_xlsx.py.help
new file mode 100644
index 000000000..552987b90
--- /dev/null
+++ b/scripts/.hidden/scil_json_convert_entries_to_xlsx.py.help
@@ -0,0 +1,29 @@
+usage: scil_json_convert_entries_to_xlsx.py [-h] [--no_sort_subs]
+                                            [--no_sort_bundles]
+                                            [--ignore_bundles FILE]
+                                            [--stats_over_population]
+                                            [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                            in_json out_xlsx
+
+Convert a final aggregated json file to an Excel spreadsheet.
+Typically used during the tractometry pipeline.
+
+Formerly: scil_convert_json_to_xlsx.py
+
+positional arguments:
+  in_json              File containing the json stats (.json).
+  out_xlsx             Output Excel file for the stats (.xlsx).
+
+options:
+  -h, --help           show this help message and exit
+  --no_sort_subs       If set, subjects won't be sorted alphabetically.
+  --no_sort_bundles    If set, bundles won't be sorted alphabetically.
+  --ignore_bundles FILE
+                       Path to a text file containing a list of bundles to ignore (.txt).
+                       One bundle, corresponding to keys in the json, per line.
+  --stats_over_population
+                       If set, consider the input stats to be over an entire population and not subject-based.
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_json_harmonize_entries.py.help b/scripts/.hidden/scil_json_harmonize_entries.py.help
new file mode 100644
index 000000000..42dc105d6
--- /dev/null
+++ b/scripts/.hidden/scil_json_harmonize_entries.py.help
@@ -0,0 +1,31 @@
+usage: scil_json_harmonize_entries.py [-h] [--indent INDENT] [--sort_keys]
+                                      [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                      in_file out_file
+
+This script will harmonize a json file by adding missing keys and values
+that differ between the different layers of the dictionary.
+
+This is used only (for now) in the Aggregate_All_* portion of tractometry-flow,
+to counter the problem of missing bundles/metrics/lesions between subjects.
+
+The most common use case is when specific subjects have missing bundles,
+which will cause a pandas array to be incomplete, and thus crash. Finding
+the union of all bundles/metrics/lesions makes it possible to create a
+complete json (but with NaN for missing values).
+
+Formerly: scil_harmonize_json.py
+
+positional arguments:
+  in_file              Input file (json).
+  out_file             Output file (json).
+
+options:
+  -h, --help           show this help message and exit
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
+
+Json options:
+  --indent INDENT      Indent for json pretty print.
+  --sort_keys          Sort keys in output json.
diff --git a/scripts/.hidden/scil_json_merge_entries.py.help b/scripts/.hidden/scil_json_merge_entries.py.help
new file mode 100644
index 000000000..704c245d7
--- /dev/null
+++ b/scripts/.hidden/scil_json_merge_entries.py.help
@@ -0,0 +1,55 @@
+usage: scil_json_merge_entries.py [-h] [--keep_separate] [--no_list]
+                                  [--add_parent_key ADD_PARENT_KEY]
+                                  [--remove_parent_key] [--recursive]
+                                  [--average_last_layer] [--indent INDENT]
+                                  [--sort_keys] [-v [{DEBUG,INFO,WARNING}]]
+                                  [-f]
+                                  in_json [in_json ...] out_json
+
+Merge multiple json files into a single one.
+Typically used during the tractometry pipeline.
+
+Without options, it will simply merge all entries at the top level; the top
+level must not have any conflicting keys.
+
+--keep_separate option will add a parent for each file, its basename will
+become the key.
+
+--no_list option will merge all entries at the top level; if there is a
+conflict, the lowest level will be extended with the new values (if a list)
+or added (if a value).
+
+--add_parent_key option will add a parent key before merging all entries.
+
+--remove_parent_key option will remove the parent key before merging all
+entries.
+
+--recursive option will merge all entries (scalar) at the lowest layers as a
+list.
+
+--average_last_layer option will average all entries (scalar) at the lowest
+layers, but instead of creating a list it creates a mean/std level.
+
+Formerly: scil_merge_json.py
+
+positional arguments:
+  in_json              List of json files to merge (.json).
+  out_json             Output json file (.json).
+
+options:
+  -h, --help           show this help message and exit
+  --keep_separate      Merge entries as separate keys based on filename.
+  --no_list            Merge entries knowing there is no conflict.
+  --add_parent_key ADD_PARENT_KEY
+                       Merge all entries under a single parent.
+  --remove_parent_key  Merge ignoring parent key (e.g. for population).
+  --recursive          Merge all entries at the lowest layers.
+  --average_last_layer Average all entries at the lowest layers.
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
+
+Json options:
+  --indent INDENT      Indent for json pretty print.
+  --sort_keys          Sort keys in output json.
diff --git a/scripts/.hidden/scil_labels_combine.py.help b/scripts/.hidden/scil_labels_combine.py.help
new file mode 100644
index 000000000..3476d2a3a
--- /dev/null
+++ b/scripts/.hidden/scil_labels_combine.py.help
@@ -0,0 +1,48 @@
+usage: scil_labels_combine.py [-h] --volume_ids VOLUME_IDS [VOLUME_IDS ...]
+                              [--out_labels_ids OUT_LABELS_IDS [OUT_LABELS_IDS ...]
+                              | --unique | --group_in_m]
+                              [--background BACKGROUND] [--merge_groups]
+                              [-v [{DEBUG,INFO,WARNING}]] [-f]
+                              output
+
+Script to combine labels from multiple volumes. If there is overlap, it will
+overwrite them based on the input order.
+
+  >>> scil_labels_combine.py out_labels.nii.gz
+          --volume_ids animal_labels.nii 20
+          --volume_ids DKT_labels.nii.gz 44 53
+          --out_labels_ids 20 44 53
+  >>> scil_labels_combine.py slf_labels.nii.gz
+          --volume_ids a2009s_aseg.nii.gz all
+          --volume_ids clean/s1__DKT.nii.gz 1028 2028
+
+Formerly: scil_combine_labels.py.
+
+positional arguments:
+  output               Combined labels volume output.
+
+options:
+  -h, --help           show this help message and exit
+  --volume_ids VOLUME_IDS [VOLUME_IDS ...]
+                       List of volumes directly followed by their labels:
+                       --volume_ids atlasA id1a id2a
+                       --volume_ids atlasB id1b id2b ...
+                       "all" can be used instead of id numbers.
+  --out_labels_ids OUT_LABELS_IDS [OUT_LABELS_IDS ...]
+                       List of labels indices for output images.
+  --unique             If set, output ids with unique labels, excluding the first background value.
+  --group_in_m         Add (x * 10 000) to each volume's labels, where x is the input volume order number.
+  --background BACKGROUND
+                       Background id, excluded from output [0];
+                       the value is used as output background value.
+  --merge_groups       Each group from the --volume_ids option will be merged as a single label.
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
+
+  References:
+      [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,
+          Evans A.C. and Descoteaux M. OHBM 2019.
+          Surface integration for connectome analysis in age prediction.
+
diff --git a/scripts/.hidden/scil_labels_dilate.py.help b/scripts/.hidden/scil_labels_dilate.py.help
new file mode 100644
index 000000000..ccfb81a56
--- /dev/null
+++ b/scripts/.hidden/scil_labels_dilate.py.help
@@ -0,0 +1,51 @@
+usage: scil_labels_dilate.py [-h] [--distance DISTANCE]
+                             [--labels_to_dilate LABELS_TO_DILATE [LABELS_TO_DILATE ...]]
+                             [--labels_to_fill LABELS_TO_FILL [LABELS_TO_FILL ...]]
+                             [--labels_not_to_dilate LABELS_NOT_TO_DILATE [LABELS_NOT_TO_DILATE ...]]
+                             [--mask MASK] [--processes NBR]
+                             [-v [{DEBUG,INFO,WARNING}]] [-f]
+                             in_file out_file
+
+Dilate regions (with or without masking) from a labeled volume:
+- "labels_to_dilate" are regions that will dilate over
+  "labels_to_fill" if close enough to them ("distance").
+- "labels_to_dilate", by default (None), will be all
+  non-"labels_to_fill" and non-"labels_not_to_dilate".
+- "labels_not_to_dilate" will not be changed, but will not dilate.
+- "mask" is where the dilation is allowed (constrained),
+  in addition to "background_label" (logical AND).
+
+>>> scil_labels_dilate.py wmparc_t1.nii.gz wmparc_dil.nii.gz \
+    --labels_to_fill 0 5001 5002 \
+    --labels_not_to_dilate 4 43 10 11 12 49 50 51
+
+Formerly: scil_dilate_labels.py
+
+positional arguments:
+  in_file              Path of the volume (nii or nii.gz).
+  out_file             Output filename of the dilated labels.
+
+options:
+  -h, --help           show this help message and exit
+  --distance DISTANCE  Maximal distance to dilate (in mm) [2.0].
+  --labels_to_dilate LABELS_TO_DILATE [LABELS_TO_DILATE ...]
+                       Label list to dilate. By default it dilates all
+                       labels not in labels_to_fill nor in labels_not_to_dilate.
+  --labels_to_fill LABELS_TO_FILL [LABELS_TO_FILL ...]
+                       Background id / labels to be filled [[0]],
+                       the first one is given as output background value.
+  --labels_not_to_dilate LABELS_NOT_TO_DILATE [LABELS_NOT_TO_DILATE ...]
+                       Label list not to dilate.
+  --mask MASK          Only dilate values inside the mask.
+  --processes NBR      Number of sub-processes to start.
+                       Default: [1]
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
+
+  References:
+      [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,
+          Evans A.C. and Descoteaux M. OHBM 2019.
+          Surface integration for connectome analysis in age prediction.
+
diff --git a/scripts/.hidden/scil_labels_remove.py.help b/scripts/.hidden/scil_labels_remove.py.help
new file mode 100644
index 000000000..68a09bcaf
--- /dev/null
+++ b/scripts/.hidden/scil_labels_remove.py.help
@@ -0,0 +1,31 @@
+usage: scil_labels_remove.py [-h] -i INDICES [INDICES ...]
+                             [--background BACKGROUND]
+                             [-v [{DEBUG,INFO,WARNING}]] [-f]
+                             in_labels out_labels
+
+Script to remove specific labels from an atlas volume.
+
+  >>> scil_labels_remove.py DKT_labels.nii out_labels.nii.gz -i 5001 5002
+
+Formerly: scil_remove_labels.py
+
+positional arguments:
+  in_labels            Input labels volume.
+  out_labels           Output labels volume.
+
+options:
+  -h, --help           show this help message and exit
+  -i INDICES [INDICES ...], --indices INDICES [INDICES ...]
+                       List of labels indices to remove.
+  --background BACKGROUND
+                       Integer used for removed labels [0].
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
+
+  References:
+      [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,
+          Evans A.C. and Descoteaux M. OHBM 2019.
+          Surface integration for connectome analysis in age prediction.
+
diff --git a/scripts/.hidden/scil_labels_split_volume_by_ids.py.help b/scripts/.hidden/scil_labels_split_volume_by_ids.py.help
new file mode 100644
index 000000000..a563eb3e4
--- /dev/null
+++ b/scripts/.hidden/scil_labels_split_volume_by_ids.py.help
@@ -0,0 +1,32 @@
+usage: scil_labels_split_volume_by_ids.py [-h] [--out_dir OUT_DIR]
+                                          [--out_prefix OUT_PREFIX]
+                                          [-r min max min max]
+                                          [--background BACKGROUND]
+                                          [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                          in_labels
+
+Split a label image into multiple images where the name of the output images
+is the id of the label (ex. 35.nii.gz, 36.nii.gz, ...). If the --range option
+is not provided, all labels of the image are extracted. The label 0 is
+considered as the background and is ignored.
+
+IMPORTANT: your label image must be of an integer type.
+
+Formerly: scil_split_volume_by_ids.py
+
+positional arguments:
+  in_labels            Path of the input label file, in a format supported by Nibabel.
+
+options:
+  -h, --help           show this help message and exit
+  --out_dir OUT_DIR    Put all output images in a specific directory.
+  --out_prefix OUT_PREFIX
+                       Prefix to be used for each output image.
+  -r min max min max, --range min max min max
+                       Specifies a subset of labels to split, formatted as min max. Ex: -r 3 5 will give files _3, _4, _5.
+  --background BACKGROUND
+                       Background value. Will not be saved as a separate label. Default: 0.
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_labels_split_volume_from_lut.py.help b/scripts/.hidden/scil_labels_split_volume_from_lut.py.help
new file mode 100644
index 000000000..ee43eb4be
--- /dev/null
+++ b/scripts/.hidden/scil_labels_split_volume_from_lut.py.help
@@ -0,0 +1,31 @@
+usage: scil_labels_split_volume_from_lut.py [-h] [--out_dir OUT_DIR]
+                                            [--out_prefix OUT_PREFIX]
+                                            (--scilpy_lut {freesurfer_subcortical,dk_aggregate_structures,freesurfer_desikan_killiany} | --custom_lut CUSTOM_LUT)
+                                            [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                            in_label
+
+Split a label image into multiple images where the name of the output images
+is taken from a lookup table (ex: left-lateral-occipital.nii.gz,
+right-thalamus.nii.gz, ...). Only the labels included in the lookup table
+are extracted.
+
+IMPORTANT: your label image must be of an integer type.
+
+Formerly: scil_split_volume_by_labels.py
+
+positional arguments:
+  in_label             Path of the input label file, in a format supported by Nibabel.
+
+options:
+  -h, --help           show this help message and exit
+  --out_dir OUT_DIR    Put all output images in a specific directory.
+  --out_prefix OUT_PREFIX
+                       Prefix to be used for each output image.
+  --scilpy_lut {freesurfer_subcortical,dk_aggregate_structures,freesurfer_desikan_killiany}
+                       Lookup table, in the file scilpy/data/LUT, used to name the output files.
+  --custom_lut CUSTOM_LUT
+                       Path of the lookup table file, used to name the output files.
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_lesions_info.py.help b/scripts/.hidden/scil_lesions_info.py.help
new file mode 100644
index 000000000..70b783114
--- /dev/null
+++ b/scripts/.hidden/scil_lesions_info.py.help
@@ -0,0 +1,50 @@
+usage: scil_lesions_info.py [-h]
+                            [--bundle BUNDLE | --bundle_mask BUNDLE_MASK | --bundle_labels_map BUNDLE_LABELS_MAP]
+                            [--min_lesion_vol MIN_LESION_VOL]
+                            [--out_lesion_atlas FILE]
+                            [--out_lesion_stats FILE]
+                            [--out_streamlines_stats FILE] [--indent INDENT]
+                            [--sort_keys] [--reference REFERENCE]
+                            [-v [{DEBUG,INFO,WARNING}]] [-f]
+                            in_lesion out_json
+
+This script will output information about lesion load in bundle(s).
+The input can be either streamlines, a binary bundle map, or a bundle voxel
+label map.
+
+To be considered a valid lesion, the lesion volume must be at least
+min_lesion_vol mm3. This avoids the detection of thousands of single-voxel
+lesions if an automatic lesion segmentation tool is used.
+
+Formerly: scil_analyse_lesions_load.py
+
+positional arguments:
+  in_lesion            Binary mask of the lesion(s) (.nii.gz).
+  out_json             Output file for lesion information (.json).
+
+options:
+  -h, --help           show this help message and exit
+  --bundle BUNDLE      Path of the bundle file (.trk).
+  --bundle_mask BUNDLE_MASK
+                       Path of the bundle binary mask (.nii.gz).
+  --bundle_labels_map BUNDLE_LABELS_MAP
+                       Path of the bundle labels map (.nii.gz).
+  --min_lesion_vol MIN_LESION_VOL
+                       Minimum lesion volume in mm3 [7].
+  --out_lesion_atlas FILE
+                       Save the labelized lesion(s) map (.nii.gz).
+  --out_lesion_stats FILE
+                       Save the lesion-wise volume measure (.json).
+  --out_streamlines_stats FILE
+                       Save the lesion-wise streamline count (.json).
+  --reference REFERENCE
+                       Reference anatomy for tck/vtk/fib/dpy file
+                       support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
+
+Json options:
+  --indent INDENT      Indent for json pretty print.
+  --sort_keys          Sort keys in output json.
diff --git a/scripts/.hidden/scil_mti_adjust_B1_header.py.help b/scripts/.hidden/scil_mti_adjust_B1_header.py.help
new file mode 100644
index 000000000..8ca74d463
--- /dev/null
+++ b/scripts/.hidden/scil_mti_adjust_B1_header.py.help
@@ -0,0 +1,17 @@
+usage: scil_mti_adjust_B1_header.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                    in_B1_map out_B1_map in_B1_json
+
+Correct a B1 map header problem by applying the scaling (slope) and setting
+the slope to 1.
+
+positional arguments:
+  in_B1_map            Path to input B1 map file.
+  out_B1_map           Path to output B1 map file.
+  in_B1_json           Json file of the B1 map.
+
+options:
+  -h, --help           show this help message and exit
+  -v [{DEBUG,INFO,WARNING}]
+                       Produces verbose output depending on the provided level.
+                       Default level is warning, default when using -v is info.
+  -f                   Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_mti_maps_MT.py.help b/scripts/.hidden/scil_mti_maps_MT.py.help
new file mode 100644
index 000000000..0745e3fe3
--- /dev/null
+++ b/scripts/.hidden/scil_mti_maps_MT.py.help
@@ -0,0 +1,150 @@
+usage: scil_mti_maps_MT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]
+                           --in_positive IN_POSITIVE [IN_POSITIVE ...]
+                           --in_negative IN_NEGATIVE [IN_NEGATIVE ...]
+                           --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]
diff --git a/scripts/.hidden/scil_mti_maps_MT.py.help b/scripts/.hidden/scil_mti_maps_MT.py.help
new file mode 100644
index 000000000..0745e3fe3
--- /dev/null
+++ b/scripts/.hidden/scil_mti_maps_MT.py.help
@@ -0,0 +1,150 @@
+usage: scil_mti_maps_MT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]
+ --in_positive IN_POSITIVE [IN_POSITIVE ...]
+ --in_negative IN_NEGATIVE [IN_NEGATIVE ...]
+ --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]
+ [--in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]]
+ [--extended] [--filtering]
+ [--in_jsons PD_json T1_json | --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time]
+ [--in_B1_map IN_B1_MAP]
+ [--B1_correction_method {empiric,model_based}]
+ [--B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]]
+ [--B1_nominal B1_NOMINAL]
+ [--B1_smooth_dims B1_SMOOTH_DIMS]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ out_dir
+
+This script computes two myelin indices maps from the Magnetization Transfer
+(MT) images.
+Magnetization Transfer is a contrast mechanism in tissue resulting from the
+proton exchange between non-aqueous protons (from macromolecules and their
+closely associated water molecules, the "bound" pool) and protons in the free
+water pool called aqueous protons. This exchange attenuates the MRI signal,
+introducing microstructure-dependent contrast. MT's effect reflects the
+relative density of macromolecules such as proteins and lipids; it has been
+associated with myelin content in the white matter of the brain.
+
+Different contrasts can be obtained with an off-resonance pulse saturating the
+protons on non-aqueous molecules at a given irradiation frequency. The MT maps
+are obtained using three or four contrasts: a single positive frequency image
+and/or a single negative frequency image, and two unsaturated contrasts as
+reference. These two references should be acquired with predominant PD
+(proton density) and T1 weighting at different excitation flip angles
+(a_PD, a_T1) and repetition times (TR_PD, TR_T1).
+
+Input Data recommendation:
+ - it is recommended to use dcm2niix (v1.0.20200331) to convert data
+ https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331
+ - dcm2niix conversion will create all echo files for each contrast and
+ corresponding json files
+ - all inputs must have a matching json file with the same filename
+ - all contrasts must have the same number of echoes and be coregistered
+ before running the script.
+ - Mask must be coregistered to the echo images
+ - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)
+
+The output consists of a MT_native_maps folder containing the 2 myelin maps:
+ - MTR.nii.gz : Magnetization Transfer Ratio map
+ The MT ratio is a measure reflecting the amount of bound protons.
+ - MTsat.nii.gz : Magnetization Transfer saturation map
+ The MT saturation is a pseudo-quantitative map representing
+ the signal change between the bound and free water pools.
+
+As an option, the Complementary_maps folder contains the following images:
+ - positive.nii.gz : single positive frequency image
+ - negative.nii.gz : single negative frequency image
+ - mtoff_PD.nii.gz : unsaturated proton density weighted image
+ - mtoff_T1.nii.gz : unsaturated T1 weighted image
+ - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image
+ - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image
+ - R1app.nii.gz : Apparent R1 map computed for MTsat.
+ - B1_map.nii.gz : B1 map after correction and smoothing (if given).
+
+The final maps from MT_native_maps can be corrected for B1+ field
+ inhomogeneity, using either an empiric method with
+ --in_B1_map option (the suffix *B1_corrected is added to each map)
+ --B1_correction_method empiric
+ or a model-based method with
+ --in_B1_map option (the suffix *B1_corrected is added to each map)
+ --B1_correction_method model_based
+ --B1_fitValues 1 or 2 .mat files, obtained externally from
+ https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,
+ and given in this order: positive frequency saturation, negative frequency
+ saturation.
+For both methods, the nominal value of the B1 map can be set with
+ --B1_nominal value
+
+>>> scil_mti_maps_MT.py path/to/output/directory
+ --in_mtoff_pd path/to/echo*mtoff.nii.gz
+ --in_positive path/to/echo*pos.nii.gz --in_negative path/to/echo*neg.nii.gz
+ --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz
+ --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json
+
+By default, the script uses all the echoes available in the input folder.
+If you want to use a single echo, replace the * with the specific number of
+the echo.
+
+positional arguments:
+ out_dir Path to output folder.
+
+options:
+ -h, --help show this help message and exit
+ --out_prefix OUT_PREFIX
+ Prefix to be used for each output image.
+ --mask MASK Path to the binary brain mask.
+ --extended If set, outputs the folder Complementary_maps.
+ --filtering Gaussian filtering to remove Gibbs ringing. Not recommended.
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+Contrast maps:
+ Path to echoes corresponding to contrast images. All contrasts must have
+ the same number of echoes and be coregistered. Use * to include all echoes.
+ The in_mtoff_pd input and at least one of in_positive or in_negative are required.
+
+ --in_positive IN_POSITIVE [IN_POSITIVE ...]
+ Path to all echoes corresponding to the positive frequency
+ saturation pulse.
+ --in_negative IN_NEGATIVE [IN_NEGATIVE ...]
+ Path to all echoes corresponding to the negative frequency
+ saturation pulse.
+ --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]
+ Path to all echoes corresponding to the predominant PD
+ (proton density) weighting images with no saturation pulse.
+ --in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]
+ Path to all echoes corresponding to the predominant T1
+ weighting images with no saturation pulse. This one is optional,
+ since it is only needed for the calculation of MTsat.
+ Acquisition parameters should also be set with this image.
+
+Acquisition parameters:
+ Acquisition parameters required for MTsat and ihMTsat calculation.
+ These are the excitation flip angles (a_PD, a_T1), in DEGREES, and
+ repetition times (TR_PD, TR_T1) of the PD and T1 images, in SECONDS.
+ Can be given through json files (--in_jsons) or directly (--in_acq_parameters).
+
+ --in_jsons PD_json T1_json
+ Path to MToff PD json file and MToff T1 json file, in that order.
+ The acquisition parameters will be extracted from these files.
+ Must come from a Philips acquisition; otherwise, use --in_acq_parameters.
+ --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time
+ Acquisition parameters in that order: flip angle of mtoff_PD,
+ flip angle of mtoff_T1, repetition time of mtoff_PD,
+ repetition time of mtoff_T1
+
+B1 correction:
+ --in_B1_map IN_B1_MAP
+ Path to B1 map coregistered to the MT contrasts.
+ --B1_correction_method {empiric,model_based}
+ Choice of B1 correction method. Choose between empiric and model-based.
+ Note that the model-based method requires a B1 fitvalues file.
+ Both methods will only correct the saturation measures. [empiric]
+ --B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]
+ Path to B1 fitvalues files obtained externally. Should be one .mat
+ file per input MT-on image, given in this specific order:
+ positive frequency saturation, negative frequency saturation.
+ --B1_nominal B1_NOMINAL
+ Nominal value for the B1 map. For Philips, should be 100. [100]
+ --B1_smooth_dims B1_SMOOTH_DIMS
+ Dimension of the square window used for B1 smoothing, in number of voxels. [5]
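+
+Example of model-based B1 correction (hypothetical paths; the fitvalues
+files are obtained externally as described above):
+>>> scil_mti_maps_MT.py path/to/output/directory
+    --in_mtoff_pd path/to/echo*mtoff.nii.gz
+    --in_positive path/to/echo*pos.nii.gz --in_negative path/to/echo*neg.nii.gz
+    --in_mtoff_t1 path/to/echo*T1w.nii.gz
+    --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json
+    --in_B1_map path/to/B1_map.nii.gz --B1_correction_method model_based
+    --B1_fitvalues fit_pos.mat fit_neg.mat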
diff --git a/scripts/.hidden/scil_mti_maps_ihMT.py.help b/scripts/.hidden/scil_mti_maps_ihMT.py.help
new file mode 100644
index 000000000..a0de4d6b9
--- /dev/null
+++ b/scripts/.hidden/scil_mti_maps_ihMT.py.help
@@ -0,0 +1,164 @@
+usage: scil_mti_maps_ihMT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]
+ --in_altnp IN_ALTNP [IN_ALTNP ...] --in_altpn
+ IN_ALTPN [IN_ALTPN ...] --in_negative IN_NEGATIVE
+ [IN_NEGATIVE ...] --in_positive IN_POSITIVE
+ [IN_POSITIVE ...] --in_mtoff_pd IN_MTOFF_PD
+ [IN_MTOFF_PD ...]
+ [--in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]]
+ [--extended] [--filtering]
+ [--in_jsons PD_json T1_json | --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time]
+ [--in_B1_map IN_B1_MAP]
+ [--B1_correction_method {empiric,model_based}]
+ [--B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]]
+ [--B1_nominal B1_NOMINAL]
+ [--B1_smooth_dims B1_SMOOTH_DIMS]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ out_dir
+
+This script computes four myelin indices maps from the Magnetization Transfer
+(MT) and inhomogeneous Magnetization Transfer (ihMT) images. Magnetization
+Transfer is a contrast mechanism in tissue resulting from the proton exchange
+between non-aqueous protons (from macromolecules and their closely associated
+water molecules, the "bound" pool) and protons in the free water pool called
+aqueous protons. This exchange attenuates the MRI signal, introducing
+microstructure-dependent contrast. MT's effect reflects the relative density
+of macromolecules such as proteins and lipids; it has been associated with
+myelin content in the white matter of the brain.
+
+Different contrasts can be done with an off-resonance pulse prior to image
+acquisition (a prepulse), saturating the protons on non-aqueous molecules,
+by applying different frequency irradiation. The two MT maps and two ihMT maps
+are obtained using six contrasts: single positive frequency image, single
+negative frequency image, dual alternating positive/negative frequency image,
+dual alternating negative/positive frequency image (saturated images);
+and two unsaturated contrasts as reference. These two references should be
+acquired with predominant PD (proton density) and T1 weighting at different
+excitation flip angles (a_PD, a_T1) and repetition times (TR_PD, TR_T1).
+
+Input Data recommendation:
+ - it is recommended to use dcm2niix (v1.0.20200331) to convert data
+ https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331
+ - dcm2niix conversion will create all echo files for each contrast and
+ corresponding json files
+ - all contrasts must have the same number of echoes and be coregistered
+ before running the script
+ - Mask must be coregistered to the echo images
+ - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)
+
+The output consists of a ihMT_native_maps folder containing the 4 myelin maps:
+ - MTR.nii.gz : Magnetization Transfer Ratio map
+ - ihMTR.nii.gz : inhomogeneous Magnetization Transfer Ratio map
+ The (ih)MT ratio is a measure reflecting the amount of bound protons.
+ - MTsat.nii.gz : Magnetization Transfer saturation map
+ - ihMTsat.nii.gz : inhomogeneous Magnetization Transfer saturation map
+ The (ih)MT saturation is a pseudo-quantitative map representing
+ the signal change between the bound and free water pools.
+
+As an option, the Complementary_maps folder contains the following images:
+ - altnp.nii.gz : dual alternating negative and positive frequency image
+ - altpn.nii.gz : dual alternating positive and negative frequency image
+ - positive.nii.gz : single positive frequency image
+ - negative.nii.gz : single negative frequency image
+ - mtoff_PD.nii.gz : unsaturated proton density weighted image
+ - mtoff_T1.nii.gz : unsaturated T1 weighted image
+ - MTsat_d.nii.gz : MTsat computed from the mean dual frequency images
+ - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image
+ - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image
+ - R1app.nii.gz : Apparent R1 map computed for MTsat.
+ - B1_map.nii.gz : B1 map after correction and smoothing (if given).
+
+The final maps from ihMT_native_maps can be corrected for B1+ field
+ inhomogeneity, using either an empiric method with
+ --in_B1_map option (the suffix *B1_corrected is added to each map)
+ --B1_correction_method empiric
+ or a model-based method with
+ --in_B1_map option (the suffix *B1_corrected is added to each map)
+ --B1_correction_method model_based
+ --B1_fitValues 3 .mat files, obtained externally from
+ https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,
+ and given in this order: positive frequency saturation, negative frequency
+ saturation, dual frequency saturation.
+For both methods, the nominal value of the B1 map can be set with
+ --B1_nominal value
+
+>>> scil_mti_maps_ihMT.py path/to/output/directory
+ --in_altnp path/to/echo*altnp.nii.gz --in_altpn path/to/echo*altpn.nii.gz
+ --in_mtoff_pd path/to/echo*mtoff.nii.gz
+ --in_negative path/to/echo*neg.nii.gz --in_positive path/to/echo*pos.nii.gz
+ --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz
+ --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json
+
+By default, the script uses all the echoes available in the input folder.
+If you want to use a single echo, replace the * with the specific number of
+the echo.
+
+positional arguments:
+ out_dir Path to output folder.
+
+options:
+ -h, --help show this help message and exit
+ --out_prefix OUT_PREFIX
+ Prefix to be used for each output image.
+ --mask MASK Path to the binary brain mask.
+ --extended If set, outputs the folder Complementary_maps.
+ --filtering Gaussian filtering to remove Gibbs ringing. Not recommended.
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+Contrast maps:
+ Path to echoes corresponding to contrast images. All contrasts must have
+ the same number of echoes and be coregistered. Use * to include all echoes.
+
+ --in_altnp IN_ALTNP [IN_ALTNP ...]
+ Path to all echoes corresponding to the alternation of
+ negative and positive frequency saturation pulse.
+ --in_altpn IN_ALTPN [IN_ALTPN ...]
+ Path to all echoes corresponding to the alternation of
+ positive and negative frequency saturation pulse.
+ --in_negative IN_NEGATIVE [IN_NEGATIVE ...]
+ Path to all echoes corresponding to the negative frequency
+ saturation pulse.
+ --in_positive IN_POSITIVE [IN_POSITIVE ...]
+ Path to all echoes corresponding to the positive frequency
+ saturation pulse.
+ --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]
+ Path to all echoes corresponding to the predominant PD
+ (proton density) weighting images with no saturation pulse.
+ --in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]
+ Path to all echoes corresponding to the predominant T1
+ weighting images with no saturation pulse. This one is optional,
+ since it is only needed for the calculation of MTsat and ihMTsat.
+ Acquisition parameters should also be set with this image.
+
+Acquisition parameters:
+ Acquisition parameters required for MTsat and ihMTsat calculation.
+ These are the excitation flip angles (a_PD, a_T1), in DEGREES, and
+ repetition times (TR_PD, TR_T1) of the PD and T1 images, in SECONDS.
+ Can be given through json files (--in_jsons) or directly (--in_acq_parameters).
+
+ --in_jsons PD_json T1_json
+ Path to MToff PD json file and MToff T1 json file, in that order.
+ The acquisition parameters will be extracted from these files.
+ Must come from a Philips acquisition; otherwise, use --in_acq_parameters.
+ --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time
+ Acquisition parameters in that order: flip angle of mtoff_PD,
+ flip angle of mtoff_T1, repetition time of mtoff_PD,
+ repetition time of mtoff_T1
+
+B1 correction:
+ --in_B1_map IN_B1_MAP
+ Path to B1 map coregistered to the MT contrasts.
+ --B1_correction_method {empiric,model_based}
+ Choice of B1 correction method. Choose between empiric and model-based.
+ Note that the model-based method requires a B1 fitvalues file.
+ Both methods will only correct the saturation measures. [empiric]
+ --B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]
+ Path to B1 fitvalues files obtained externally. Should be one .mat
+ file per input MT-on image, given in this specific order:
+ positive frequency saturation, negative frequency saturation.
+ --B1_nominal B1_NOMINAL
+ Nominal value for the B1 map. For Philips, should be 100. [100]
+ --B1_smooth_dims B1_SMOOTH_DIMS
+ Dimension of the square window used for B1 smoothing, in number of voxels. [5]
diff --git a/scripts/.hidden/scil_plot_stats_per_point.py.help b/scripts/.hidden/scil_plot_stats_per_point.py.help
new file mode 100644
index 000000000..c156e3da1
--- /dev/null
+++ b/scripts/.hidden/scil_plot_stats_per_point.py.help
@@ -0,0 +1,33 @@
+usage: scil_plot_stats_per_point.py [-h] [--stats_over_population]
+ [--nb_pts NB_PTS] [--display_means]
+ [--fill_color FILL_COLOR | --dict_colors DICT_COLORS]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_json out_dir
+
+Plot all mean/std per point for a subject or population json file from
+tractometry-flow.
+WARNING: For a population, the displayed STDs only show the variation
+of the means. They do not account for intra-subject STDs.
+
+Formerly: scil_plot_mean_std_per_point.py
+
+positional arguments:
+ in_json JSON file containing the mean/std per point. For example, can be created using scil_bundle_mean_std.py.
+ out_dir Output directory.
+
+options:
+ -h, --help show this help message and exit
+ --stats_over_population
+ If set, consider the input stats to be over an entire population and not subject-based.
+ --nb_pts NB_PTS Force the number of divisions for the bundles.
+ Avoids unequal plots across datasets; replaces missing data with zeros.
+ --display_means Display the subjects' means as semi-transparent lines.
+ Poor results when the number of subjects is high.
+ --fill_color FILL_COLOR
+ Hexadecimal RGB color filling the region between mean +/- std.
+ The hexadecimal RGB color should be formatted as 0xRRGGBB.
+ --dict_colors DICT_COLORS
+ Dictionary mapping basename to color. Same convention as --fill_color.
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
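+
+Example (hypothetical paths; the json typically comes from scil_bundle_mean_std.py):
+>>> scil_plot_stats_per_point.py sub-01__stats.json out_plots --fill_color 0x2171B5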
diff --git a/scripts/.hidden/scil_qball_metrics.py.help b/scripts/.hidden/scil_qball_metrics.py.help
new file mode 100644
index 000000000..5c60bcc39
--- /dev/null
+++ b/scripts/.hidden/scil_qball_metrics.py.help
@@ -0,0 +1,71 @@
+usage: scil_qball_metrics.py [-h] [-f] [--sh_order SH_ORDER] [--mask MASK]
+ [--use_qball] [--not_all] [--gfa GFA]
+ [--peaks PEAKS] [--peak_indices PEAK_INDICES]
+ [--sh SH] [--nufo NUFO] [--a_power A_POWER]
+ [--b0_threshold thr] [--skip_b0_check]
+ [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
+ [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]
+ in_dwi in_bval in_bvec
+
+Script to compute the Constant Solid Angle (CSA) or Analytical Q-ball model,
+the generalized fractional anisotropy (GFA) and the peaks of the model.
+
+By default, the script outputs all possible files, using default names.
+Specific names can be set using the flags in the "File flags" section.
+
+If --not_all is set, only the files specified explicitly by the flags will be
+output.
+
+See [Descoteaux et al MRM 2007, Aganj et al MRM 2009] for details and
+[Cote et al MEDIA 2013] for quantitative comparisons.
+
+Formerly: scil_compute_qball_metrics.py
+
+positional arguments:
+ in_dwi Path of the input diffusion volume.
+ in_bval Path of the bvals file, in FSL format.
+ in_bvec Path of the bvecs file, in FSL format.
+
+options:
+ -h, --help show this help message and exit
+ -f Force overwriting of the output files.
+ --sh_order SH_ORDER Spherical harmonics order. Must be a positive even number [4].
+ --mask MASK Path to a binary mask. Only data inside the mask will be used for computations and reconstruction [None].
+ --use_qball If set, qball will be used as the odf reconstruction model instead of CSA.
+ --not_all If set, will only save the files specified using the following flags.
+ --b0_threshold thr Threshold under which b-values are considered to be b0s.
+ [Default: 20]
+ * Note. We would expect to find at least one b-value in the
+ range [0, b0_threshold]. To skip this check, use --skip_b0_check.
+ --skip_b0_check By default, we supervise that at least one b0 exists in your data
+ (i.e. b-values below the default --b0_threshold). Use this option to
+ allow continuing even if the minimum b-value is suspiciously high.
+ If no b-value is found below the threshold, the script will continue
+ with your minimal b-value as new --b0_threshold.
+ Use with care, and only if you understand your data.
+ --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+ Spherical harmonics basis used for the SH coefficients.
+ Must be either 'descoteaux07', 'tournier07',
+ 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
+ 'descoteaux07' : SH basis from the Descoteaux et al.
+ MRM 2007 paper
+ 'tournier07' : SH basis from the new Tournier et al.
+ NeuroImage 2019 paper, as in MRtrix 3.
+ 'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+ of the Descoteaux et al. MRM 2007 paper
+ 'tournier07_legacy' : SH basis from the legacy Tournier et al.
+ NeuroImage 2007 paper.
+ --processes NBR Number of sub-processes to start.
+ Default: [1]
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+
+File flags:
+ --gfa GFA Output filename for the generalized fractional anisotropy [gfa.nii.gz].
+ --peaks PEAKS Output filename for the extracted peaks [peaks.nii.gz].
+ --peak_indices PEAK_INDICES
+ Output filename for the generated peaks indices on the sphere [peaks_indices.nii.gz].
+ --sh SH Output filename for the spherical harmonics coefficients [sh.nii.gz].
+ --nufo NUFO Output filename for the NUFO map [nufo.nii.gz].
+ --a_power A_POWER Output filename for the anisotropic power map [anisotropic_power.nii.gz].
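+
+Example (hypothetical inputs; saves only the GFA map):
+>>> scil_qball_metrics.py dwi.nii.gz dwi.bval dwi.bvec --not_all --gfa gfa.nii.gz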
diff --git a/scripts/.hidden/scil_rgb_convert.py.help b/scripts/.hidden/scil_rgb_convert.py.help
new file mode 100644
index 000000000..145cd097c
--- /dev/null
+++ b/scripts/.hidden/scil_rgb_convert.py.help
@@ -0,0 +1,33 @@
+usage: scil_rgb_convert.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_image out_image
+
+Converts an RGB image encoded as a 4D image to an RGB image encoded as
+a 3D image, or vice versa.
+
+Typically, most software tools used in the SCIL (including MI-Brain) use
+the former, while Trackvis uses the latter.
+
+Input
+-Case 1: 4D image where the 4th dimension contains 3 values.
+-Case 2: 3D image, in Trackvis format where each voxel contains a
+ tuple of 3 elements, one for each value.
+
+Output
+-Case 1: 3D image, in Trackvis format where each voxel contains a
+ tuple of 3 elements, one for each value (uint8).
+-Case 2: 4D image where the 4th dimension contains 3 values (uint8).
+
+Formerly: scil_convert_rgb.py
+
+positional arguments:
+ in_image Name of the input RGB image.
+ Either 4D or 3D image.
+ out_image Name of the output RGB image.
+ Either 3D or 4D image.
+
+options:
+ -h, --help show this help message and exit
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_sh_convert.py.help b/scripts/.hidden/scil_sh_convert.py.help
new file mode 100644
index 000000000..9b4e814a6
--- /dev/null
+++ b/scripts/.hidden/scil_sh_convert.py.help
@@ -0,0 +1,39 @@
+usage: scil_sh_convert.py [-h] [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]
+ [-f]
+ in_sh out_sh
+ {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+ {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+
+Convert an SH file between two of the following basis choices:
+'descoteaux07', 'descoteaux07_legacy', 'tournier07' or 'tournier07_legacy'.
+Using the sh_basis argument, both the input and the output SH bases must be
+given, in that order. For more information about the bases, see
+https://docs.dipy.org/stable/theory/sh_basis.html.
+
+Formerly: scil_convert_sh_basis.py
+
+positional arguments:
+ in_sh Input SH filename. (nii or nii.gz)
+ out_sh Output SH filename. (nii or nii.gz)
+ {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+ Spherical harmonics basis used for the SH coefficients.
+ Both the input and output bases are required, in that order.
+ Must be either 'descoteaux07', 'tournier07',
+ 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy', 'tournier07']]:
+ 'descoteaux07' : SH basis from the Descoteaux et al.
+ MRM 2007 paper
+ 'tournier07' : SH basis from the new Tournier et al.
+ NeuroImage 2019 paper, as in MRtrix 3.
+ 'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+ of the Descoteaux et al.
MRM 2007 paper + 'tournier07_legacy' : SH basis from the legacy Tournier et al. + NeuroImage 2007 paper. + +options: + -h, --help show this help message and exit + --processes NBR Number of sub-processes to start. + Default: [1] + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_sh_fusion.py.help b/scripts/.hidden/scil_sh_fusion.py.help new file mode 100644 index 000000000..16453420f --- /dev/null +++ b/scripts/.hidden/scil_sh_fusion.py.help @@ -0,0 +1,36 @@ +usage: scil_sh_fusion.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f] + in_shs [in_shs ...] out_sh + +Merge a list of Spherical Harmonics files. + +This merges the coefficients of multiple Spherical Harmonics files by taking, +for each coefficient, the one with the largest magnitude. + +Can be used to merge fODFs computed from different shells into 1, while +conserving the most relevant information. + +Based on [1] and [2]. + +Formerly: scil_merge_sh.py + +positional arguments: + in_shs List of SH files. + out_sh output SH file. + +options: + -h, --help show this help message and exit + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + -f Force overwriting of the output files. + +Reference: +[1] Garyfallidis, E., Zucchelli, M., Houde, J-C., Descoteaux, M. + How to perform best ODF reconstruction from the Human Connectome + Project sampling scheme? + ISMRM 2014. + +[2] Khachaturian, M. H., Wisco, J. J., & Tuch, D. S. (2007). Boosting the + sampling efficiency of q‐ball imaging using multiple wavevector fusion. + Magnetic Resonance in Medicine: An Official Journal of the International + Society for Magnetic Resonance in Medicine, 57(2), 289-296. diff --git a/scripts/.hidden/scil_sh_to_aodf.py.help b/scripts/.hidden/scil_sh_to_aodf.py.help new file mode 100644 index 000000000..8fbb6801a --- /dev/null +++ b/scripts/.hidden/scil_sh_to_aodf.py.help @@ -0,0 +1,96 @@ +usage: scil_sh_to_aodf.py [-h] [--out_sym OUT_SYM] + [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] + [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}] + [--method {unified,cosine}] + [--sigma_spatial SIGMA_SPATIAL] + [--sigma_align SIGMA_ALIGN] + [--sigma_range SIGMA_RANGE] + [--sigma_angle SIGMA_ANGLE] [--disable_spatial] + [--disable_align] [--disable_range] + [--include_center] [--win_hwidth WIN_HWIDTH] + [--sharpness SHARPNESS] [--device {cpu,gpu}] + [--use_opencl] [--patch_size PATCH_SIZE] + [-v [{DEBUG,INFO,WARNING}]] [-f] + in_sh out_sh + +Script to estimate asymmetric ODFs (aODFs) from a spherical harmonics image. + +Two methods are available: + * Unified filtering [1] combines four asymmetric filtering methods into + a single equation and relies on a combination of four gaussian filters. + * Cosine filtering [2] is a simpler implementation using cosine distance + for assigning weights to neighbours. + +Unified filtering can be accelerated using OpenCL with the option --use_opencl. +Make sure you have pyopencl installed before using this option. By default, the +OpenCL program will run on the cpu. To use a gpu instead, also specify the +option --device gpu. + +positional arguments: + in_sh Path to the input file. + out_sh File name for averaged signal. 
+
+options:
+ -h, --help show this help message and exit
+ --out_sym OUT_SYM Name of optional symmetric output. [None]
+ --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+ Spherical harmonics basis used for the SH coefficients.
+ Must be either 'descoteaux07', 'tournier07',
+ 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
+ 'descoteaux07' : SH basis from the Descoteaux et al.
+ MRM 2007 paper
+ 'tournier07' : SH basis from the new Tournier et al.
+ NeuroImage 2019 paper, as in MRtrix 3.
+ 'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+ of the Descoteaux et al. MRM 2007 paper
+ 'tournier07_legacy' : SH basis from the legacy Tournier et al.
+ NeuroImage 2007 paper.
+ --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}
+ Sphere used for the SH to SF projection. [repulsion200]
+ --method {unified,cosine}
+ Method for estimating asymmetric ODFs [unified].
+ One of:
+ 'unified': Unified filtering [1].
+ 'cosine' : Cosine-based filtering [2].
+ --device {cpu,gpu} Device to use for execution. [cpu]
+ --use_opencl Accelerate code using OpenCL (requires pyopencl
+ and a working OpenCL implementation).
+ --patch_size PATCH_SIZE
+ OpenCL patch size. [40]
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+Shared filter arguments:
+ --sigma_spatial SIGMA_SPATIAL
+ Standard deviation for spatial distance. [1.0]
+
+Unified filter arguments:
+ --sigma_align SIGMA_ALIGN
+ Standard deviation for alignment filter. [0.8]
+ --sigma_range SIGMA_RANGE
+ Standard deviation for range filter
+ *relative to SF range of image*. [0.2]
+ --sigma_angle SIGMA_ANGLE
+ Standard deviation for angular filter
+ (disabled by default).
+ --disable_spatial Disable spatial filtering.
+ --disable_align Disable alignment filtering.
+ --disable_range Disable range filtering.
+ --include_center Include center voxel in neighbourhood.
+ --win_hwidth WIN_HWIDTH
+ Filtering window half-width. Defaults to 3*sigma_spatial.
+
+Cosine filter arguments:
+ --sharpness SHARPNESS
+ Specify sharpness factor to use for
+ weighted average. [1.0]
+
+[1] Poirier and Descoteaux, 2024, "A Unified Filtering Method for Estimating
+ Asymmetric Orientation Distribution Functions", Neuroimage, vol. 287,
+ https://doi.org/10.1016/j.neuroimage.2024.120516
+
+[2] Poirier et al, 2021, "Investigating the Occurrence of Asymmetric Patterns
+ in White Matter Fiber Orientation Distribution Functions", ISMRM 2021
+ (abstract 0865)
diff --git a/scripts/.hidden/scil_sh_to_rish.py.help b/scripts/.hidden/scil_sh_to_rish.py.help
new file mode 100644
index 000000000..460b67ed4
--- /dev/null
+++ b/scripts/.hidden/scil_sh_to_rish.py.help
@@ -0,0 +1,36 @@
+usage: scil_sh_to_rish.py [-h] [--full_basis] [--mask MASK]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_sh out_prefix
+
+Compute the RISH (Rotationally Invariant Spherical Harmonics) features of an SH
+signal [1].
+
+Each RISH feature map is the total energy of its associated order.
+Mathematically, it is the sum of the squared SH coefficients of the SH order.
+
+This script supports both symmetrical and asymmetrical SH images as input, of
+any SH order.
+
+Each RISH feature will be saved as a separate file.
+
+[1] Mirzaalian, Hengameh, et al. "Harmonizing diffusion MRI data across
+multiple sites and scanners." MICCAI 2015.
+https://scholar.harvard.edu/files/hengameh/files/miccai2015.pdf
+
+Formerly: scil_compute_rish_from_sh.py
+
+positional arguments:
+ in_sh Path of the SH image. It can be formatted in any SH basis,
+ but we expect a symmetrical one; otherwise, provide --full_basis.
+ out_prefix Prefix of the output RISH files to save. Suffixes will be
+ based on the sh orders.
+
+options:
+ -h, --help show this help message and exit
+ --full_basis Input SH image uses a full SH basis (asymmetrical).
+ --mask MASK Path to a binary mask.
+ Only data inside the mask will be used for computation.
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
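+
+Example (hypothetical paths; writes one RISH map per SH order):
+>>> scil_sh_to_rish.py sh.nii.gz sub-01__rish --mask brain_mask.nii.gz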
diff --git a/scripts/.hidden/scil_sh_to_sf.py.help b/scripts/.hidden/scil_sh_to_sf.py.help
new file mode 100644
index 000000000..d943ec5e7
--- /dev/null
+++ b/scripts/.hidden/scil_sh_to_sf.py.help
@@ -0,0 +1,67 @@
+usage: scil_sh_to_sf.py [-h]
+ (--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724} | --in_bvec IN_BVEC)
+ [--dtype {float32,float64}] [--in_bval IN_BVAL]
+ [--in_b0 IN_B0] [--out_bval OUT_BVAL]
+ [--out_bvec OUT_BVEC] [--b0_scaling]
+ [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
+ [--full_basis] [--b0_threshold thr] [--processes NBR]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_sh out_sf
+
+Script to sample SF values from a Spherical Harmonics signal. Outputs a Nifti
+file with the SF values and an associated .bvec file with the chosen
+directions.
+
+If converting from SH to a DWI-like SF volume, --in_bval and --in_b0 need
+to be provided to concatenate the b0 image to the SF, and to generate the new
+bvals file. Otherwise, no .bval file will be created.
+
+Formerly: scil_compute_sf_from_sh.py
+
+positional arguments:
+ in_sh Path of the SH volume.
+ out_sf Name of the output SF file to save (bvals/bvecs will be automatically named when necessary).
+
+options:
+ -h, --help show this help message and exit
+ --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}
+ Sphere used for the SH to SF projection.
+ --in_bvec IN_BVEC Directions used for the SH to SF projection.
+ If given, --in_bval must also be provided.
+ --dtype {float32,float64}
+ Datatype to use for SF computation and output array. [float32]
+ --in_bval IN_BVAL b-value file, in FSL format, used to assign a b-value to the
+ output SF and generate a `.bval` file.
+ - If used, --out_bval is required.
+ - The output bval will contain one b-value per point in the SF
+ output (i.e. one per point on the --sphere or one per --in_bvec.)
+ - The values of the output bval will all be set to the same b-value:
+ the average of your in_bval. (Any b0 found in this file, i.e.
+ b-values under --b0_threshold, will be removed beforehand.)
+ - To add b0s to both the SF volume and the --out_bval file, use --in_b0.
+ --in_b0 IN_B0 b0 volume to concatenate to the final SF volume.
+ --out_bval OUT_BVAL Optional output bval file.
+ --out_bvec OUT_BVEC Optional output bvec file.
+ --b0_scaling Scale resulting SF by the b0 image (--in_b0 must be given).
+ --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+ Spherical harmonics basis used for the SH coefficients.
+ Must be either 'descoteaux07', 'tournier07',
+ 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
+ 'descoteaux07' : SH basis from the Descoteaux et al.
+ MRM 2007 paper
+ 'tournier07' : SH basis from the new Tournier et al.
+ NeuroImage 2019 paper, as in MRtrix 3.
+ 'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+ of the Descoteaux et al. MRM 2007 paper
+ 'tournier07_legacy' : SH basis from the legacy Tournier et al.
+ NeuroImage 2007 paper.
+ --full_basis If true, use a full basis for the input SH coefficients.
+ --b0_threshold thr Threshold under which b-values are considered to be b0s.
+ Default if not set is 20.
+ This value is used with option --in_bval only: any b0 found in the in_bval will be removed.
+ --processes NBR Number of sub-processes to start.
+ Default: [1]
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
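+
+Example of a DWI-like conversion (hypothetical paths; --in_bvec requires
+--in_bval, which in turn requires --out_bval):
+>>> scil_sh_to_sf.py sh.nii.gz sf.nii.gz --in_bvec dwi.bvec --in_bval dwi.bval
+    --in_b0 b0.nii.gz --out_bval sf.bval --out_bvec sf.bvec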
diff --git a/scripts/.hidden/scil_stats_group_comparison.py.help b/scripts/.hidden/scil_stats_group_comparison.py.help
new file mode 100644
index 000000000..af5020579
--- /dev/null
+++ b/scripts/.hidden/scil_stats_group_comparison.py.help
@@ -0,0 +1,70 @@
+usage: scil_stats_group_comparison.py [-h] [--out_dir OUT_DIR]
+ [--out_json OUT_JSON]
+ [--bundles BUNDLES [BUNDLES ...]]
+ [--metrics METRICS [METRICS ...]]
+ [--values VALUES [VALUES ...]]
+ [--alpha_error ALPHA_ERROR]
+ [--generate_graph] [--indent INDENT]
+ [--sort_keys]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ IN_JSON IN_PARTICIPANTS GROUP_BY
+
+Run group comparison statistics on metrics from tractometry:
+1) Separates the sample given a particular variable (group_by) into groups.
+
+2) Performs a Shapiro-Wilk test of normality for every sample:
+https://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test
+
+3) Performs a Levene or Bartlett (depending on normality) test of variance
+homogeneity. Levene:
+https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
+Bartlett:
+https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
+
+4) Tests the group difference for every measure with the correct test depending
+ on the sample (Student, Welch, Mannwhitneyu, ANOVA, Kruskal-Wallis):
+Student :
+https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test
+Welch :
+https://en.wikipedia.org/wiki/Welch%27s_t-test
+Mann-Whitney U :
+https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test
+ANOVA :
+http://www.biostathandbook.com/onewayanova.html
+Kruskal-Wallis :
+https://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variance
+
+5) If the group difference test is positive and the number of groups is greater
+ than 2, tests the group differences two by two.
+
+6) Generates the result for all metrics and bundles.
+
+Formerly: scil_group_comparison.py
+
+positional arguments:
+ IN_JSON Input JSON file from tractometry nextflow pipeline or equivalent.
+ IN_PARTICIPANTS Input tsv participants file. See doc at https://scilpy.readthedocs.io/en/latest/documentation/construct_participants_tsv_file.html.
+ GROUP_BY Variable that will be used to compare groups together.
+
+options:
+ -h, --help show this help message and exit
+ --out_dir OUT_DIR Name of the output folder path. [stats]
+ --out_json OUT_JSON The name of the result json output file, otherwise it will be printed.
+ --bundles BUNDLES [BUNDLES ...], -b BUNDLES [BUNDLES ...]
+ Bundle(s) in which you want to do stats. [all]
+ --metrics METRICS [METRICS ...], -m METRICS [METRICS ...]
+ Metric(s) on which you want to do stats. [all]
+ --values VALUES [VALUES ...], --va VALUES [VALUES ...]
+ Value(s) on which you want to do stats (mean, std). [all]
+ --alpha_error ALPHA_ERROR, -a ALPHA_ERROR
+ Type 1 error for all the tests. [0.05]
+ --generate_graph, --gg
+ Generate a simple plot of every metric across groups.
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+Json options:
+ --indent INDENT Indent for json pretty print.
+ --sort_keys Sort keys in output json.
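+
+Example (hypothetical inputs; groups are defined by a "Group" column in the
+participants tsv):
+>>> scil_stats_group_comparison.py tractometry.json participants.tsv Group
+    -b AF_L -m FA --va mean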
diff --git a/scripts/.hidden/scil_surface_apply_transform.py.help b/scripts/.hidden/scil_surface_apply_transform.py.help
new file mode 100644
index 000000000..3ce730c8c
--- /dev/null
+++ b/scripts/.hidden/scil_surface_apply_transform.py.help
@@ -0,0 +1,38 @@
+usage: scil_surface_apply_transform.py [-h] [--ants_warp ANTS_WARP]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_surface ants_affine out_surface
+
+Script to apply a transform to a surface (FreeSurfer or VTK supported),
+using output from ANTs registration tools (i.e. affine.txt, warp.nii.gz).
+
+Example usage from T1 to b0 using ANTs transforms:
+> ConvertTransformFile 3 output0GenericAffine.mat vtk_transfo.txt --hm
+> scil_surface_apply_transform.py lh_white_lps.vtk affine.txt lh_white_b0.vtk\
+ --ants_warp warp.nii.gz
+
+Important: The input surface needs to be in *T1 world LPS* coordinates
+(aligned over the T1 in MI-Brain).
+
+The script will use the linear affine first and then the warp image.
+The resulting surface will be in *b0 world LPS* coordinates
+(aligned over the b0 in MI-Brain).
+
+Formerly: scil_apply_transform_to_surface.py.
+
+positional arguments:
+ in_surface Input surface (.vtk).
+ ants_affine Affine transform from ANTs (.txt or .mat).
+ out_surface Output surface (.vtk).
+
+options:
+ -h, --help show this help message and exit
+ --ants_warp ANTS_WARP
+ Warp image from ANTs (Nifti image).
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+References:
+[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.
+ Surface-enhanced tractography (SET). NeuroImage.
diff --git a/scripts/.hidden/scil_surface_convert.py.help b/scripts/.hidden/scil_surface_convert.py.help
new file mode 100644
index 000000000..ed5db7ceb
--- /dev/null
+++ b/scripts/.hidden/scil_surface_convert.py.help
@@ -0,0 +1,32 @@
+usage: scil_surface_convert.py [-h] [--xform XFORM] [--to_lps]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_surface out_surface
+
+Script to convert surface formats.
+
+Supported formats:
+ ".vtk", ".vtp", ".ply", ".stl", ".xml", ".obj"
+ and FreeSurfer surfaces
+
+> scil_surface_convert.py surf.vtk converted_surf.ply
+
+Formerly: scil_convert_surface.py
+
+positional arguments:
+ in_surface Input a surface (FreeSurfer or supported by VTK).
+ out_surface Output surface (formats supported by VTK).
+
+options:
+ -h, --help show this help message and exit
+ --xform XFORM Path of the copy-pasted output from mri_info.
+ Using: mri_info $input >> log.txt,
+ the file log.txt would be this parameter.
+ --to_lps Flip for Surface/MI-Brain LPS.
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+References:
+[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.
+ Surface-enhanced tractography (SET). NeuroImage.
diff --git a/scripts/.hidden/scil_surface_flip.py.help b/scripts/.hidden/scil_surface_flip.py.help
new file mode 100644
index 000000000..ffc1aba29
--- /dev/null
+++ b/scripts/.hidden/scil_surface_flip.py.help
@@ -0,0 +1,25 @@
+usage: scil_surface_flip.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_surface out_surface {x,y,z,n} [{x,y,z,n} ...]
+
+Script to flip a given surface (FreeSurfer or VTK supported).
+
+Can flip surface coordinates around a single or multiple axes.
+Can also be used to reverse the orientation of the surface normals.
+
+Formerly: scil_flip_surface.py
+
+positional arguments:
+ in_surface Input surface (.vtk).
+ out_surface Output flipped surface (.vtk).
+ {x,y,z,n} The axes you want to flip. E.g. to flip the x and y axes, use: x y. To reverse the surface normals, use: n.
+
+options:
+ -h, --help show this help message and exit
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
+
+References:
+[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.
+ Surface-enhanced tractography (SET). NeuroImage.
diff --git a/scripts/.hidden/scil_surface_smooth.py.help b/scripts/.hidden/scil_surface_smooth.py.help
new file mode 100644
index 000000000..e208926d0
--- /dev/null
+++ b/scripts/.hidden/scil_surface_smooth.py.help
@@ -0,0 +1,36 @@
+usage: scil_surface_smooth.py [-h] [-m VTS_MASK] [-n NB_STEPS] [-s STEP_SIZE]
+ [-v [{DEBUG,INFO,WARNING}]] [-f]
+ in_surface out_surface
+
+Script to smooth a surface with a Laplacian blur.
+
+For a standard FreeSurfer white matter mesh, a step_size from 0.1 to 10
+is recommended.
+
+Smoothing time = step_size * nb_steps
+ small amount of smoothing [step_size 1, nb_steps 10]
+ moderate amount of smoothing [step_size 10, nb_steps 100]
+ large amount of smoothing [step_size 100, nb_steps 1000]
+
+Formerly: scil_smooth_surface.py
+
+positional arguments:
+ in_surface Input surface (.vtk).
+ out_surface Output smoothed surface (.vtk).
+
+options:
+ -h, --help show this help message and exit
+ -m VTS_MASK, --vts_mask VTS_MASK
+ Vertex mask; no smoothing where the mask equals 0 (.npy).
+ -n NB_STEPS, --nb_steps NB_STEPS
+ Number of steps for the Laplacian smoothing [2].
+ -s STEP_SIZE, --step_size STEP_SIZE
+ Laplacian smoothing step size [5.0].
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
+ -f Force overwriting of the output files.
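+
+Example (hypothetical paths; "moderate" smoothing from the table above):
+> scil_surface_smooth.py lh_white.vtk lh_white_smooth.vtk -s 10 -n 100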
+
+References:
+[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.
+ Surface-enhanced tractography (SET). NeuroImage.
diff --git a/scripts/.hidden/scil_tracking_local.py.help b/scripts/.hidden/scil_tracking_local.py.help
new file mode 100644
index 000000000..42177325b
--- /dev/null
+++ b/scripts/.hidden/scil_tracking_local.py.help
@@ -0,0 +1,167 @@
+usage: scil_tracking_local.py [-h] [--step STEP_SIZE] [--min_length m]
+ [--max_length M] [--theta THETA]
+ [--sfthres sf_th]
+ [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
+ [--npv NPV | --nt NT] [--sh_to_pmf]
+ [--algo {det,prob,ptt,eudx}]
+ [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]
+ [--sub_sphere SUB_SPHERE]
+ [--probe_length PROBE_LENGTH]
+ [--probe_radius PROBE_RADIUS]
+ [--probe_quality PROBE_QUALITY]
+ [--probe_count PROBE_COUNT]
+ [--support_exponent SUPPORT_EXPONENT]
+ [--use_gpu] [--sh_interp {trilinear,nearest}]
+ [--forward_only] [--batch_size BATCH_SIZE]
+ [--compress [COMPRESS_TH]] [-f] [--save_seeds]
+ [--seed SEED] [-v [{DEBUG,INFO,WARNING}]]
+ in_odf in_seed in_mask out_tractogram
+
+Local streamline HARDI tractography.
+The tracking direction is chosen in the aperture cone defined by the
+previous tracking direction and the angular constraint.
+
+WARNING: This script DOES NOT support asymmetric FODF input (aFODF).
+
+Algo 'eudx': select the peak from the spherical function (SF) most closely
+aligned to the previous direction, and follow an average of it and the previous
+direction [1].
+
+Algo 'det': select the orientation corresponding to the maximum of the
+spherical function.
+
+Algo 'prob': select a direction drawn from the empirical distribution function
+defined from the SF.
+
+Algo 'ptt': select the propagation direction using the Parallel-Transport
+Tractography (PTT) framework; see [2] for more details.
+
+NOTE: eudx can be used with pre-computed peaks from fodf as well as
+evecs_v1.nii.gz from scil_dti_metrics.py (experimental).
+
+NOTE: If tracking with PTT, the step-size should be smaller than usual,
+i.e. 0.1-0.2 mm or lower. The maximum angle between segments (theta) should
+be between 10 and 20 degrees.
+
+The local tracking algorithm can also run on the GPU using the --use_gpu
+option (experimental). By default, GPU tracking behaves the same as
+DIPY. Below is a list of known divergences between the CPU and GPU
+implementations:
+ * Backend: The CPU implementation uses DIPY's LocalTracking and the
+ GPU implementation uses an in-house OpenCL implementation.
+ * Algo: For the GPU implementation, the only available algorithm is
+ Algo 'prob'.
+ * SH interpolation: For GPU tracking, SH interpolation can be set to either
+ nearest neighbour or trilinear (default). With DIPY, the only available
+ method is trilinear.
+ * Forward tracking: For GPU tracking, the `--forward_only` flag can be used
+ to disable backward tracking. This option isn't available for CPU
+ tracking.
+
+All the input nifti files must be in isotropic resolution.
+
+References
+----------
+[1]: Garyfallidis, E. (2012). Towards an accurate brain tractography
+[PhD thesis]. University of Cambridge. United Kingdom.
+
+[2]: Aydogan, D. B., & Shi, Y. (2020). Parallel transport tractography.
+IEEE transactions on medical imaging, 40(2), 635-647.
+
+Formerly: scil_compute_local_tracking.py
+
+positional arguments:
+ in_odf File containing the orientation diffusion function
+ as spherical harmonics file (.nii.gz). Ex: ODF or fODF.
+ in_seed Seeding mask (.nii.gz).
+ in_mask Tracking mask (.nii.gz).
+ Tracking will stop outside this mask.
+ The last point of each streamline (triggering the stopping criteria)
+ IS added to the streamline.
+ out_tractogram Tractogram output file (must be .trk or .tck).
+
+options:
+ -h, --help show this help message and exit
+
+Tracking options:
+ --step STEP_SIZE Step size in mm. [0.5]
+ --min_length m Minimum length of a streamline in mm. [10.0]
+ --max_length M Maximum length of a streamline in mm. [300.0]
+ --theta THETA Maximum angle between 2 steps. If the angle is too big, the streamline is
+ stopped and the following point is NOT included.
+ ["eudx"=60, "det"=45, "prob"=20, "ptt"=20]
+ --sfthres sf_th Spherical function relative threshold. [0.1]
+ --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+ Spherical harmonics basis used for the SH coefficients.
+ Must be either 'descoteaux07', 'tournier07',
+ 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
+ 'descoteaux07' : SH basis from the Descoteaux et al.
+ MRM 2007 paper
+ 'tournier07' : SH basis from the new Tournier et al.
+ NeuroImage 2019 paper, as in MRtrix 3.
+ 'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+ of the Descoteaux et al. MRM 2007 paper
+ 'tournier07_legacy' : SH basis from the legacy Tournier et al.
+ NeuroImage 2007 paper.
+ --sh_to_pmf If set, map spherical harmonics to spherical function (pmf) before
+ tracking (faster, requires more memory).
+ --algo {det,prob,ptt,eudx}
+ Algorithm to use. [prob]
+ --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}
+ Dipy sphere; set of possible directions.
+ Default: [repulsion724]
+ --sub_sphere SUB_SPHERE
+ Subdivides each face of the sphere into 4^s new faces. [0]
+
+Seeding options:
+ When no option is provided, uses --npv 1.
+
+ --npv NPV Number of seeds per voxel.
+ --nt NT Total number of seeds to use.
+
+PTT options:
+ --probe_length PROBE_LENGTH
+ The length of the probes. Smaller value
+ yields more dispersed fibers. [1.0]
+ --probe_radius PROBE_RADIUS
+ The radius of the probe. A large probe_radius
+ helps mitigate noise in the pmf but it might
+ make it harder to sample thin and intricate
+ connections, also the boundary of fiber
+ bundles might be eroded. [0]
+ --probe_quality PROBE_QUALITY
+ The quality of the probe. This parameter sets
+ the number of segments to split the cylinder
+ along the length of the probe (minimum=2). [3]
+ --probe_count PROBE_COUNT
+ The number of probes. This parameter sets the
+ number of parallel lines used to model the
+ cylinder (minimum=1). [1]
+ --support_exponent SUPPORT_EXPONENT
+ Data support exponent, used for rejection
+ sampling. [3]
+
+GPU options:
+ --use_gpu Enable GPU tracking (experimental).
+ --sh_interp {trilinear,nearest}
+ SH image interpolation method. [trilinear]
+ --forward_only Perform forward tracking only.
+ --batch_size BATCH_SIZE
+ Approximate size of GPU batches (number
+ of streamlines to track in parallel). [10000]
+
+Output options:
+ --compress [COMPRESS_TH]
+ If set, compress the resulting streamline. Value is the maximum
+ compression distance in mm.
+ A rule of thumb is to set it to 0.1mm for deterministic
+ streamlines and to 0.2mm for probabilistic streamlines. [0.1]
+ -f Force overwriting of the output files.
+ --save_seeds If set, save the seeds used for the tracking
+ in the data_per_streamline property.
+ Hint: you can then use scil_tractogram_seed_density_map.
+ --seed SEED Random number generator seed.
+
+Logging options:
+ -v [{DEBUG,INFO,WARNING}]
+ Produces verbose output depending on the provided level.
+ Default level is warning, default when using -v is info.
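+
+Example (hypothetical inputs; probabilistic tracking with 10 seeds per voxel):
+>>> scil_tracking_local.py fodf.nii.gz seeding_mask.nii.gz tracking_mask.nii.gz
+    tractogram.trk --algo prob --npv 10 --compress 0.2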
diff --git a/scripts/.hidden/scil_tracking_local_dev.py.help b/scripts/.hidden/scil_tracking_local_dev.py.help
new file mode 100644
index 000000000..53d69d330
--- /dev/null
+++ b/scripts/.hidden/scil_tracking_local_dev.py.help
@@ -0,0 +1,158 @@
+usage: scil_tracking_local_dev.py [-h] [--step STEP_SIZE] [--min_length m]
+ [--max_length M] [--theta THETA]
+ [--sfthres sf_th]
+ [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
+ [--npv NPV | --nt NT] [--algo {det,prob}]
+ [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]
+ [--sub_sphere SUB_SPHERE]
+ [--sfthres_init sf_th] [--rk_order K]
+ [--max_invalid_nb_points MAX]
+ [--forward_only]
+ [--sh_interp {nearest,trilinear}]
+ [--mask_interp {nearest,trilinear}]
+ [--keep_last_out_point]
+ [--n_repeats_per_seed N_REPEATS_PER_SEED]
+ [--rng_seed RNG_SEED] [--skip SKIP]
+ [--processes NBR] [--compress [COMPRESS_TH]]
+ [-f] [--save_seeds]
+ [-v [{DEBUG,INFO,WARNING}]]
+ in_odf in_seed in_mask out_tractogram
+
+Local streamline HARDI tractography using scilpy-only methods -- no dipy (i.e.
+no cython). The goal of this is to have a python-only version that can be
+modified more easily by our team when testing new algorithms and parameters,
+and that can be used as parent classes in sub-projects of our lab such as in
+dwi_ml.
+
+WARNING. MUCH SLOWER THAN scil_tracking_local.py. We recommend using
+multiprocessing with the option --processes.
+
+Similar to scil_tracking_local:
+ The tracking direction is chosen in the aperture cone defined by the
+ previous tracking direction and the angular constraint.
+ - Algo 'det': the maximum of the spherical function (SF) most closely
+ aligned to the previous direction.
+ - Algo 'prob': a direction drawn from the empirical distribution function
+ defined from the SF.
+
+Contrary to scil_tracking_local:
+ - Algo 'eudx' is not yet available!
+ - Input nifti files do not necessarily need to be in isotropic resolution.
+ - The script works with asymmetric input ODF.
+ - The interpolation for the tracking mask and spherical function can be
+ one of 'nearest' or 'trilinear'.
+ - Runge-Kutta integration is supported for the step function.
+
+A few notes on Runge-Kutta integration.
+ 1. Runge-Kutta integration is used to approximate the next tracking
+ direction by estimating directions from future tracking steps. This
+ works well for deterministic tracking. However, in the context of
+ probabilistic tracking, the next tracking directions cannot be estimated
+ in advance, because they are picked randomly from a distribution. It is
+ therefore recommended to keep the rk_order to 1 for probabilistic
+ tracking.
+ 2. As a rule of thumb, doubling the rk_order will double the computation
+ time in the worst case.
+
+References: [1] Girard, G., Whittingstall K., Deriche, R., and
+ Descoteaux, M. (2014). Towards quantitative connectivity analysis:
+ reducing tractography biases. Neuroimage, 98, 266-278.
+
+Formerly: scil_compute_local_tracking_dev.py
+
+positional arguments:
+ in_odf File containing the orientation diffusion function
+ as spherical harmonics file (.nii.gz). Ex: ODF or fODF.
+ in_seed Seeding mask (.nii.gz).
+ in_mask Tracking mask (.nii.gz).
+ Tracking will stop outside this mask.
The last point of each + streamline (triggering the stopping criteria) IS added to the streamline. + out_tractogram Tractogram output file (must be .trk or .tck). + +options: + -h, --help show this help message and exit + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + +Tracking options: + --step STEP_SIZE Step size in mm. [0.5] + --min_length m Minimum length of a streamline in mm. [10.0] + --max_length M Maximum length of a streamline in mm. [300.0] + --theta THETA Maximum angle between 2 steps. If the angle is too big, streamline is + stopped and the following point is NOT included. + ["eudx"=60, "det"=45, "prob"=20, "ptt"=20] + --sfthres sf_th Spherical function relative threshold. [0.1] + --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} + Spherical harmonics basis used for the SH coefficients. + Must be either descoteaux07', 'tournier07', + 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: + 'descoteaux07' : SH basis from the Descoteaux et al. + MRM 2007 paper + 'tournier07' : SH basis from the new Tournier et al. + NeuroImage 2019 paper, as in MRtrix 3. + 'descoteaux07_legacy': SH basis from the legacy Dipy implementation + of the Descoteaux et al. MRM 2007 paper + 'tournier07_legacy' : SH basis from the legacy Tournier et al. + NeuroImage 2007 paper. + --algo {det,prob} Algorithm to use. [prob] + --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724} + Dipy sphere; set of possible directions. + Default: [repulsion724] + --sub_sphere SUB_SPHERE + Subdivides each face of the sphere into 4^s new faces. [0] + --sfthres_init sf_th Spherical function relative threshold value for the + initial direction. [0.5] + --rk_order K The order of the Runge-Kutta integration used for the step function. + For more information, refer to the note in the script description. [1] + --max_invalid_nb_points MAX + Maximum number of steps without valid direction, + ex: if threshold on ODF or max angles are reached. + Default: 0, i.e. do not add points following an invalid direction. + --forward_only If set, tracks in one direction only (forward) given the + initial seed. The direction is randomly drawn from the ODF. + --sh_interp {nearest,trilinear} + Spherical harmonic interpolation: nearest-neighbor + or trilinear. [trilinear] + --mask_interp {nearest,trilinear} + Mask interpolation: nearest-neighbor or trilinear. [nearest] + --keep_last_out_point + If set, keep the last point (once out of the tracking mask) of + the streamline. Default: discard them. This is the default in + Dipy too. Note that points obtained after an invalid direction + (ex when angle is too sharp or sh_threshold not reached) are + never added. + --n_repeats_per_seed N_REPEATS_PER_SEED + By default, each seed position is used only once. This option + allows for tracking from the exact same seed n_repeats_per_seed + times. [1] + +Seeding options: + When no option is provided, uses --npv 1. + + --npv NPV Number of seeds per voxel. + --nt NT Total number of seeds to use. + +Random seeding options: + --rng_seed RNG_SEED Initial value for the random number generator. [0] + --skip SKIP Skip the first N random number. + Useful if you want to create new streamlines to add to + a previously created tractogram with a fixed --rng_seed. + Ex: If tractogram_1 was created with -nt 1,000,000, + you can create tractogram_2 with + --skip 1,000,000. 
+
+Memory options:
+  --processes NBR       Number of sub-processes to start.
+                        Default: [1]
+
+Output options:
+  --compress [COMPRESS_TH]
+                        If set, compress the resulting streamlines. Value is the maximum
+                        compression distance in mm.
+                        A rule of thumb is to set it to 0.1mm for deterministic
+                        streamlines and to 0.2mm for probabilistic streamlines. [0.1]
+  -f                    Force overwriting of the output files.
+  --save_seeds          If set, save the seeds used for the tracking
+                        in the data_per_streamline property.
+                        Hint: you can then use scil_tractogram_seed_density_map.
diff --git a/scripts/.hidden/scil_tracking_pft.py.help b/scripts/.hidden/scil_tracking_pft.py.help
new file mode 100644
index 000000000..54db07bd3
--- /dev/null
+++ b/scripts/.hidden/scil_tracking_pft.py.help
@@ -0,0 +1,107 @@
+usage: scil_tracking_pft.py [-h] [--algo {det,prob}] [--step STEP_SIZE]
+                            [--min_length MIN_LENGTH]
+                            [--max_length MAX_LENGTH] [--theta THETA] [--act]
+                            [--sfthres SF_THRESHOLD]
+                            [--sfthres_init SF_THRESHOLD_INIT]
+                            [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
+                            [--npv NPV | --nt NT] [--particles PARTICLES]
+                            [--back BACK_TRACKING]
+                            [--forward FORWARD_TRACKING] [--all] [--seed SEED]
+                            [-f] [--save_seeds] [--compress [COMPRESS_TH]]
+                            [-v [{DEBUG,INFO,WARNING}]]
+                            in_sh in_seed in_map_include map_exclude_file
+                            out_tractogram
+
+Local streamline HARDI tractography including Particle Filtering tracking.
+
+WARNING: This script DOES NOT support asymmetric FODF input (aFODF).
+
+The tracking is done inside partial volume estimation maps and uses the
+particle filtering tractography (PFT) algorithm. See
+scil_tracking_pft_maps.py to generate PFT required maps.
+
+Streamlines longer than min_length and shorter than max_length are kept.
+The tracking direction is chosen in the aperture cone defined by the
+previous tracking direction and the angular constraint.
+Default parameters as suggested in [1].
+
+Algo 'det': the maximum of the spherical function (SF) most closely aligned
+with the previous direction.
+Algo 'prob': a direction drawn from the empirical distribution function defined
+from the SF.
+
+For streamline compression, a rule of thumb is to set it to 0.1mm for the
+deterministic algorithm and 0.2mm for the probabilistic algorithm.
+
+All the input nifti files must be in isotropic resolution.
+
+Formerly: scil_compute_pft.py
+
+positional arguments:
+  in_sh                 Spherical harmonic file (.nii.gz).
+  in_seed               Seeding mask (.nii.gz).
+  in_map_include        The probability map (.nii.gz) of ending the
+                        streamline and including it in the output (CMC, PFT [1]).
+  map_exclude_file      The probability map (.nii.gz) of ending the
+                        streamline and excluding it in the output (CMC, PFT [1]).
+  out_tractogram        Tractogram output file (must be .trk or .tck).
+
+Generic options:
+  -h, --help            show this help message and exit
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+
+Tracking options:
+  --algo {det,prob}     Algorithm to use (must be "det" or "prob"). [prob]
+  --step STEP_SIZE      Step size in mm. [0.2]
+  --min_length MIN_LENGTH
+                        Minimum length of a streamline in mm. [10.0]
+  --max_length MAX_LENGTH
+                        Maximum length of a streamline in mm. [300.0]
+  --theta THETA         Maximum angle between 2 steps. ["det"=45, "prob"=20]
+  --act                 If set, uses anatomically-constrained tractography (ACT)
+                        instead of continuous map criterion (CMC).
+  --sfthres SF_THRESHOLD
+                        Spherical function relative threshold. [0.1]
[0.1] + --sfthres_init SF_THRESHOLD_INIT + Spherical function relative threshold value for the + initial direction. [0.5] + --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} + Spherical harmonics basis used for the SH coefficients. + Must be either descoteaux07', 'tournier07', + 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: + 'descoteaux07' : SH basis from the Descoteaux et al. + MRM 2007 paper + 'tournier07' : SH basis from the new Tournier et al. + NeuroImage 2019 paper, as in MRtrix 3. + 'descoteaux07_legacy': SH basis from the legacy Dipy implementation + of the Descoteaux et al. MRM 2007 paper + 'tournier07_legacy' : SH basis from the legacy Tournier et al. + NeuroImage 2007 paper. + +Seeding options: + When no option is provided, uses --npv 1. + + --npv NPV Number of seeds per voxel. + --nt NT Total number of seeds to use. + +PFT options: + --particles PARTICLES + Number of particles to use for PFT. [15] + --back BACK_TRACKING Length of PFT back tracking (mm). [2.0] + --forward FORWARD_TRACKING + Length of PFT forward tracking (mm). [1.0] + +Output options: + --all If set, keeps "excluded" streamlines. + NOT RECOMMENDED, except for debugging. + --seed SEED Random number generator seed. + -f Force overwriting of the output files. + --save_seeds If set, save the seeds used for the tracking + in the data_per_streamline property. + --compress [COMPRESS_TH] + If set, compress the resulting streamline. Value is the maximum + compression distance in mm.[0.1] + +References: [1] Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M. (2014). Towards quantitative connectivity analysis: reducing tractography biases. Neuroimage, 98, 266-278. diff --git a/scripts/.hidden/scil_tracking_pft_maps.py.help b/scripts/.hidden/scil_tracking_pft_maps.py.help new file mode 100644 index 000000000..a29968244 --- /dev/null +++ b/scripts/.hidden/scil_tracking_pft_maps.py.help @@ -0,0 +1,31 @@ +usage: scil_tracking_pft_maps.py [-h] [--include filename] + [--exclude filename] [--interface filename] + [-t THRESHOLD] [-f] + [-v [{DEBUG,INFO,WARNING}]] + in_wm in_gm in_csf + +Compute include and exclude maps, and the seeding interface mask from partial +volume estimation (PVE) maps. Maps should have values in [0,1], gm+wm+csf=1 in +all voxels of the brain, gm+wm+csf=0 elsewhere. + +References: Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M. +(2014). Towards quantitative connectivity analysis: reducing tractography +biases. Neuroimage. + +Formerly: scil_compute_maps_for_particle_filter_tracking.py + +positional arguments: + in_wm White matter PVE map (nifti). From normal FAST output, has a PVE_2 name suffix. + in_gm Grey matter PVE map (nifti). From normal FAST output, has a PVE_1 name suffix. + in_csf Cerebrospinal fluid PVE map (nifti). From normal FAST output, has a PVE_0 name suffix. + +options: + -h, --help show this help message and exit + --include filename Output include map (nifti). [map_include.nii.gz] + --exclude filename Output exclude map (nifti). [map_exclude.nii.gz] + --interface filename Output interface seeding mask (nifti). [interface.nii.gz] + -t THRESHOLD Minimum gm and wm PVE values in a voxel to be into the interface. [0.1] + -f Force overwriting of the output files. + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. 
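+
+For illustration, a possible call could look as follows (a minimal sketch;
+the FAST filenames are placeholders, and the options are those documented
+above):
+>>> scil_tracking_pft_maps.py sub01_PVE_2.nii.gz sub01_PVE_1.nii.gz
+    sub01_PVE_0.nii.gz --include map_include.nii.gz
+    --exclude map_exclude.nii.gz --interface interface.nii.gz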
diff --git a/scripts/.hidden/scil_tracking_pft_maps_edit.py.help b/scripts/.hidden/scil_tracking_pft_maps_edit.py.help
new file mode 100644
index 000000000..49889877d
--- /dev/null
+++ b/scripts/.hidden/scil_tracking_pft_maps_edit.py.help
@@ -0,0 +1,21 @@
+usage: scil_tracking_pft_maps_edit.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                      map_include map_exclude additional_mask
+                                      map_include_corr map_exclude_corr
+
+Modify PFT maps to allow PFT tracking in a given mask (e.g. edema).
+
+Formerly: scil_add_tracking_mask_to_pft_maps.py.
+
+positional arguments:
+  map_include       PFT map include.
+  map_exclude       PFT map exclude.
+  additional_mask   Allow PFT tracking in this mask.
+  map_include_corr  Corrected PFT map include output file name.
+  map_exclude_corr  Corrected PFT map exclude output file name.
+
+options:
+  -h, --help            show this help message and exit
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_apply_transform.py.help b/scripts/.hidden/scil_tractogram_apply_transform.py.help
new file mode 100644
index 000000000..d74fb6374
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_apply_transform.py.help
@@ -0,0 +1,78 @@
+usage: scil_tractogram_apply_transform.py [-h] [--inverse]
+                                          [--in_deformation file]
+                                          [--reverse_operation]
+                                          [--cut_invalid | --remove_invalid | --keep_invalid]
+                                          [--no_empty] [--reference REFERENCE]
+                                          [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                          in_moving_tractogram in_target_file
+                                          in_transfo out_tractogram
+
+Transform a tractogram using an affine/rigid transformation and nonlinear
+deformation (optional).
+
+For more information on how to use the registration script, follow this link:
+https://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html
+
+Applying a transformation to a tractogram can lead to invalid streamlines (out of
+the bounding box), and thus four strategies are available:
+1) Do nothing, may crash at saving if invalid streamlines are present.
+   [This is the default]
+2) --keep_invalid, save invalid streamlines. Leave it to the user to run
+   scil_tractogram_remove_invalid.py if needed.
+3) --remove_invalid, automatically remove invalid streamlines before saving.
+   Should not remove more than a few streamlines. Typically, the streamlines
+   that are rejected are the ones reaching the limits of the brain, ex, near
+   the pons.
+4) --cut_invalid, automatically cut invalid streamlines before saving, i.e. the
+   streamlines are kept but the points out of the bounding box are cut.
+
+Example:
+To apply a transformation from ANTs to a tractogram, if the ANTs command was
+MOVING->REFERENCE...
+1) To apply the original transformation:
+scil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}
+                                   0GenericAffine.mat ${OUTPUT_NAME}
+                                   --inverse
+                                   --in_deformation 1InverseWarp.nii.gz
+
+2) To apply the inverse transformation, i.e. REFERENCE->MOVING:
+scil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}
+                                   0GenericAffine.mat ${OUTPUT_NAME}
+                                   --in_deformation 1Warp.nii.gz
+                                   --reverse_operation
+
+Formerly: scil_apply_transform_to_tractogram.py
+
+positional arguments:
+  in_moving_tractogram  Path of the tractogram to be transformed.
+                        Bounding box validity will not be checked (could
+                        contain invalid streamlines).
+  in_target_file        Path of the reference target file (trk or nii).
+  in_transfo            Path of the file containing the 4x4
+                        transformation matrix (.txt, .npy or .mat).
+  out_tractogram        Output tractogram filename (transformed data).
+
+options:
+  -h, --help            show this help message and exit
+  --no_empty            Do not write file if there is no streamline.
+                        You may save an empty file if you use remove_invalid.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Transformation options:
+  --inverse             Apply the inverse linear transformation.
+  --in_deformation file
+                        Path to the file containing a deformation field.
+  --reverse_operation   Apply the transformation in reverse (see doc), warp
+                        first, then linear.
+
+Management of invalid streamlines:
+  --cut_invalid         Cut invalid streamlines rather than removing them.
+                        Keep the longest segment only.
+  --remove_invalid      Remove the streamlines landing out of the bounding box.
+  --keep_invalid        Keep the streamlines landing out of the bounding box.
diff --git a/scripts/.hidden/scil_tractogram_apply_transform_to_hdf5.py.help b/scripts/.hidden/scil_tractogram_apply_transform_to_hdf5.py.help
new file mode 100644
index 000000000..b60da727a
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_apply_transform_to_hdf5.py.help
@@ -0,0 +1,52 @@
+usage: scil_tractogram_apply_transform_to_hdf5.py [-h] [--inverse]
+                                                  [--in_deformation file]
+                                                  [--reverse_operation]
+                                                  [--cut_invalid | --remove_invalid | --keep_invalid]
+                                                  [--reference REFERENCE]
+                                                  [-v [{DEBUG,INFO,WARNING}]]
+                                                  [-f]
+                                                  in_hdf5 in_target_file
+                                                  in_transfo out_hdf5
+
+Transform tractogram(s) contained in the hdf5 output from a connectivity
+script, using an affine/rigid transformation and nonlinear deformation
+(optional).
+
+See scil_tractogram_apply_transform.py to apply directly to a tractogram.
+
+For more information on how to use the registration script, follow this link:
+https://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html
+
+Or use >> scil_tractogram_apply_transform.py --help
+
+Formerly: scil_apply_transform_to_hdf5.py
+
+positional arguments:
+  in_hdf5               Path of the hdf5 containing the moving tractogram, to be transformed (.h5 extension).
+  in_target_file        Path of the reference target file (.trk or .nii).
+  in_transfo            Path of the file containing the 4x4
+                        transformation matrix (.txt, .npy or .mat).
+  out_hdf5              Output tractogram filename (transformed data).
+
+options:
+  -h, --help            show this help message and exit
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Transformation options:
+  --inverse             Apply the inverse linear transformation.
+  --in_deformation file
+                        Path to the file containing a deformation field.
+  --reverse_operation   Apply the transformation in reverse (see doc), warp
+                        first, then linear.
+
+Management of invalid streamlines:
+  --cut_invalid         Cut invalid streamlines rather than removing them.
+                        Keep the longest segment only.
+  --remove_invalid      Remove the streamlines landing out of the bounding box.
+  --keep_invalid        Keep the streamlines landing out of the bounding box.
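+
+For illustration, a possible call mirroring the ANTs example in
+scil_tractogram_apply_transform.py's help could be (a sketch; filenames are
+placeholders):
+>>> scil_tractogram_apply_transform_to_hdf5.py decompose.h5 ${REFERENCE_FILE}
+    0GenericAffine.mat decompose_transformed.h5
+    --inverse --in_deformation 1InverseWarp.nii.gz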
diff --git a/scripts/.hidden/scil_tractogram_assign_custom_color.py.help b/scripts/.hidden/scil_tractogram_assign_custom_color.py.help
new file mode 100644
index 000000000..e69de29bb
diff --git a/scripts/.hidden/scil_tractogram_assign_uniform_color.py.help b/scripts/.hidden/scil_tractogram_assign_uniform_color.py.help
new file mode 100644
index 000000000..ece21001f
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_assign_uniform_color.py.help
@@ -0,0 +1,50 @@
+usage: scil_tractogram_assign_uniform_color.py [-h]
+                                               (--fill_color str | --dict_colors file.json)
+                                               (--out_suffix [suffix] | --out_tractogram file.trk)
+                                               [--reference REFERENCE]
+                                               [-v [{DEBUG,INFO,WARNING}]]
+                                               [-f]
+                                               in_tractograms
+                                               [in_tractograms ...]
+
+Assign a hexadecimal RGB color to one or more Trackvis (.trk) tractograms.
+(If called with .tck, the output will always be .trk, because data_per_point
+has no equivalent in the tck format.)
+
+Saves the RGB values in the data_per_point 'color' with values
+(color_x, color_y, color_z).
+
+The hexadecimal RGB color should be formatted as 0xRRGGBB or "#RRGGBB".
+
+See also: scil_tractogram_assign_custom_color.py
+
+Formerly: scil_assign_uniform_color_to_tractograms.py
+
+positional arguments:
+  in_tractograms        Input tractograms (.trk or .tck).
+
+options:
+  -h, --help            show this help message and exit
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Coloring Methods:
+  --fill_color str      Can be hexadecimal (i.e. either "#RRGGBB" or 0xRRGGBB).
+  --dict_colors file.json
+                        Json file: dictionary mapping each tractogram's basename to a color.
+                        Do not put your file's extension in your dict.
+                        Same convention as --fill_color.
+
+Output options:
+  --out_suffix [suffix]
+                        Specify suffix to append to input basename.
+                        Mandatory choice if you run this script on multiple tractograms.
+                        Mandatory choice with --dict_colors.
+                        [None]
+  --out_tractogram file.trk
+                        Output filename of colored tractogram (.trk).
diff --git a/scripts/.hidden/scil_tractogram_commit.py.help b/scripts/.hidden/scil_tractogram_commit.py.help
new file mode 100644
index 000000000..dae781282
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_commit.py.help
@@ -0,0 +1,160 @@
+usage: scil_tractogram_commit.py [-h] [--nbr_dir NBR_DIR]
+                                 [--nbr_iter NBR_ITER] [--in_peaks IN_PEAKS]
+                                 [--in_tracking_mask IN_TRACKING_MASK]
+                                 [--commit2]
+                                 [--lambda_commit_2 LAMBDA_COMMIT_2]
+                                 [--ball_stick] [--para_diff PARA_DIFF]
+                                 [--perp_diff PERP_DIFF [PERP_DIFF ...]]
+                                 [--iso_diff ISO_DIFF [ISO_DIFF ...]]
+                                 [--keep_whole_tractogram]
+                                 [--save_kernels DIRECTORY | --load_kernels DIRECTORY]
+                                 [--compute_only] [--tolerance tol]
+                                 [--skip_b0_check] [--processes NBR]
+                                 [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                 in_tractogram in_dwi in_bval in_bvec out_dir
+
+Convex Optimization Modeling for Microstructure Informed Tractography (COMMIT)
+estimates, globally, how a given tractogram explains the DWI in terms of signal
+fit, assuming a certain forward microstructure model. It assigns a weight to
+each streamline, which represents how well it explains the DWI signal globally.
+The default forward microstructure model is stick-zeppelin-ball, which requires
+multi-shell data and a peak file (principal fiber directions in each voxel,
+typically from a field of fODFs).
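+
+For illustration, a typical call with the default stick-zeppelin-ball model
+could look as follows (a sketch; all filenames are placeholders):
+>>> scil_tractogram_commit.py tractogram.trk dwi.nii.gz dwi.bval dwi.bvec
+    commit_results/ --in_peaks peaks.nii.gz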
+
+It is possible to use the ball-and-stick model for single-shell and multi-shell
+data. In this case, the peak file is not mandatory. Multi-shell should follow a
+"NODDI protocol" (low and high b-values); multiple shells with similar b-values
+should not be used with COMMIT.
+
+The output from COMMIT is:
+- fit_NRMSE.nii.gz
+    fitting error (Normalized Root Mean Square Error)
+- fit_RMSE.nii.gz
+    fitting error (Root Mean Square Error)
+- results.pickle
+    Dictionary containing the experiment parameters and final weights
+- compartment_EC.nii.gz
+    (est. Extra-Cellular signal fraction)
+- compartment_IC.nii.gz
+    (est. Intra-Cellular signal fraction)
+- compartment_ISO.nii.gz
+    (est. isotropic signal fraction (freewater compartment)):
+    Each of COMMIT's compartments
+- streamline_weights.txt
+    Text file containing the commit weights for each streamline of the
+    input tractogram.
+- streamlines_length.txt
+    Text file containing the length (mm) of each streamline.
+- streamline_weights_by_length.txt
+    Text file containing the commit weights for each streamline of the
+    input tractogram, ordered by their length.
+- tot_streamline_weights
+    Text file containing the total commit weights of each streamline.
+    Equal to commit_weights * streamlines_length (W_i * L_i)
+- essential.trk / non_essential.trk
+    Tractograms containing the streamlines with a weight above (essential)
+    and at or below (non_essential) the weight threshold of 0.
+- decompose_commit.h5
+    In the case where the input is a hdf5 file only, we will save an output
+    hdf5 with the following information separated into each bundle's dps:
+        - streamlines_weights
+        - streamline_weights_by_length
+    For each bundle, only the essential streamlines are kept.
+
+This script can divide the input tractogram in two using a threshold applied
+to the streamlines' weight. The threshold used is 0.0, keeping only streamlines
+that have non-zero weight and that contribute to explaining the DWI signal.
+Streamlines with 0 weight are essentially not necessary according to COMMIT.
+
+COMMIT2 is available only for HDF5 data from
+scil_tractogram_segment_bundles_for_connectivity.py and
+with the --ball_stick option. Use the --commit2 option to activate it; the
+computation time is slightly longer. This wrapper offers a simplified way to
+call COMMIT, but does not allow using (or fine-tuning) every parameter. If you
+want to use COMMIT with full access to all parameters,
+visit: https://github.com/daducci/COMMIT
+
+When tuning parameters, such as --iso_diff, --para_diff, --perp_diff or
+--lambda_commit_2, you should evaluate the quality of results by:
+    - Looking at the 'density' (GTM) of the connectome (essential tractogram)
+    - Confirming the quality of WM bundles reconstruction (essential tractogram)
+    - Inspecting the (N)RMSE map and looking for peaks or anomalies
+    - Comparing the density map before and after (essential tractogram)
+
+Formerly: scil_run_commit.py
+
+positional arguments:
+  in_tractogram         Input tractogram (.trk or .tck or .h5).
+  in_dwi                Diffusion-weighted image used by COMMIT (.nii.gz).
+  in_bval               b-values in the FSL format (.bval).
+  in_bvec               b-vectors in the FSL format (.bvec).
+  out_dir               Output directory for the COMMIT maps.
+
+options:
+  -h, --help            show this help message and exit
+  --nbr_dir NBR_DIR     Number of directions, on the half of the sphere,
+                        representing the possible orientations of the response functions [500].
+  --nbr_iter NBR_ITER   Maximum number of iterations [1000].
+  --in_peaks IN_PEAKS   Peaks file representing principal direction(s) locally,
+                        typically coming from fODFs. This file is mandatory for the default
+                        stick-zeppelin-ball model.
+  --in_tracking_mask IN_TRACKING_MASK
+                        Binary mask where tractography was allowed.
+                        If not set, uses a binary mask computed from the streamlines.
+  --tolerance tol       The tolerated gap between the b-values to extract and the current b-value.
+                        [Default: 20]
+                        * Note. We would expect to find at least one b-value in the
+                        range [0, tolerance]. To skip this check, use --skip_b0_check.
+  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
+                        (i.e. b-values below the default --b0_threshold). Use this option to
+                        allow continuing even if the minimum b-value is suspiciously high.
+                        If no b-value is found below the threshold, the script will continue
+                        with your minimal b-value as new --b0_threshold.
+                        Use with care, and only if you understand your data.
+  --processes NBR       Number of sub-processes to start.
+                        Default: [1]
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+COMMIT2 options:
+  --commit2             Run COMMIT2; requires .h5 as input and will force the
+                        ball & stick model.
+  --lambda_commit_2 LAMBDA_COMMIT_2
+                        Specify the clustering prior strength [0.001].
+
+Model options:
+  --ball_stick          Use the ball & stick model; disables the zeppelin compartment.
+                        Only model suitable for single-shell data.
+  --para_diff PARA_DIFF
+                        Parallel diffusivity in mm^2/s.
+                        Default for both ball_stick and stick_zeppelin_ball: 1.7E-3.
+  --perp_diff PERP_DIFF [PERP_DIFF ...]
+                        Perpendicular diffusivity in mm^2/s.
+                        Default for ball_stick: None
+                        Default for stick_zeppelin_ball: [0.51E-3]
+  --iso_diff ISO_DIFF [ISO_DIFF ...]
+                        Isotropic diffusivity in mm^2/s.
+                        Default for ball_stick: [2.0E-3]
+                        Default for stick_zeppelin_ball: [1.7E-3, 3.0E-3]
+
+Tractogram options:
+  --keep_whole_tractogram
+                        Save a tractogram copy with streamlines weights in the data_per_streamline
+                        [False].
+  --compute_only        Compute kernels only; --save_kernels must be used.
+
+Kernels options:
+  --save_kernels DIRECTORY
+                        Output directory for the COMMIT kernels.
+  --load_kernels DIRECTORY
+                        Input directory where the COMMIT kernels are located.
+
+References:
+[1] Daducci, Alessandro, et al. "COMMIT: convex optimization modeling for
+    microstructure informed tractography." IEEE transactions on medical
+    imaging 34.1 (2014): 246-257.
+[2] Schiavi, Simona, et al. "A new method for accurate in vivo mapping of
+    human brain connections using microstructural and anatomical information."
+    Science advances 6.31 (2020): eaba8245.
diff --git a/scripts/.hidden/scil_tractogram_compress.py.help b/scripts/.hidden/scil_tractogram_compress.py.help
new file mode 100644
index 000000000..03f751591
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_compress.py.help
@@ -0,0 +1,22 @@
+usage: scil_tractogram_compress.py [-h] [-e ERROR_RATE]
+                                   [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                   in_tractogram out_tractogram
+
+Compress tractogram by removing collinear (or almost collinear) points.
+
+The compression threshold represents the maximum distance (in mm) to the
+original position of the point.
+
+Formerly: scil_compress_streamlines.py
+
+positional arguments:
+  in_tractogram   Path of the input tractogram file (trk or tck).
+  out_tractogram  Path of the output tractogram file (trk or tck).
+
+options:
+  -h, --help      show this help message and exit
+  -e ERROR_RATE   Maximum compression distance in mm [0.1].
+  -v [{DEBUG,INFO,WARNING}]
+                  Produces verbose output depending on the provided level.
+                  Default level is warning, default when using -v is info.
+  -f              Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_compute_TODI.py.help b/scripts/.hidden/scil_tractogram_compute_TODI.py.help
new file mode 100644
index 000000000..9f203a5f2
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_compute_TODI.py.help
@@ -0,0 +1,74 @@
+usage: scil_tractogram_compute_TODI.py [-h] [--sphere SPHERE] [--mask MASK]
+                                       [--sh_order SH_ORDER]
+                                       [--normalize_per_voxel]
+                                       [--smooth_todi | --asymmetric]
+                                       [--n_steps N_STEPS]
+                                       [--out_mask OUT_MASK]
+                                       [--out_tdi OUT_TDI]
+                                       [--out_todi_sf OUT_TODI_SF]
+                                       [--out_todi_sh OUT_TODI_SH]
+                                       [--reference REFERENCE]
+                                       [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
+                                       [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                       in_tractogram
+
+Compute a Track Orientation Density Image (TODI).
+
+Each segment of the streamlines is weighted by its length (to support
+compressed streamlines).
+
+This script can afterwards output a Track Density Image (TDI) or a TODI with SF
+or SH representation, based on streamlines' segments.
+
+Formerly: scil_compute_todi.py
+
+positional arguments:
+  in_tractogram         Input streamlines file.
+
+options:
+  -h, --help            show this help message and exit
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+                        Spherical harmonics basis used for the SH coefficients.
+                        Must be either 'descoteaux07', 'tournier07',
+                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
+                        'descoteaux07'       : SH basis from the Descoteaux et al.
+                                               MRM 2007 paper
+                        'tournier07'         : SH basis from the new Tournier et al.
+                                               NeuroImage 2019 paper, as in MRtrix 3.
+                        'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+                                               of the Descoteaux et al. MRM 2007 paper
+                        'tournier07_legacy'  : SH basis from the legacy Tournier et al.
+                                               NeuroImage 2007 paper.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Computing options:
+  --sphere SPHERE       Sphere used for the angular discretization. [repulsion724]
+  --mask MASK           If set, use the given mask.
+  --sh_order SH_ORDER   Order of the original SH. [8]
+  --normalize_per_voxel
+                        If set, normalize each SF/SH at each voxel.
+  --smooth_todi         If set, smooth TODI (angular and spatial).
+  --asymmetric          If set, compute asymmetric TODI.
+                        Cannot be used with --smooth_todi.
+  --n_steps N_STEPS     Number of steps for streamline segments subdivision prior to binning [1].
+
+Output files. Saved only when a filename is set:
+  --out_mask OUT_MASK   Mask showing where TDI > 0.
+  --out_tdi OUT_TDI     Output Track Density Image (TDI).
+  --out_todi_sf OUT_TODI_SF
+                        Output TODI, with SF (each direction
+                        on the sphere; requires a lot of memory)
+  --out_todi_sh OUT_TODI_SH
+                        Output TODI, with SH coefficients.
+
+References:
+    [1] Dhollander T, Emsell L, Van Hecke W, Maes F, Sunaert S, Suetens P.
+        Track orientation density imaging (TODI) and
+        track orientation distribution (TOD) based tractography.
+        NeuroImage. 2014 Jul 1;94:312-36.
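+
+For illustration, a possible call computing both a TDI and a TODI in SH
+representation could be (a sketch; filenames are placeholders):
+>>> scil_tractogram_compute_TODI.py tractogram.trk --out_tdi tdi.nii.gz
+    --out_todi_sh todi_sh.nii.gz --sh_order 8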
diff --git a/scripts/.hidden/scil_tractogram_compute_density_map.py.help b/scripts/.hidden/scil_tractogram_compute_density_map.py.help
new file mode 100644
index 000000000..f78ac000e
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_compute_density_map.py.help
@@ -0,0 +1,28 @@
+usage: scil_tractogram_compute_density_map.py [-h] [--binary [FIXED_VALUE]]
+                                              [--reference REFERENCE]
+                                              [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                              in_bundle out_img
+
+Compute a density map from a streamlines file. Can be binary.
+
+This script correctly handles compressed streamlines.
+
+Formerly: scil_compute_streamlines_density_map.py
+
+positional arguments:
+  in_bundle             Tractogram filename.
+  out_img               Path of the output image file.
+
+options:
+  -h, --help            show this help message and exit
+  --binary [FIXED_VALUE]
+                        If set, will store the same value for all intersected voxels,
+                        creating a binary map. When set without a value, 1 is used (and dtype
+                        uint8). If a value is given, it will be used as the stored value.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_convert.py.help b/scripts/.hidden/scil_tractogram_convert.py.help
new file mode 100644
index 000000000..bbb6b6074
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_convert.py.help
@@ -0,0 +1,28 @@
+usage: scil_tractogram_convert.py [-h] [--no_bbox_check]
+                                  [--reference REFERENCE]
+                                  [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                  in_tractogram output_name
+
+Conversion of '.tck', '.trk', '.fib', '.vtk' and 'dpy' files using the updated
+file format standard. A TCK file always needs a reference file, a NIFTI, for
+conversion. The FIB file format is in fact a VTK; MITK Diffusion supports it.
+
+Formerly: scil_convert_tractogram.py
+
+positional arguments:
+  in_tractogram    Tractogram filename. Format must be one of
+                   trk, tck, vtk, fib, dpy
+  output_name      Output filename. Format must be one of
+                   trk, tck, vtk, fib, dpy
+
+options:
+  -h, --help       show this help message and exit
+  --no_bbox_check  Activate to ignore validity of the bounding box during loading / saving of
+                   tractograms (ignores the presence of invalid streamlines).
+  --reference REFERENCE
+                   Reference anatomy for tck/vtk/fib/dpy file
+                   support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                   Produces verbose output depending on the provided level.
+                   Default level is warning, default when using -v is info.
+  -f               Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_convert_hdf5_to_trk.py.help b/scripts/.hidden/scil_tractogram_convert_hdf5_to_trk.py.help
new file mode 100644
index 000000000..8046c19e4
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_convert_hdf5_to_trk.py.help
@@ -0,0 +1,50 @@
+usage: scil_tractogram_convert_hdf5_to_trk.py [-h] [--include_dps]
+                                              [--edge_keys LABEL1_LABEL2 [LABEL1_LABEL2 ...]
+                                              | --node_keys NODE [NODE ...]]
+                                              [--save_empty labels_list]
+                                              [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                              in_hdf5 out_dir
+
+Save connections of a hdf5 created with
+>> scil_tractogram_segment_bundles_for_connectivity.py.
+
+Useful for quality control and visual inspections.
+
+It can either save all connections (default), individual connections specified
+with --edge_keys or connections from specific nodes specified with --node_keys.
+
+With the option --save_empty, a labels_list, as a txt file, must be provided.
+This option saves existing connections and empty connections.
+
+The output is a directory containing the thousands of connections:
+out_dir/
+    |-- LABEL1_LABEL1.trk
+    |-- LABEL1_LABEL2.trk
+    |-- [...]
+    |-- LABEL90_LABEL90.trk
+
+Formerly: scil_save_connections_from_hdf5.py
+
+positional arguments:
+  in_hdf5               HDF5 filename (.h5) containing decomposed connections.
+  out_dir               Path of the output directory.
+
+options:
+  -h, --help            show this help message and exit
+  --include_dps         Include the data_per_streamline in the metadata.
+  --edge_keys LABEL1_LABEL2 [LABEL1_LABEL2 ...]
+                        Keys to identify the edges (connections) of interest.
+  --node_keys NODE [NODE ...]
+                        Node keys to identify the sub-networks of interest.
+                        Equivalent to adding any --edge_keys node_LABEL2 or LABEL2_node.
+  --save_empty labels_list
+                        Save empty connections. Then, the list of possible connections is
+                        not found from the hdf5 but inferred from labels_list, a txt file
+                        containing a list of nodes saved by the decomposition script.
+                        *If used together with edge_keys or node_keys, the provided nodes must
+                        exist in labels_list.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+                        CAREFUL. The whole output directory will be deleted if it exists.
diff --git a/scripts/.hidden/scil_tractogram_count_streamlines.py.help b/scripts/.hidden/scil_tractogram_count_streamlines.py.help
new file mode 100644
index 000000000..72ab12609
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_count_streamlines.py.help
@@ -0,0 +1,24 @@
+usage: scil_tractogram_count_streamlines.py [-h] [--print_count_alone]
+                                            [--indent INDENT] [--sort_keys]
+                                            [-v [{DEBUG,INFO,WARNING}]]
+                                            in_tractogram
+
+Return the number of streamlines in a tractogram. Only supports trk and tck,
+in order to allow lazy loading from nibabel.
+
+Formerly: scil_count_streamlines.py
+
+positional arguments:
+  in_tractogram         Path of the input tractogram file.
+
+options:
+  -h, --help            show this help message and exit
+  --print_count_alone   If true, prints the result only.
+                        Else, prints the bundle name and count formatted as a json dict. (default)
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+
+Json options:
+  --indent INDENT       Indent for json pretty print.
+  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_tractogram_cut_streamlines.py.help b/scripts/.hidden/scil_tractogram_cut_streamlines.py.help
new file mode 100644
index 000000000..24395fc68
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_cut_streamlines.py.help
@@ -0,0 +1,60 @@
+usage: scil_tractogram_cut_streamlines.py [-h] (--mask MASK | --label LABEL)
+                                          [--label_ids LABEL_IDS LABEL_IDS]
+                                          [--resample STEP_SIZE]
+                                          [--biggest_blob]
+                                          [--compress [COMPRESS_TH]]
+                                          [--reference REFERENCE]
+                                          [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                          in_tractogram out_tractogram
+
+Filters streamlines and only keeps the parts of streamlines within or
+between the ROIs. Two options are available.
+
+Input mask:
+
+The mask has either 1 entity/blob or
+2 entities/blobs (does not support disconnected voxels).
+The option --biggest_blob can help if you have such a scenario.
+
+The 1 entity scenario will 'trim' the streamlines so their longest segment is
+within the bounding box or a binary mask.
+
+The 2 entities scenario will cut streamlines so their segments are within the
+bounding box or going from binary mask #1 to binary mask #2.
+
+Input label:
+
+The label MUST contain 2 labels different from zero.
+Label values can be anything.
+The script will cut streamlines going from label 1 to label 2.
+
+Both inputs and scenarios will erase data_per_point and data_per_streamline.
+
+Formerly: scil_cut_streamlines.py
+
+positional arguments:
+  in_tractogram         Input tractogram file.
+  out_tractogram        Output tractogram file. Note: data_per_point will be discarded, if any!
+
+options:
+  -h, --help            show this help message and exit
+  --label_ids LABEL_IDS LABEL_IDS
+                        List of labels indices to use to cut streamlines (2 values).
+  --resample STEP_SIZE  Resample streamlines to a specific step-size in mm [None].
+  --biggest_blob        Use the biggest entity and force the 1 ROI scenario.
+  --compress [COMPRESS_TH]
+                        If set, compress the resulting streamlines. Value is the maximum
+                        compression distance in mm. [0.1]
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Mandatory mask options:
+  Choose between mask or label input.
+
+  --mask MASK           Binary mask containing either 1 or 2 blobs.
+  --label LABEL         Label containing 2 blobs.
diff --git a/scripts/.hidden/scil_tractogram_detect_loops.py.help b/scripts/.hidden/scil_tractogram_detect_loops.py.help
new file mode 100644
index 000000000..6559c83b8
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_detect_loops.py.help
@@ -0,0 +1,57 @@
+usage: scil_tractogram_detect_loops.py [-h]
+                                       [--looping_tractogram out_filename]
+                                       [--qb [threshold]] [--angle ANGLE]
+                                       [--display_counts] [--no_empty]
+                                       [--indent INDENT] [--sort_keys]
+                                       [--processes NBR]
+                                       [--reference REFERENCE]
+                                       [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                       in_tractogram out_tractogram
+
+This script can be used to remove loops in two types of streamline datasets:
+
+  - Whole brain: For this type, the script removes streamlines if they
+    make a loop with an angle of more than 360 degrees. It's possible to change
+    this angle with the --angle option. Warning: Don't use the --qb option for a
+    whole brain tractography.
+
+  - Bundle dataset: For this type, it is possible to remove loops and
+    streamlines outside the bundle. For the sharp angle turns, use the --qb option.
+
+See also:
+    scil_tractogram_filter_by_anatomy.py
+
+Formerly: scil_detect_streamlines_loops.py
+
+positional arguments:
+  in_tractogram         Tractogram input file name.
+  out_tractogram        Output tractogram without loops.
+
+options:
+  -h, --help            show this help message and exit
+  --looping_tractogram out_filename
+                        If set, saves detected looping streamlines.
+  --qb [threshold]      If set, uses QuickBundles to detect outliers (loops, sharp angle
+                        turns). Given threshold is the maximal streamline-to-bundle
+                        distance for a streamline to be considered as a tracking error.
+                        Default if set: [8.0]
+  --angle ANGLE         Maximum looping (or turning) angle of
+                        a streamline in degrees. [360]
+  --display_counts      Print streamline count before and after filtering.
+  --no_empty            If set, will not save outputs if they are empty.
+  --processes NBR       Number of sub-processes to start.
+                        Default: [1]
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Json options:
+  --indent INDENT       Indent for json pretty print.
+  --sort_keys           Sort keys in output json.
+
+References:
+    QuickBundles, based on [Garyfallidis12] Frontiers in Neuroscience, 2012.
diff --git a/scripts/.hidden/scil_tractogram_dpp_math.py.help b/scripts/.hidden/scil_tractogram_dpp_math.py.help
new file mode 100644
index 000000000..59c47e0c6
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_dpp_math.py.help
@@ -0,0 +1,76 @@
+usage: scil_tractogram_dpp_math.py [-h] --mode {dpp,dps} --in_dpp_name key
+                                   [key ...] --out_keys key [key ...]
+                                   [--endpoints_only] [--keep_all_dpp_dps]
+                                   [--overwrite_dpp_dps]
+                                   [--reference REFERENCE]
+                                   [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                   [--no_bbox_check]
+                                   OPERATION INPUT_FILE OUTPUT_FILE
+
+Performs an operation on data per point (dpp) from input streamlines.
+
+Although the input data always comes from the dpp, the output can be either
+a dpp or a data_per_streamline (dps), depending on the chosen options.
+Two modes of operation are supported: dpp and dps.
+  - In dps mode, the operation is performed on dpp across the dimension of
+    the streamlines, resulting in a single value (or array in the 4D case)
+    per streamline, stored as dps.
+  - In dpp mode, the operation is performed on each point separately,
+    resulting in a new dpp.
+
+If endpoints_only is set in dpp mode, the operation will only be calculated at
+the streamline endpoints; the rest of the values along the streamline will be
+NaN.
+
+If endpoints_only is set in dps mode, the operation will be calculated across
+the data at the endpoints and stored as a single value (or array in the 4D
+case) per streamline.
+
+Endpoint-only operation:
+correlation: correlation calculated between arrays extracted from streamline
+endpoints (data must be multivalued per point); dps mode must be set.
+
+positional arguments:
+  OPERATION             The type of operation to be performed on the
+                        streamlines. Must be one of the following:
+                        [mean, sum, min, max, correlation]
+  INPUT_FILE            Input tractogram containing streamlines and metadata.
+  OUTPUT_FILE           The file where the remaining streamlines
+                        are saved.
+
+options:
+  -h, --help            show this help message and exit
+  --mode {dpp,dps}      Set to dps if the operation is to be performed
+                        across all dimensions, resulting in a single value per
+                        streamline. Set to dpp if the operation is to be
+                        performed on each point separately, resulting in a
+                        single value per point.
+  --in_dpp_name key [key ...]
+                        Name or list of names of the data_per_point the
+                        operation is to be performed on. If more than one dpp
+                        is selected, the same operation will be applied
+                        separately to each one.
+  --out_keys key [key ...]
+                        Name of the resulting data_per_point or
+                        data_per_streamline to be saved in the output
+                        tractogram. If more than one --in_dpp_name was used,
+                        enter the same number of --out_keys values.
+  --endpoints_only      If set, will only perform operation on endpoints.
+                        If not set, will perform operation on all streamline
+                        points.
+  --keep_all_dpp_dps    If set, previous data_per_point will be preserved
+                        in the output tractogram. Else, only --out_keys
+                        values will be saved.
+  --overwrite_dpp_dps   If set, when --keep_all_dpp_dps is set and some
+                        --out_keys keys already existed in your
+                        data_per_point or data_per_streamline, allow
+                        overwriting the old data_per_point.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+  --no_bbox_check       Activate to ignore validity of the bounding box during loading / saving of
+                        tractograms (ignores the presence of invalid streamlines).
diff --git a/scripts/.hidden/scil_tractogram_extract_ushape.py.help b/scripts/.hidden/scil_tractogram_extract_ushape.py.help
new file mode 100644
index 000000000..6050a0e92
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_extract_ushape.py.help
@@ -0,0 +1,41 @@
+usage: scil_tractogram_extract_ushape.py [-h] [--minU MINU] [--maxU MAXU]
+                                         [--remaining_tractogram filename]
+                                         [--no_empty] [--display_counts]
+                                         [--indent INDENT] [--sort_keys]
+                                         [--reference REFERENCE]
+                                         [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                         in_tractogram out_tractogram
+
+This script extracts streamlines depending on their U-shapeness.
+This script is a replica of the Trackvis method.
+
+When ufactor is close to:
+* 0 it defines straight streamlines
+* 1 it defines U-fibers
+* -1 it defines S-fibers
+
+Formerly: scil_extract_ushape.py
+
+positional arguments:
+  in_tractogram         Tractogram input file name.
+  out_tractogram        Output tractogram file name.
+
+options:
+  -h, --help            show this help message and exit
+  --minU MINU           Min ufactor value. [0.5]
+  --maxU MAXU           Max ufactor value. [1.0]
+  --remaining_tractogram filename
+                        If set, saves remaining streamlines.
+  --no_empty            Do not write file if there is no streamline.
+  --display_counts      Print streamline count before and after filtering.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Json options:
+  --indent INDENT       Indent for json pretty print.
+  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_tractogram_filter_by_anatomy.py.help b/scripts/.hidden/scil_tractogram_filter_by_anatomy.py.help
new file mode 100644
index 000000000..b711f8591
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_filter_by_anatomy.py.help
@@ -0,0 +1,111 @@
+usage: scil_tractogram_filter_by_anatomy.py [-h] [--minL MINL] [--maxL MAXL]
+                                            [--angle ANGLE]
+                                            [--csf_bin CSF_BIN]
+                                            [--dilate_ctx value]
+                                            [--save_intermediate_tractograms]
+                                            [--save_volumes] [--save_counts]
+                                            [--save_rejected] [--no_empty]
+                                            [--indent INDENT] [--sort_keys]
+                                            [--processes NBR]
+                                            [--reference REFERENCE]
+                                            [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                            in_tractogram in_wmparc out_path
+
+This script filters streamlines in a tractogram according to their geometrical
+properties (i.e. limiting their length and looping angle) and their anatomical
+ending properties (i.e. the anatomical tissue or region their endpoints lie
+in).
+
+See also:
+    - scil_tractogram_detect_loops.py
+    - scil_tractogram_filter_by_length.py
+    - scil_tractogram_filter_by_orientation.py
+    - scil_tractogram_filter_by_roi.py
+
+The filtering is performed sequentially in four steps, each step processing
+the output of the previous step:
+
+    Step 1 - Remove streamlines below the minimum length and above the
+             maximum length. These thresholds must be set with the ``--minL``
+             and ``--maxL`` options.
+    Step 2 - Ensure that no streamlines end in the cerebrospinal fluid
+             according to the provided parcellation. A binary mask can be used
+             alternatively through the ``--csf_bin`` option.
+    Step 3 - Ensure that no streamlines end in white matter by ensuring that
+             they reach the cortical regions according to the provided
+             parcellation. The cortical regions of the parcellation can be
+             dilated using the ``--dilate_ctx`` option.
+    Step 4 - Remove streamlines if they make a loop with an angle above a
+             certain threshold. It's possible to change this angle with the
+             ``--angle`` option.
+
+Length and loop-based filtering (steps 1 and 4) will not have practical effects
+if no specific thresholds are provided (but will still be executed), since
+default values are 0 for the minimum allowed length and infinite for the
+maximum allowed length and angle.
+
+The anatomical region endings filtering requires a parcellation or label image
+file including the cerebrospinal fluid and gray matter (cortical) regions
+according to the Desikan-Killiany atlas. Intermediate tractograms (results of
+each step and outliers) and volumes can be saved throughout the process.
+
+Example usages:
+
+# Filter length, looping angle and anatomical ending region
+>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz
+    path/to/output/directory --minL 20 --maxL 200 --angle 300
+# Filter only anatomical ending region, with WM dilation and provided csf mask
+>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz
+    path/to/output/directory --csf_bin csf_bin.nii.gz --dilate_ctx 2
+
+Formerly: scil_filter_streamlines_anatomically.py
+
+NOTE: As of version 2.0.0, the dilation of the cortical mask has changed, from
+an in-house process to scipy's dilation. Results may differ from previous
+versions.
+
+positional arguments:
+  in_tractogram         Path of the input tractogram file.
+  in_wmparc             Path of the white matter parcellation atlas (.nii or .nii.gz)
+  out_path              Path to the output files.
+
+options:
+  -h, --help            show this help message and exit
+  --minL MINL           Minimum length of streamlines, in mm. [0.0]
+  --maxL MAXL           Maximum length of streamlines, in mm. [inf]
+  --angle ANGLE         Maximum looping (or turning) angle of a streamline,
+                        in degrees. [inf]
+  --csf_bin CSF_BIN     Allow CSF endings filtering with this binary
+                        mask instead of using the atlas (.nii or .nii.gz)
+  --dilate_ctx value    If set, dilate the cortical labels. Value is the dilation
+                        radius, in voxels (an integer > 0)
+  --save_intermediate_tractograms
+                        Save accepted and discarded streamlines after each step.
+  --save_volumes        Save volumetric images (e.g. binarised label
+                        images, etc) in the filtering process.
+  --save_counts         Save the streamline counts to a file (.json)
+  --save_rejected       Save rejected streamlines to output tractogram.
+  --no_empty            Do not write file if there is no streamline.
+  --processes NBR       Number of sub-processes to start.
+                        Default: [1]
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Json options:
+  --indent INDENT       Indent for json pretty print.
+  --sort_keys           Sort keys in output json.
+
+References:
+    [1] Jörgens, D., Descoteaux, M., Moreno, R., 2021. Challenges for
+        tractogram filtering. In: Özarslan, E., Schultz, T., Zhang, E., Fuster,
+        A. (Eds.), Anisotropy Across Fields and Scales.
+        Springer. Mathematics and Visualization.
+    [2] Legarreta, J., Petit, L., Rheault, F., Theaud, G., Lemaire, C.,
+        Descoteaux, M., Jodoin, P.M. Filtering in tractography using
+        autoencoders (FINTA). Medical Image Analysis. 2021
+
diff --git a/scripts/.hidden/scil_tractogram_filter_by_length.py.help b/scripts/.hidden/scil_tractogram_filter_by_length.py.help
new file mode 100644
index 000000000..e1f33549e
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_filter_by_length.py.help
@@ -0,0 +1,41 @@
+usage: scil_tractogram_filter_by_length.py [-h] [--minL MINL] [--maxL MAXL]
+                                           [--no_empty] [--display_counts]
+                                           [--save_rejected] [--indent INDENT]
+                                           [--sort_keys]
+                                           [--reference REFERENCE]
+                                           [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                           in_tractogram out_tractogram
+
+Script to filter streamlines based on their lengths.
+
+See also:
+    - scil_tractogram_detect_loops.py
+    - scil_tractogram_filter_by_anatomy.py
+      (Filtering by length is its step 1)
+    - scil_tractogram_filter_by_orientation.py
+    - scil_tractogram_filter_by_roi.py
+
+Formerly: scil_filter_streamlines_by_length.py
+
+positional arguments:
+  in_tractogram         Streamlines input file name.
+  out_tractogram        Streamlines output file name.
+
+options:
+  -h, --help            show this help message and exit
+  --minL MINL           Minimum length of streamlines, in mm. [0.0]
+  --maxL MAXL           Maximum length of streamlines, in mm. [inf]
+  --no_empty            Do not write file if there is no streamline.
+  --display_counts      Print streamline count before and after filtering.
+  --save_rejected       Save rejected streamlines to output tractogram.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Json options:
+  --indent INDENT       Indent for json pretty print.
+  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_tractogram_filter_by_orientation.py.help b/scripts/.hidden/scil_tractogram_filter_by_orientation.py.help
new file mode 100644
index 000000000..85f41e4b9
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_filter_by_orientation.py.help
@@ -0,0 +1,65 @@
+usage: scil_tractogram_filter_by_orientation.py [-h] [--min_x MIN_X]
+                                                [--max_x MAX_X]
+                                                [--min_y MIN_Y]
+                                                [--max_y MAX_Y]
+                                                [--min_z MIN_Z]
+                                                [--max_z MAX_Z] [--use_abs]
+                                                [--no_empty]
+                                                [--display_counts]
+                                                [--save_rejected filename]
+                                                [--indent INDENT]
+                                                [--sort_keys]
+                                                [--reference REFERENCE]
+                                                [-v [{DEBUG,INFO,WARNING}]]
+                                                [-f]
+                                                in_tractogram out_tractogram
+
+Script to filter streamlines based on their distance traveled in a specific
+dimension (x, y, or z).
+
+Useful to help differentiate bundles.
+
+Examples: In a brain aligned with x coordinates in the left-right axis and y
+coordinates in the anterior-posterior axis, a streamline from the ...
+  - corpus callosum will likely travel a very short distance in the y axis.
+  - cingulum will likely travel a very short distance in the x axis.
+
+Note: we consider that x, y, z are the coordinates of the streamlines; we
+do not verify if they are aligned with the brain's orientation.
+
+See also:
+    - scil_tractogram_detect_loops.py
+    - scil_tractogram_filter_by_anatomy.py
+    - scil_tractogram_filter_by_length.py
+    - scil_tractogram_filter_by_roi.py
+
+Formerly: scil_filter_streamlines_by_orientation.py
+
+positional arguments:
+  in_tractogram         Streamlines input file name.
+  out_tractogram        Streamlines output file name.
+
+options:
+  -h, --help            show this help message and exit
+  --min_x MIN_X         Minimum distance in the first dimension, in mm. [0.0]
+  --max_x MAX_X         Maximum distance in the first dimension, in mm. [inf]
+  --min_y MIN_Y         Minimum distance in the second dimension, in mm. [0.0]
+  --max_y MAX_Y         Maximum distance in the second dimension, in mm. [inf]
+  --min_z MIN_Z         Minimum distance in the third dimension, in mm. [0.0]
+  --max_z MAX_Z         Maximum distance in the third dimension, in mm. [inf]
+  --use_abs             If set, will use the total of distances in absolute value
+                        (e.g. coming back on yourself will contribute to the total
+                        distance instead of cancelling it).
+  --no_empty            Do not write file if there is no streamline.
+  --display_counts      Print streamline count before and after filtering.
+  --save_rejected filename
+                        Save the SFT of rejected streamlines.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Json options:
+  --indent INDENT       Indent for json pretty print.
+  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_tractogram_filter_by_roi.py.help b/scripts/.hidden/scil_tractogram_filter_by_roi.py.help
new file mode 100644
index 000000000..f2c796254
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_filter_by_roi.py.help
@@ -0,0 +1,127 @@
+usage: scil_tractogram_filter_by_roi.py [-h]
+                                        [--drawn_roi DRAWN_ROI [DRAWN_ROI ...]]
+                                        [--atlas_roi ATLAS_ROI [ATLAS_ROI ...]]
+                                        [--bdo BDO [BDO ...]]
+                                        [--x_plane X_PLANE [X_PLANE ...]]
+                                        [--y_plane Y_PLANE [Y_PLANE ...]]
+                                        [--z_plane Z_PLANE [Z_PLANE ...]]
+                                        [--filtering_list FILTERING_LIST]
+                                        [--overwrite_distance OVERWRITE_DISTANCE [OVERWRITE_DISTANCE ...]]
+                                        [--save_masks_atlas_roi SAVE_MASKS_ATLAS_ROI]
+                                        [--no_empty] [--display_counts]
+                                        [--save_rejected FILENAME]
+                                        [--indent INDENT] [--sort_keys]
+                                        [--reference REFERENCE]
+                                        [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                        in_tractogram out_tractogram
+
+Filtering of a tractogram based on any combination of conditions involving a
+ROI (e.g. keep streamlines whose endpoints are inside the ROI, exclude
+streamlines not entirely included in a ROI, etc.)
+
+See also:
+    - scil_tractogram_detect_loops.py
+    - scil_tractogram_filter_by_anatomy.py
+      (Can reject streamlines with endpoints in the WM or the CSF based on
+      labels)
+    - scil_tractogram_filter_by_length.py
+    - scil_tractogram_filter_by_orientation.py
+
+Condition
+---------
+For every type of ROI, two or three values are expected: MODE CRITERIA DISTANCE
+(DISTANCE is always optional)
+- MODE must be one of these values: ['any', 'all', 'either_end', 'both_ends']
+    - any: any part of the streamline must be in the mask.
+    - all: all parts of the streamline must be in the mask.
+    - either_end: at least one end of the streamline must be in the mask.
+    - both_ends: both ends of the streamline must be in the mask.
+- CRITERIA must be one of these values: ['include', 'exclude']
+    - Include: If the condition from MODE is respected, the streamline is included.
+    - Exclude: If the condition from MODE is respected, the streamline is excluded.
+- DISTANCE must be an int and is optional.
+
+Type of ROI
+-----------
+- Drawn ROI: Directly loaded from a binary file.
+- Atlas ROI: Selected label from an atlas.
+    - ID is one or multiple integer values in the atlas. If multiple values,
+      ID needs to be between quotes.
+      Example: "1:6 9 10:15" will use values between 1 and 6 and values
+      between 10 and 15 (inclusive), as well as value 9.
+- BDO: The ROI is the interior of a bounding box.
+- Planes: The ROI is the equivalent of a one-voxel plane.
+    * Using mode 'all' with x/y/z plane works but makes very little sense.
+
+Note: `--drawn_roi MASK.nii.gz all include` is equivalent to
+      `--drawn_roi INVERSE_MASK.nii.gz any exclude`
+
+For example, this makes it possible to find all streamlines entirely in the WM
+in one command (without manually inverting the mask first) or to remove any
+streamline staying in the GM without getting out.
+
+Supports multiple filtering conditions
+--------------------------------------
+Multiple filtering conditions can be used, with varied ROI types if necessary.
+Combining two conditions is equivalent to a logical AND between the conditions.
+Order of application does not matter for the final result, but may change the
+intermediate files, if any.
+
+Distance management
+-------------------
+DISTANCE is optional, and it should be used carefully with large voxel size
+(e.g. > 2.5mm). The value is in voxels for ROIs and in mm for bounding boxes.
+Anisotropic data will affect each direction differently.
+When using --overwrite_distance, any filtering option with the given criteria
+will have its DISTANCE value replaced.
+
+Formerly: scil_filter_tractogram.py
+
+positional arguments:
+  in_tractogram         Path of the input tractogram file.
+  out_tractogram        Path of the output tractogram file.
+
+options:
+  -h, --help            show this help message and exit
+  --drawn_roi DRAWN_ROI [DRAWN_ROI ...]
+                        ROI_NAME MODE CRITERIA DISTANCE (distance in voxel is optional)
+                        Filename of a hand drawn ROI (.nii or .nii.gz).
+  --atlas_roi ATLAS_ROI [ATLAS_ROI ...]
+                        ATLAS_NAME ID MODE CRITERIA DISTANCE (distance in voxel is optional)
+                        Filename of an atlas (.nii or .nii.gz).
+  --bdo BDO [BDO ...]   BDO_NAME MODE CRITERIA DISTANCE (distance in mm is optional)
+                        Filename of a bounding box (bdo) file from MI-Brain.
+  --x_plane X_PLANE [X_PLANE ...]
+                        PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)
+                        Slice number in X, in voxel space.
+  --y_plane Y_PLANE [Y_PLANE ...]
+                        PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)
+                        Slice number in Y, in voxel space.
+  --z_plane Z_PLANE [Z_PLANE ...]
+                        PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)
+                        Slice number in Z, in voxel space.
+  --filtering_list FILTERING_LIST
+                        Text file containing one rule per line
+                        (i.e. drawn_roi mask.nii.gz both_ends include 1).
+  --overwrite_distance OVERWRITE_DISTANCE [OVERWRITE_DISTANCE ...]
+                        MODE CRITERIA DISTANCE (distance in voxel for ROIs and in mm for bounding box).
+                        If set, it will overwrite the distance associated to a specific mode/criteria.
+  --save_masks_atlas_roi SAVE_MASKS_ATLAS_ROI
+                        If set, will save the atlas roi masks. The value to provide is the
+                        prefix, ex: my_path/atlas_roi_. Whole filename will be
+                        my_path/atlas_roi_{id}.nii.gz
+  --no_empty            Do not write file if there is no streamline.
+  --display_counts      Print streamline count before and after filtering.
+  --save_rejected FILENAME
+                        Save rejected streamlines to output tractogram.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Json options:
+  --indent INDENT       Indent for json pretty print.
+  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_tractogram_fix_trk.py.help b/scripts/.hidden/scil_tractogram_fix_trk.py.help
new file mode 100644
index 000000000..124b0f01c
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_fix_trk.py.help
@@ -0,0 +1,80 @@
+usage: scil_tractogram_fix_trk.py [-h] [--software string]
+                                  [--cut_invalid | --remove_invalid]
+                                  [--in_dsi_fa IN_DSI_FA]
+                                  [--in_native_fa IN_NATIVE_FA] [--auto_crop]
+                                  [--save_transfo FILE | --load_transfo FILE]
+                                  [--reference REFERENCE] [--no_bbox_check]
+                                  [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                  in_tractogram out_tractogram
+
+This script is made to fix DSI-Studio or Startrack TRK files
+(unknown space/convention) to make them compatible with TrackVis,
+MI-Brain, Dipy Horizon (Stateful Tractogram).
+
+DSI-Studio
+==========
+
+The script either makes the tractogram match an anatomy from DSI-Studio
+(AC-PC aligned, sometimes flipped) or, if --in_native_fa is provided, moves
+it back to native DWI space (this involves registration).
+
+Since DSI-Studio sometimes leaves some skull around the brain, the --auto_crop
+option aims to stabilize registration. If this option fails, manually BET both
+FA. Registration is more robust at resolutions above 2mm (iso), be careful.
+
+If you are fixing bundles, use this script once with --save_transfo and verify
+the results. Once satisfied, call the script on bundles using a bash for loop
+with --load_transfo to save computation.
+
+We recommend --cut_invalid to remove the invalid points of streamlines rather
+than removing entire streamlines.
+
+This script was tested on various datasets and worked on all of them. However,
+always verify the results; if a specific case does not work, open an issue
+on the Scilpy GitHub repository.
+
+Startrack
+==========
+
+The script will create a new stateful tractogram using the reference, in
+order to fix the missing information in the header of the trk.
+
+WARNING: This script is still experimental; DSI-Studio and Startrack
+evolve quickly and results may vary depending on the data itself
+as well as the DSI-Studio/Startrack version.
+
+Formerly: scil_fix_dsi_studio_trk.py
+
+positional arguments:
+  in_tractogram         Path of the input tractogram file from DSI studio (.trk).
+  out_tractogram        Path of the output tractogram file.
+
+options:
+  -h, --help            show this help message and exit
+  --software string     Software used to create in_tractogram.
+                        Choices: ['dsi_studio', 'startrack']
+  --cut_invalid         Cut invalid streamlines rather than removing them.
+                        Keep the longest segment only.
+  --remove_invalid      Remove the streamlines landing out of the bounding box.
+  --no_bbox_check       Activate to ignore validity of the bounding box during loading / saving of
+                        tractograms (ignores the presence of invalid streamlines).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+DSI options:
+  --in_dsi_fa IN_DSI_FA
+                        Path of the input FA from DSI Studio (.nii.gz).
+  --in_native_fa IN_NATIVE_FA
+                        Path of the input FA from Dipy/MRtrix (.nii.gz).
+                        Move the tractogram back to a "proper" space, includes registration.
+  --auto_crop           If both FA are not already BET, perform registration
+                        using a centered-cube crop to ignore the skull.
+                        A good BET for both is more robust.
+  --save_transfo FILE   Save estimated transformation to avoid recomputing (.txt).
+  --load_transfo FILE   Load estimated transformation to apply to other files (.txt).
+
+StarTrack options:
+  --reference REFERENCE
+                        Reference anatomy (.nii or .nii.gz).
diff --git a/scripts/.hidden/scil_tractogram_flip.py.help b/scripts/.hidden/scil_tractogram_flip.py.help
new file mode 100644
index 000000000..20e260b2c
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_flip.py.help
@@ -0,0 +1,27 @@
+usage: scil_tractogram_flip.py [-h] [--reference REFERENCE]
+                               [-v [{DEBUG,INFO,WARNING}]] [-f]
+                               in_tractogram out_tractogram {x,y,z}
+                               [{x,y,z} ...]
+
+Flip streamlines locally around specific axes.
+
+IMPORTANT: this script should only be used in case of absolute necessity.
+It's better to fix the real tools than to force flipping streamlines to
+have them fit in the tools.
+
+Formerly: scil_flip_streamlines.py
+
+positional arguments:
+  in_tractogram         Path of the input tractogram file.
+  out_tractogram        Path of the output tractogram file.
+  {x,y,z}               The axes you want to flip. E.g., to flip the x and y axes use: x y.
+
+options:
+  -h, --help            show this help message and exit
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_math.py.help b/scripts/.hidden/scil_tractogram_math.py.help
new file mode 100644
index 000000000..d7f5f2226
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_math.py.help
@@ -0,0 +1,75 @@
+usage: scil_tractogram_math.py [-h] [--precision NBR_OF_DECIMALS] [--robust]
+                               [--no_metadata] [--fake_metadata]
+                               [--save_indices OUT_INDEX_FILE] [--save_empty]
+                               [--no_bbox_check] [--indent INDENT]
+                               [--sort_keys] [--reference REFERENCE]
+                               [-v [{DEBUG,INFO,WARNING}]] [-f]
+                               OPERATION INPUT_FILES [INPUT_FILES ...]
+                               OUTPUT_FILE
+
+Performs an operation on a list of streamline files. The supported
+operations are:
+
+difference:       Keep the streamlines from the first file that are not in
+                  any of the following files.
+
+intersection:     Keep the streamlines that are present in all files.
+
+union:            Keep all streamlines while removing duplicates.
+
+concatenate:      Keep all streamlines with duplicates.
+
+lazy_concatenate: Keep all streamlines with duplicates, never loading the
+                  whole tractograms in memory. Only works with trk/tck files;
+                  metadata will be lost and invalid streamlines are kept.
+
+If a file 'duplicate.trk' has identical streamlines, calling the script using
+the difference/intersection/union with a single input will remove these
+duplicated streamlines.
+
+To allow a soft match, use the --precision option to increase the allowed
+threshold for similarity. A precision of 1 represents 10**(-1), so a
+maximum distance of 0.1mm is allowed. If the streamlines are identical, the
+default value of 3 (or 0.001mm distance) should work.
+
+If there is a 0.5mm shift, use a precision of 0 (or 1mm distance) and the
+--robust option. This should make it work, but slightly slower. It will merge
+all streamlines that are similar when rounded to that precision level.
+
+The metadata (data per point, data per streamline) of the streamlines that
+are kept in the output will be preserved. This requires that all input files
+share the same type of metadata. If this is not the case, use the option
+--no_metadata to strip the metadata from the output, or --fake_metadata to
+initialize dummy metadata in the files missing them.
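+
+For illustration, a hypothetical call (filenames are placeholders, not part
+of this help) computing the union of two tractograms with a soft match could
+look like:
+
+$ scil_tractogram_math.py union sub01_run1.trk sub01_run2.trk out_union.trk \
+    --precision 1 --robust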
+
+Formerly: scil_streamlines_math.py
+
+positional arguments:
+  OPERATION    The type of operation to be performed on the streamlines. Must
+               be one of the following: difference, intersection, union, concatenate, lazy_concatenate.
+  INPUT_FILES  The list of files that contain the streamlines to operate on.
+  OUTPUT_FILE  The file where the remaining streamlines are saved.
+
+options:
+  -h, --help            show this help message and exit
+  --precision NBR_OF_DECIMALS, -p NBR_OF_DECIMALS
+                        Precision used to compare streamlines [4].
+  --robust, -r          Use version robust to small translation/rotation.
+  --no_metadata, -n     Strip the streamline metadata from the output.
+  --fake_metadata       Skip the metadata verification, create fake metadata if missing; can lead to unexpected behavior.
+  --save_indices OUT_INDEX_FILE, -s OUT_INDEX_FILE
+                        Save the streamline indices to the supplied json file.
+  --save_empty          If set, we will save all results, even if the tractogram is empty.
+  --no_bbox_check       Activate to ignore validity of the bounding box during loading / saving of
+                        tractograms (ignores the presence of invalid streamlines).
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Json options:
+  --indent INDENT       Indent for json pretty print.
+  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_tractogram_pairwise_comparison.py.help b/scripts/.hidden/scil_tractogram_pairwise_comparison.py.help
new file mode 100644
index 000000000..d22379961
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_pairwise_comparison.py.help
@@ -0,0 +1,51 @@
+usage: scil_tractogram_pairwise_comparison.py [-h] [--out_dir OUT_DIR]
+                                              [--out_prefix OUT_PREFIX]
+                                              [--in_mask IN_FILE]
+                                              [--skip_streamlines_distance]
+                                              [--processes NBR]
+                                              [--reference REFERENCE]
+                                              [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                              in_tractogram_1 in_tractogram_2
+
+This script is designed to compare and help visualize differences between two
+tractograms. This can be especially useful in studies where multiple
+tractograms from different algorithms or parameters need to be compared.
+
+A similar script (scil_bundle_pairwise_comparison.py) is available for bundles,
+with metrics more adapted to bundles (and spatial agreement).
+
+The difference is computed in terms of
+- A voxel-wise spatial distance between streamlines crossing each voxel.
+  This can help to see whether the two tractography reconstructions look
+  similar at each voxel (out_diff.nii.gz).
+- An angular correlation (ACC) between streamline orientations from TODI.
+  This compares the local orientation of streamlines at each voxel
+  (out_acc.nii.gz).
+- A patch-wise correlation between streamline density maps from both
+  tractograms. This compares where the high/low density regions agree or not
+  (out_corr.nii.gz).
+- A heatmap combining all the previous metrics using a harmonic mean of the
+  normalized metrics to summarize general agreement (out_heatmap.nii.gz).
+
+positional arguments:
+  in_tractogram_1       Input tractogram 1.
+  in_tractogram_2       Input tractogram 2.
+
+options:
+  -h, --help            show this help message and exit
+  --out_dir OUT_DIR     Directory where all output files will be saved.
+                        If not specified, outputs will be saved in the current directory.
+  --out_prefix OUT_PREFIX
+                        Prefix for output files. Useful for distinguishing between different runs [out].
+  --in_mask IN_FILE     Optional input mask.
+  --skip_streamlines_distance
+                        Skip computation of the spatial distance between streamlines. Slowest part of the computation.
+  --processes NBR       Number of sub-processes to start.
+                        Default: [1]
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_print_info.py.help b/scripts/.hidden/scil_tractogram_print_info.py.help
new file mode 100644
index 000000000..22c270049
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_print_info.py.help
@@ -0,0 +1,32 @@
+usage: scil_tractogram_print_info.py [-h] [--reference REFERENCE]
+                                     [-v [{DEBUG,INFO,WARNING}]]
+                                     [--indent INDENT] [--sort_keys]
+                                     in_tractogram
+
+Prints information on a loaded tractogram: number of streamlines, and
+mean / min / max / std of
+    - length in number of points
+    - length in mm
+    - step size.
+
+For trk files: also prints the data_per_point and data_per_streamline keys.
+
+See also:
+    - scil_header_print_info.py to see the header, affine, volume dimension.
+    - scil_bundle_shape_measures.py to see bundle-specific information.
+
+positional arguments:
+  in_tractogram         Tractogram file.
+
+options:
+  -h, --help            show this help message and exit
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+
+Json options:
+  --indent INDENT       Indent for json pretty print.
+  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_tractogram_project_map_to_streamlines.py.help b/scripts/.hidden/scil_tractogram_project_map_to_streamlines.py.help
new file mode 100644
index 000000000..1dcdde3ff
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_project_map_to_streamlines.py.help
@@ -0,0 +1,68 @@
+usage: scil_tractogram_project_map_to_streamlines.py [-h] --in_maps IN_MAPS
+                                                     [IN_MAPS ...]
+                                                     --out_dpp_name
+                                                     OUT_DPP_NAME
+                                                     [OUT_DPP_NAME ...]
+                                                     [--trilinear]
+                                                     [--endpoints_only]
+                                                     [--keep_all_dpp]
+                                                     [--overwrite_dpp]
+                                                     [--reference REFERENCE]
+                                                     [-f]
+                                                     [-v [{DEBUG,INFO,WARNING}]]
+                                                     in_tractogram
+                                                     out_tractogram
+
+Projects the values of one or several maps onto the points of streamlines.
+
+The default options will take data from a nifti image (3D or 4D) and project it
+onto the points of streamlines. If the image is 4D, the data is stored as a
+list of 1D arrays per streamline. If the image is 3D, the data is stored as a
+list of values per streamline.
+
+See also scil_tractogram_project_streamlines_to_map.py for the reverse action.
+
+* Note that the data from your maps will be projected only on the coordinates
+of the points of your streamlines. Data underlying the whole segments between
+two consecutive points is not used. If your streamlines are strongly
+compressed, or if they have a very big step size, the result may poorly
+reflect your map. You may use scil_tractogram_resample.py to upsample
+your streamlines first.
+
+* Hint: The streamlines themselves are not modified here, only their dpp.
+To avoid multiplying data on disk, you could use the following arguments to
+save the new dpp in your current tractogram:
+>> scil_tractogram_project_map_to_streamlines.py $in_bundle $in_bundle
+    --keep_all_dpp -f
+
+positional arguments:
+  in_tractogram         Fiber bundle file.
+  out_tractogram        Output file.
+
+options:
+  -h, --help            show this help message and exit
+  --in_maps IN_MAPS [IN_MAPS ...]
+                        Nifti map to project onto streamlines.
+  --out_dpp_name OUT_DPP_NAME [OUT_DPP_NAME ...]
+                        Name of the data_per_point to be saved in the
+                        output tractogram.
+  --trilinear           If set, will use trilinear interpolation,
+                        else will use nearest neighbor interpolation
+                        by default.
+  --endpoints_only      If set, will only project the map onto the
+                        endpoints of the streamlines (all other values along
+                        streamlines will be NaN). If not set, will project
+                        the map onto all points of the streamlines.
+  --keep_all_dpp        If set, previous data_per_point will be preserved
+                        in the output tractogram. Else, only --out_dpp_name
+                        keys will be saved.
+  --overwrite_dpp       If set, if --keep_all_dpp is set and some
+                        --out_dpp_name keys already existed in your
+                        data_per_point, allow overwriting old data_per_point.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -f                    Force overwriting of the output files.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
diff --git a/scripts/.hidden/scil_tractogram_project_streamlines_to_map.py.help b/scripts/.hidden/scil_tractogram_project_streamlines_to_map.py.help
new file mode 100644
index 000000000..c99ec3045
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_project_streamlines_to_map.py.help
@@ -0,0 +1,77 @@
+usage: scil_tractogram_project_streamlines_to_map.py [-h]
+                                                     (--use_dps key [key ...] | --use_dpp key [key ...] | --load_dps file [file ...] | --load_dpp file [file ...])
+                                                     (--mean_endpoints | --mean_streamline | --point_by_point)
+                                                     (--to_endpoints | --to_wm)
+                                                     [--reference REFERENCE]
+                                                     [-v [{DEBUG,INFO,WARNING}]]
+                                                     [-f]
+                                                     in_bundle out_prefix
+
+Projects metrics onto the underlying voxels of the streamlines. This script can
+project data from data_per_point (dpp) or data_per_streamline (dps) to maps.
+
+You can choose to project data from all points of the streamlines, or from the
+endpoints only. The idea then is to visualize the cortical areas affected by
+metrics (assuming streamlines start/end in the cortex).
+
+See also scil_tractogram_project_map_to_streamlines.py for the reverse action.
+
+How the data is loaded:
+    - From dps: uses the same value for each point of the streamline.
+    - From dpp: one value per point.
+
+How the data is used:
+    1. Average all points of the streamline to get a mean value, set this value
+       to all points.
+    2. Average the two endpoints and get their mean value, set this value to
+       all points.
+    3. Keep each point individually.
+
+How the data is projected to a map:
+    A. Using each point.
+    B. Using the endpoints only.
+
+For more complex operations than the average per streamline, see
+scil_tractogram_dpp_math.py.
+
+positional arguments:
+  in_bundle             Fiber bundle file.
+  out_prefix            Folder + prefix to save endpoints metric(s). We will save
+                        one nifti file per dpp/dps key given.
+                        Ex: my_path/subjX_bundleY_ with --use_dpp key1 will output
+                        my_path/subjX_bundleY_key1.nii.gz
+
+options:
+  -h, --help            show this help message and exit
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Where to get the statistics from. (Choose one):
+  --use_dps key [key ...]
+                        Use the data_per_streamline from the tractogram.
+                        It must be a .trk.
+  --use_dpp key [key ...]
+                        Use the data_per_point from the tractogram.
+                        It must be a .trk.
+  --load_dps file [file ...]
+                        Load data per streamline (scalar) from .txt or .npy.
+                        Must load an array with the right shape.
+  --load_dpp file [file ...]
+                        Load data per point (scalar) from .txt or .npy.
+                        Must load an array with the right shape.
+
+Processing choices. (Choose one):
+  --mean_endpoints      Use one single value per streamline: the mean of the two
+                        endpoints.
+  --mean_streamline     Use one single value per streamline: the mean of all
+                        points of the streamline.
+  --point_by_point      Directly project the streamlines values onto the map.
+
+Where to send the statistics. (Choose one):
+  --to_endpoints        Project metrics onto a mask of the endpoints.
+  --to_wm               Project metrics into streamlines coverage.
diff --git a/scripts/.hidden/scil_tractogram_qbx.py.help b/scripts/.hidden/scil_tractogram_qbx.py.help
new file mode 100644
index 000000000..8ff05ebba
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_qbx.py.help
@@ -0,0 +1,43 @@
+usage: scil_tractogram_qbx.py [-h] [--nb_points NB_POINTS]
+                              [--out_centroids OUT_CENTROIDS]
+                              [--reference REFERENCE]
+                              [-v [{DEBUG,INFO,WARNING}]] [-f]
+                              in_tractogram dist_thresh out_clusters_dir
+
+Compute clusters using QuickBundlesX and save them separately.
+We cannot know the number of clusters in advance.
+
+Quickbundles:
+Garyfallidis, E. et al. (2012). Quickbundles, a method for tractography
+simplification. Frontiers in neuroscience, 6, 175.
+
+QuickbundlesX:
+Garyfallidis, E. et al. (2016) QuickBundlesX: sequential clustering of millions
+of streamlines in multiple levels of detail at record execution time. 24th
+International Society of Magnetic Resonance in Medicine (ISMRM).
+
+"QuickBundlesX shows a remarkable 20+X speedup over its predecessor"
+
+Formerly: scil_compute_qbx.py
+
+positional arguments:
+  in_tractogram         Tractogram filename.
+                        Path of the input tractogram or bundle.
+  dist_thresh           Last QuickBundlesX threshold in mm. Typically
+                        the values are between 10-20mm.
+  out_clusters_dir      Path where to save the clusters directory.
+
+options:
+  -h, --help            show this help message and exit
+  --nb_points NB_POINTS
+                        Streamlines will be resampled to have this number of points [20].
+  --out_centroids OUT_CENTROIDS
+                        Output tractogram filename.
+                        Format must be readable by the Nibabel API.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
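+
+For illustration, a hypothetical call (filename and threshold are
+placeholders) clustering a tractogram with a 12mm final threshold could be:
+
+$ scil_tractogram_qbx.py tractogram.trk 12 out_clusters/ \
+    --out_centroids centroids.trk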
diff --git a/scripts/.hidden/scil_tractogram_register.py.help b/scripts/.hidden/scil_tractogram_register.py.help
new file mode 100644
index 000000000..c2bde4463
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_register.py.help
@@ -0,0 +1,42 @@
+usage: scil_tractogram_register.py [-h] [--out_name OUT_NAME] [--only_rigid]
+                                   [--moving_tractogram_ref MOVING_TRACTOGRAM_REF]
+                                   [--static_tractogram_ref STATIC_TRACTOGRAM_REF]
+                                   [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                   moving_tractogram static_tractogram
+
+Generate a linear transformation matrix from the registration of 2 tractograms.
+Typically, this script is run before scil_tractogram_apply_transform.py.
+
+For more information on how to use the various registration scripts, see the
+doc at:
+https://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html
+
+Formerly: scil_register_tractogram.py
+
+positional arguments:
+  moving_tractogram     Path of the moving tractogram.
+  static_tractogram     Path of the target tractogram.
+
+options:
+  -h, --help            show this help message and exit
+  --out_name OUT_NAME   Filename of the transformation matrix.
+                        The registration type will be appended as a suffix,
+                        [_.txt].
+                        Default: [transformation.txt]
+  --only_rigid          If set, will only use a rigid transformation (uses affine by default).
+  --moving_tractogram_ref MOVING_TRACTOGRAM_REF
+                        Reference anatomy for moving_tractogram (if tck/vtk/fib/dpy) file
+                        support (.nii or .nii.gz).
+  --static_tractogram_ref STATIC_TRACTOGRAM_REF
+                        Reference anatomy for static_tractogram (if tck/vtk/fib/dpy) file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+References:
+[1] E. Garyfallidis, O. Ocegueda, D. Wassermann, M. Descoteaux
+Robust and efficient linear registration of white-matter fascicles in the
+space of streamlines, NeuroImage, Volume 117, 15 August 2015, Pages 124-140
+(http://www.sciencedirect.com/science/article/pii/S1053811915003961)
diff --git a/scripts/.hidden/scil_tractogram_remove_invalid.py.help b/scripts/.hidden/scil_tractogram_remove_invalid.py.help
new file mode 100644
index 000000000..a57dbbd40
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_remove_invalid.py.help
@@ -0,0 +1,41 @@
+usage: scil_tractogram_remove_invalid.py [-h] [--cut_invalid]
+                                         [--remove_single_point]
+                                         [--remove_overlapping_points]
+                                         [--threshold THRESHOLD] [--no_empty]
+                                         [--reference REFERENCE]
+                                         [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                         in_tractogram out_tractogram
+
+Removal of streamlines that are out of the volume bounding box. In voxel space,
+no negative coordinates and no coordinates above the volume dimensions are
+possible. Any streamline that does not respect these two conditions is removed.
+
+The --cut_invalid option will cut streamlines so that their longest segment is
+within the bounding box.
+
+Formerly: scil_remove_invalid_streamlines.py
+
+positional arguments:
+  in_tractogram         Tractogram filename. Format must be one of
+                        trk, tck, vtk, fib, dpy.
+  out_tractogram        Output filename. Format must be one of
+                        trk, tck, vtk, fib, dpy.
+
+options:
+  -h, --help            show this help message and exit
+  --cut_invalid         Cut invalid streamlines rather than removing them.
+                        Keep the longest segment only.
+  --remove_single_point
+                        Consider single point streamlines invalid.
+  --remove_overlapping_points
+                        Consider streamlines with overlapping points invalid.
+  --threshold THRESHOLD
+                        Maximum distance between two points to be considered overlapping [0.001 mm].
+  --no_empty            Do not save empty tractogram.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_resample.py.help b/scripts/.hidden/scil_tractogram_resample.py.help
new file mode 100644
index 000000000..a224b167f
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_resample.py.help
@@ -0,0 +1,72 @@
+usage: scil_tractogram_resample.py [-h] [--never_upsample]
+                                   [--point_wise_std POINT_WISE_STD]
+                                   [--tube_radius TUBE_RADIUS]
+                                   [--gaussian SIGMA] [-e ERROR_RATE]
+                                   [--keep_invalid_streamlines]
+                                   [--downsample_per_cluster]
+                                   [--qbx_thresholds QBX_THRESHOLDS [QBX_THRESHOLDS ...]]
+                                   [--seed SEED] [--reference REFERENCE]
+                                   [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                   in_tractogram nb_streamlines out_tractogram
+
+Script to resample a tractogram to a set number of streamlines.
+Default behavior:
+- IF the number of requested streamlines is lower than the streamline count:
+  DOWNSAMPLE.
+- IF the number of requested streamlines is higher than the streamline count:
+  UPSAMPLE.
+To prevent upsampling when it is not desired, use --never_upsample.
+
+Can be useful to build training sets for machine learning algorithms, to
+upsample under-represented bundles or downsample over-represented bundles.
+
+Works by either selecting a subset of streamlines or by generating new
+streamlines by adding gaussian noise to existing ones.
+
+Upsampling:
+    Includes smoothing to compensate for the noisiness of new streamlines
+    generated by the process.
+Downsampling:
+    Includes the possibility of choosing randomly *per Quickbundles cluster* to
+    ensure that all clusters are represented in the final tractogram.
+
+Example usage:
+$ scil_tractogram_resample.py input.trk 1000 output.trk --point_wise_std 0.5 --gaussian 5 --keep_invalid_streamlines
+$ scil_visualize_bundles.py output.trk --local_coloring --width=0.1
+
+positional arguments:
+  in_tractogram         Input tractography file.
+  nb_streamlines        Number of streamlines to resample the tractogram to.
+  out_tractogram        Output tractography file.
+
+options:
+  -h, --help            show this help message and exit
+  --never_upsample      Make sure to never upsample a tractogram.
+                        Useful when downsampling a batch of files using bash.
+  --seed SEED           Use a specific random seed for the resampling.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Upsampling params:
+  --point_wise_std POINT_WISE_STD
+                        Noise to add to existing streamlines points to generate new ones [1].
+  --tube_radius TUBE_RADIUS
+                        Maximum distance to generate streamlines around the original ones [1].
+  --gaussian SIGMA      Sigma for smoothing. Use the value of surrounding X,Y,Z points on the streamline to blur the streamlines.
+                        A good sigma choice would be around 5.
+  -e ERROR_RATE         Maximum compression distance in mm [0.1].
+  --keep_invalid_streamlines
+                        Keep invalid newly generated streamlines that may go out of the
+                        bounding box.
+
+Downsampling params:
+  --downsample_per_cluster
+                        If set, downsampling will be done per cluster (computed with
+                        Quickbundles) to ensure that at least some streamlines are
+                        kept per bundle. Else, random downsampling is performed (default).
+  --qbx_thresholds QBX_THRESHOLDS [QBX_THRESHOLDS ...]
+                        If you choose option '--downsample_per_cluster', you may set
+                        the QBx threshold value(s) here. Default: [40, 30, 20]
diff --git a/scripts/.hidden/scil_tractogram_resample_nb_points.py.help b/scripts/.hidden/scil_tractogram_resample_nb_points.py.help
new file mode 100644
index 000000000..3a7d23f3d
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_resample_nb_points.py.help
@@ -0,0 +1,28 @@
+usage: scil_tractogram_resample_nb_points.py [-h]
+                                             (--nb_pts_per_streamline NB_PTS_PER_STREAMLINE | --step_size STEP_SIZE)
+                                             [--reference REFERENCE]
+                                             [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                             in_tractogram out_tractogram
+
+Script to resample a set of streamlines to either a new number of points per
+streamline or to a fixed step size. WARNING: data_per_point is not carried.
+
+Formerly: scil_resample_streamlines.py
+
+positional arguments:
+  in_tractogram         Streamlines input file name.
+  out_tractogram        Streamlines output file name.
+
+options:
+  -h, --help            show this help message and exit
+  --nb_pts_per_streamline NB_PTS_PER_STREAMLINE
+                        Number of points per streamline in the output.
+  --step_size STEP_SIZE
+                        Step size in the output (in mm).
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_seed_density_map.py.help b/scripts/.hidden/scil_tractogram_seed_density_map.py.help
new file mode 100644
index 000000000..8ecbf4470
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_seed_density_map.py.help
@@ -0,0 +1,29 @@
+usage: scil_tractogram_seed_density_map.py [-h] [--binary [FIXED_VALUE]]
+                                           [--no_bbox_check]
+                                           [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                           tractogram_filename
+                                           seed_density_filename
+
+Compute a density map of seeds saved in a .trk file.
+
+Formerly: scil_compute_seed_density_map.py
+
+positional arguments:
+  tractogram_filename   Tracts filename. Format must be .trk.
+                        File should contain a "seeds" value in the data_per_streamline.
+                        These seeds must be in space: voxel, origin: corner.
+  seed_density_filename
+                        Output seed density filename. Format must be Nifti.
+
+options:
+  -h, --help            show this help message and exit
+  --binary [FIXED_VALUE]
+                        If set, will store the same value for all intersected voxels, creating a binary map.
+                        When set without a value, 1 is used (and dtype uint8).
+                        If a value is given, it will be used as the stored value.
+  --no_bbox_check       Activate to ignore validity of the bounding box during loading / saving of
+                        tractograms (ignores the presence of invalid streamlines).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
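+
+For illustration, a hypothetical call (filenames are placeholders) producing
+a binary seed mask could be:
+
+$ scil_tractogram_seed_density_map.py tracking.trk seed_density.nii.gz \
+    --binary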
diff --git a/scripts/.hidden/scil_tractogram_segment_and_score.py.help b/scripts/.hidden/scil_tractogram_segment_and_score.py.help
new file mode 100644
index 000000000..63434cac7
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_segment_and_score.py.help
@@ -0,0 +1,164 @@
+usage: scil_tractogram_segment_and_score.py [-h] [--json_prefix p]
+                                            [--gt_dir DIR]
+                                            [--use_gt_masks_as_all_masks]
+                                            [--dilate_endpoints NB_PASS]
+                                            [--remove_invalid]
+                                            [--save_wpc_separately]
+                                            [--compute_ic] [--unique]
+                                            [--remove_wpc_belonging_to_another_bundle]
+                                            [--no_empty] [--indent INDENT]
+                                            [--sort_keys] [--no_bbox_check]
+                                            [--reference REFERENCE]
+                                            [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                            in_tractogram gt_config out_dir
+
+Scores the input tractogram overall and bundlewise.
+
+Outputs
+-------
+
+ - results.json: Contains a full tractometry report.
+ - processing_stats.json: Contains information on the segmentation of
+   bundles (ex: the number of WPC per criterion).
+ - Splits the input tractogram into
+       segmented_VB/*_VS.trk.
+       segmented_IB/*_*_IC.trk (if args.compute_ic)
+       segmented_WPC/*_wpc.trk (if args.save_wpc_separately)
+       IS.trk OR NC.trk (if args.compute_ic)
+
+By default, if a streamline fits in many bundles, it will be included in every
+one. This means a streamline may be a VS for a bundle and an IS for
+(potentially many) others. If you want to assign each streamline to at most one
+bundle, use the `--unique` flag.
+
+Config file
+-----------
+
+The config file needs to be a json containing a dict of the ground-truth
+bundles as keys. The value for each bundle is itself a dictionary with:
+
+Mandatory:
+ - endpoints OR [head AND tail]: filename for the endpoints ROI.
+   If 'endpoints' is used, we will automatically separate the mask into two
+   ROIs, acting as head and tail. Quality check is strongly recommended.
+
+Optional:
+  Concerning metrics:
+   - gt_mask: expected result. OL and OR metrics will be computed from this.*
+
+  Concerning inclusion criteria (other streamlines will be WPC):
+   - all_mask: ROI serving as "all" criteria: to be included in the bundle,
+     ALL points of a streamline must be inside the mask.*
+   - any_mask: ROI serving as "any" criteria: streamlines
+     must touch that mask in at least one point ("any" point) to be included
+     in the bundle.
+   - angle: angle criteria. Streamlines containing loops and sharp turns above
+     the given angle will be rejected from the bundle.
+   - length: maximum and minimum lengths per bundle.
+   - length_x / length_x_abs: maximum and minimum total distance in the x
+     direction (i.e. first coordinate).**
+   - length_y / length_y_abs: maximum and minimum total distance in the y
+     direction (i.e. second coordinate).**
+   - length_z / length_z_abs: maximum and minimum total distance in the z
+     direction (i.e. third coordinate).**
+
+* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will
+be created. If it is a nifti file, it will be considered to be a mask.
+** With absolute values: coming back on yourself will contribute to the total
+distance instead of cancelling it.
+
+Example config file:
+{
+  "Ground_truth_bundle_0": {
+    "gt_mask": "PATH/bundle0.nii.gz",
+    "angle": 300,
+    "length": [140, 150],
+    "endpoints": "PATH/file1.nii.gz"
+  }
+}
+
+Tractometry
+-----------
+Global connectivity metrics:
+
+- Computed by default:
+    - VS: valid streamlines, belonging to a bundle (i.e. respecting all the
+      criteria for that bundle; endpoints, limit_mask, gt_mask.).
+    - IS: invalid streamlines. All other streamlines. IS = IC + NC.
+
+- Optional:
+    - WPC: wrong path connections, streamlines connecting correct ROIs but not
+      respecting the other criteria for that bundle. Such streamlines always
+      exist, but they are only saved separately if specified in the options.
+      Else, they are merged back with the IS.
+      By definition, WPC are only computed if "limits masks" are provided.
+    - IC: invalid connections, streamlines joining an incorrect combination of
+      ROIs. Use carefully, quality depends on the quality of your ROIs and no
+      analysis is done on the shape of the streamlines.
+    - NC: no connections. Invalid streamlines minus invalid connections.
+
+- Fidelity metrics:
+    - OL: Overlap. Percentage of ground truth voxels containing streamline(s)
+      for a given bundle.
+    - OR: Overreach. Amount of voxels containing streamline(s) when they
+      shouldn't, for a given bundle. We compute two versions:
+      OR_pct_vs = divided by the total number of voxels covered by the bundle
+          (percentage of the voxels touched by VS).
+          Values range between 0 and 100%. Values are not defined when we
+          recovered no streamline for a bundle, but we set the OR_pct_vs to 0
+          in that case.
+      OR_pct_gt = divided by the total size of the ground truth bundle mask.
+          Values could be higher than 100%.
+    - f1 score: which is the same as the Dice score.
+
+positional arguments:
+  in_tractogram         Input tractogram to score.
+  gt_config             .json dict configured as specified above.
+  out_dir               Output directory for the resulting segmented bundles.
+
+options:
+  -h, --help            show this help message and exit
+  --json_prefix p       Prefix of the two output json files. Ex: 'study_x_'. Files will be saved inside out_dir.
+                        Suffixes will be 'processing_stats.json' and 'results.json'.
+  --no_empty            Do not write file if there is no streamline.
+  --no_bbox_check       Activate to ignore validity of the bounding box during loading / saving of
+                        tractograms (ignores the presence of invalid streamlines).
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Additions to gt_config:
+  --gt_dir DIR          Root path of the ground truth files listed in the gt_config.
+                        If not set, filenames in the config file are considered
+                        as absolute paths.
+  --use_gt_masks_as_all_masks
+                        If set, the gt_config's 'gt_mask' will also be used as
+                        'all_mask' for each bundle. Note that this means the
+                        OR will necessarily be 0.
+
+Preprocessing:
+  --dilate_endpoints NB_PASS
+                        Dilate endpoint masks n times. Default: 0.
+  --remove_invalid      Remove invalid streamlines before scoring.
+
+Tractometry choices:
+  --save_wpc_separately
+                        If set, streamlines rejected from VC based on the config
+                        file criteria will be saved separately from IS (and IC)
+                        in one file *_wpc.tck per bundle.
+  --compute_ic          If set, IS are split into NC + IC, where IC are computed as one bundle per
+                        pair of ROIs not belonging to a true connection, named
+                        *_*_IC.tck.
+  --unique              If set, streamlines are assigned to the first bundle they fit in and not to all.
+  --remove_wpc_belonging_to_another_bundle
+                        If set, WPC actually belonging to any VB (in the
+                        case of overlapping ROIs) will be removed
+                        from the WPC classification.
+
+Json options:
+  --indent INDENT       Indent for json pretty print.
+  --sort_keys           Sort keys in output json.
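+
+For illustration, a hypothetical call (filenames are placeholders) scoring a
+whole-brain tractogram with unique bundle assignment could be:
+
+$ scil_tractogram_segment_and_score.py tractogram.trk gt_config.json \
+    out_scoring/ --gt_dir ground_truth/ --compute_ic --unique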
diff --git a/scripts/.hidden/scil_tractogram_segment_bundles.py.help b/scripts/.hidden/scil_tractogram_segment_bundles.py.help
new file mode 100644
index 000000000..567dd9376
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_segment_bundles.py.help
@@ -0,0 +1,65 @@
+usage: scil_tractogram_segment_bundles.py [-h] [--out_dir OUT_DIR]
+                                          [--minimal_vote_ratio MINIMAL_VOTE_RATIO]
+                                          [--seed SEED] [--inverse]
+                                          [--reference REFERENCE]
+                                          [--processes NBR]
+                                          [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                          in_tractograms [in_tractograms ...]
+                                          in_config_file in_directory
+                                          in_transfo
+
+Compute BundleSeg, with support for multi-atlas & multi-parameters (RBx-like).
+The model needs to be cleaned and lightweight.
+The transform should come from ANTs (using the --inverse flag):
+AntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF
+
+If you are not sure about the transformation 'direction', you can try
+scil_tractogram_segment_bundles.py (with the -v option); a warning will pop up
+if the provided transformation is not used correctly.
+
+The number of folders inside 'models_directories' will increase the number of
+runs. Each folder is considered as an atlas, and the bundles inside will
+initiate more BundleSeg executions. The more atlases you have, the more robust
+the recognition will be.
+
+--minimal_vote_ratio is a value between 0 and 1. If you have 5 input model
+directories and a minimal_vote_ratio of 0.5, you will need at least 3 votes.
+
+Example data and usage available at: https://zenodo.org/record/7950602
+
+For RAM usage, it is recommended to use this heuristic:
+    (size of input tractograms (GB) * number of processes) < RAM (GB)
+This is important because many instances of data structures are initialized
+in parallel and can lead to a RAM overflow.
+
+Formerly: scil_recognize_multi_bundles.py
+
+positional arguments:
+  in_tractograms        Input tractogram filename (.trk or .tck).
+  in_config_file        Path of the config file (.json).
+  in_directory          Path of the parent folder of the models directories.
+                        Each folder inside will be considered as a different atlas.
+  in_transfo            Path for the transformation to model space (.txt, .npy or .mat).
+
+options:
+  -h, --help            show this help message and exit
+  --out_dir OUT_DIR     Path for the output directory [voting_results].
+  --minimal_vote_ratio MINIMAL_VOTE_RATIO
+                        Streamlines will only be considered for saving if
+                        recognized often enough [0.5].
+  --seed SEED           Random number generator seed [0].
+  --inverse             Use the inverse transformation.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  --processes NBR       Number of sub-processes to start.
+                        Default: [1]
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+[1] St-Onge, Etienne, Kurt G. Schilling, and Francois Rheault.
+"BundleSeg: A versatile, reliable and reproducible approach to white
+matter bundle segmentation." International Workshop on Computational
+Diffusion MRI.
+Cham: Springer Nature Switzerland (2023)
diff --git a/scripts/.hidden/scil_tractogram_segment_bundles_for_connectivity.py.help b/scripts/.hidden/scil_tractogram_segment_bundles_for_connectivity.py.help
new file mode 100644
index 000000000..a966fa827
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_segment_bundles_for_connectivity.py.help
@@ -0,0 +1,105 @@
+usage: scil_tractogram_segment_bundles_for_connectivity.py [-h] [--no_pruning]
+                                                           [--no_remove_loops]
+                                                           [--no_remove_outliers]
+                                                           [--no_remove_curv_dev]
+                                                           [--min_length MIN_LENGTH]
+                                                           [--max_length MAX_LENGTH]
+                                                           [--outlier_threshold OUTLIER_THRESHOLD]
+                                                           [--loop_max_angle LOOP_MAX_ANGLE]
+                                                           [--curv_qb_distance CURV_QB_DISTANCE]
+                                                           [--out_dir OUT_DIR]
+                                                           [--save_raw_connections]
+                                                           [--save_intermediate]
+                                                           [--save_discarded]
+                                                           [--out_labels_list OUT_FILE]
+                                                           [--reference REFERENCE]
+                                                           [--no_bbox_check]
+                                                           [--processes NBR]
+                                                           [-v [{DEBUG,INFO,WARNING}]]
+                                                           [-f]
+                                                           in_tractograms
+                                                           [in_tractograms ...]
+                                                           in_labels out_hdf5
+
+Compute a connectivity matrix from a tractogram and a parcellation.
+
+The current strategy is to keep the longest streamline segment connecting 2
+regions. If the streamline crosses other gray matter regions before reaching
+its final connected region, the kept connection is still the longest. This is
+robust to compressed streamlines.
+
+The output file is an hdf5 (.h5) where the keys are 'LABEL1_LABEL2' and each
+group is composed of 'data', 'offsets' and 'lengths' from the array_sequence.
+The 'data' is stored in VOX/CORNER for simplicity and efficiency. See the
+script scil_tractogram_convert_hdf5_to_trk.py to convert to a list of .trk
+bundles.
+
+For the --outlier_threshold option, the default is a recommended good
+trade-off for a FreeSurfer parcellation. With smaller parcels (Brainnetome,
+Glasser) the threshold should most likely be reduced.
+
+Good candidate connections to QC are the brainstem to precentral gyrus
+connection and the precentral left to precentral right connection, or the
+equivalent in your parcellation.
+
+NOTE: this script can take a while to run. Please be patient.
+Example: on a tractogram with 1.8M streamlines, running on an SSD:
+- 15 minutes without post-processing, only saving final bundles.
+- 30 minutes with full post-processing, only saving final bundles.
+- 60 minutes with full post-processing, saving all possible files.
+
+Formerly: scil_decompose_connectivity.py
+
+positional arguments:
+  in_tractograms        Tractogram filenames. Format must be one of
+                        trk, tck, vtk, fib, dpy.
+  in_labels             Labels file name (nifti). Labels must have 0 as background.
+  out_hdf5              Output hdf5 file (.h5).
+
+options:
+  -h, --help            show this help message and exit
+  --out_labels_list OUT_FILE
+                        Save the labels list as text file.
+                        Needed for scil_connectivity_compute_matrices.py and others.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  --no_bbox_check       Activate to ignore validity of the bounding box during loading / saving of
+                        tractograms (ignores the presence of invalid streamlines).
+  --processes NBR       Number of sub-processes to start.
+                        Default: [1]
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Post-processing options:
+  --no_pruning          If set, will NOT prune on length.
+                        Length criteria in --min_length, --max_length.
+  --no_remove_loops     If set, will NOT remove streamlines making loops.
+                        Angle criteria based on --loop_max_angle.
+  --no_remove_outliers  If set, will NOT remove outliers using QB.
+                        Criteria based on --outlier_threshold.
+  --no_remove_curv_dev  If set, will NOT remove streamlines that deviate from the mean curvature.
+                        Threshold based on --curv_qb_distance.
+
+Pruning options:
+  --min_length MIN_LENGTH
+                        Pruning minimal segment length. [20.0]
+  --max_length MAX_LENGTH
+                        Pruning maximal segment length. [200.0]
+
+Outliers and loops options:
+  --outlier_threshold OUTLIER_THRESHOLD
+                        Outlier removal threshold when using hierarchical QB. [0.6]
+  --loop_max_angle LOOP_MAX_ANGLE
+                        Maximal winding angle over which a streamline is considered as looping. [330.0]
+  --curv_qb_distance CURV_QB_DISTANCE
+                        Clustering threshold for centroids curvature filtering with QB. [10.0]
+
+Saving options:
+  --out_dir OUT_DIR     Output directory for each connection as a separate file (.trk).
+  --save_raw_connections
+                        If set, will save all raw cut connections in a subdirectory.
+  --save_intermediate   If set, will save the intermediate results of filtering.
+  --save_discarded      If set, will save discarded streamlines in subdirectories.
+                        Includes loops, outliers and qb_loops.
diff --git a/scripts/.hidden/scil_tractogram_segment_one_bundle.py.help b/scripts/.hidden/scil_tractogram_segment_one_bundle.py.help
new file mode 100644
index 000000000..604735985
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_segment_one_bundle.py.help
@@ -0,0 +1,62 @@
+usage: scil_tractogram_segment_one_bundle.py [-h]
+                                             [--tractogram_clustering_thr TRACTOGRAM_CLUSTERING_THR]
+                                             [--model_clustering_thr MODEL_CLUSTERING_THR]
+                                             [--pruning_thr PRUNING_THR]
+                                             [--slr_threads SLR_THREADS]
+                                             [--seed SEED] [--inverse]
+                                             [--no_empty]
+                                             [--in_pickle IN_PICKLE | --out_pickle OUT_PICKLE]
+                                             [--reference REFERENCE]
+                                             [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                             in_tractogram in_model in_transfo
+                                             out_tractogram
+
+Compute a simple Recobundles (single-atlas & single-parameters).
+The model needs to be cleaned and lightweight.
+The transform should come from ANTs (using the --inverse flag):
+AntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF
+
+If you are unsure about the transformation 'direction', use the verbose
+option (-v) and try with and without the --inverse flag. If you are not using
+the right transformation 'direction', a warning will pop up. If there is no
+warning in either case, it means the transformation is very close to identity
+and both 'directions' will work.
+
+Formerly: scil_recognize_single_bundles.py
+
+positional arguments:
+  in_tractogram         Input tractogram filename.
+  in_model              Model to use for recognition.
+  in_transfo            Path for the transformation to model space (.txt, .npy or .mat).
+  out_tractogram        Output tractogram filename.
+
+options:
+  -h, --help            show this help message and exit
+  --tractogram_clustering_thr TRACTOGRAM_CLUSTERING_THR
+                        Clustering threshold used for the whole brain [8mm].
+  --model_clustering_thr MODEL_CLUSTERING_THR
+                        Clustering threshold used for the model [4mm].
+  --pruning_thr PRUNING_THR
+                        MDF threshold used for final streamlines selection [6mm].
+  --slr_threads SLR_THREADS
+                        Number of threads for SLR [1].
+  --seed SEED           Random number generator seed [None].
+  --inverse             Use the inverse transformation.
+  --no_empty            Do not write file if there is no streamline.
+  --in_pickle IN_PICKLE
+                        Input pickle clusters map file.
+                        Will override the tractogram_clustering_thr parameter.
+  --out_pickle OUT_PICKLE
+                        Output pickle clusters map file.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Garyfallidis, E., Cote, M. A., Rheault, F., ... &
+Descoteaux, M. (2018). Recognition of white matter
+bundles using local and global streamline-based registration and
+clustering. NeuroImage, 170, 283-295.
diff --git a/scripts/.hidden/scil_tractogram_shuffle.py.help b/scripts/.hidden/scil_tractogram_shuffle.py.help
new file mode 100644
index 000000000..88d645f66
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_shuffle.py.help
@@ -0,0 +1,22 @@
+usage: scil_tractogram_shuffle.py [-h] [--seed SEED] [--reference REFERENCE]
+                                  [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                  in_tractogram out_tractogram
+
+Shuffle the ordering of streamlines.
+
+Formerly: scil_shuffle_streamlines.py
+
+positional arguments:
+  in_tractogram         Input tractography file.
+  out_tractogram        Output tractography file.
+
+options:
+  -h, --help            show this help message and exit
+  --seed SEED           Random number generator seed [None].
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_smooth.py.help b/scripts/.hidden/scil_tractogram_smooth.py.help
new file mode 100644
index 000000000..06983fed4
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_smooth.py.help
@@ -0,0 +1,51 @@
+usage: scil_tractogram_smooth.py [-h]
+                                 (--gaussian SIGMA | --spline SIGMA NB_CTRL_POINT)
+                                 [--compress [COMPRESS_TH]]
+                                 [--reference REFERENCE]
+                                 [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                 in_tractogram out_tractogram
+
+This script will smooth the streamlines, usually to remove the 'wiggles' in
+probabilistic tracking.
+
+Two choices of methods are available:
+- Gaussian will use the surrounding coordinates for smoothing. Streamlines are
+  resampled to 1mm step-size and the smoothing is performed on the coordinate
+  array. The sigma is indicative of the number of points surrounding the
+  center points to be used for blurring.
+- Spline will fit a spline curve to every streamline using a sigma and the
+  number of control points. The sigma represents the allowed distance from the
+  control points. The final streamlines are obtained by evaluating the spline
+  at constant intervals so that they will have the same number of points as
+  initially.
+
+This script enforces endpoints to remain the same.
+
+WARNING:
+- too low a sigma (e.g. 1) with a lot of control points (e.g. 15)
+  will create crazy streamlines that could end up out of the bounding box.
+- data_per_point will be lost.
+
+Formerly: scil_smooth_streamlines.py
+
+positional arguments:
+  in_tractogram         Input tractography file.
+  out_tractogram        Output tractography file.
+
+options:
+  -h, --help            show this help message and exit
+  --gaussian SIGMA      Sigma for smoothing. Use the value of surrounding
+                        X,Y,Z points on the streamline to blur the streamlines.
+                        A good sigma choice would be around 5.
+  --spline SIGMA NB_CTRL_POINT
+                        Sigma for smoothing. Model each streamline as a spline.
+                        A good sigma choice would be around 5 and control points around 10.
+  --compress [COMPRESS_TH]
+                        If set, compress the resulting streamline.
+                        Value is the maximum compression distance in mm. [0.1]
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_split.py.help b/scripts/.hidden/scil_tractogram_split.py.help
new file mode 100644
index 000000000..3b58901c5
--- /dev/null
+++ b/scripts/.hidden/scil_tractogram_split.py.help
@@ -0,0 +1,48 @@
+usage: scil_tractogram_split.py [-h] [--out_dir OUT_DIR]
+                                (--chunk_size CHUNK_SIZE | --nb_chunks NB_CHUNKS)
+                                [--split_per_cluster | --do_not_randomize]
+                                [--qbx_thresholds t [t ...]] [--seed SEED]
+                                [--reference REFERENCE]
+                                [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                in_tractogram out_prefix
+
+Split a tractogram into multiple files; two options are available:
+split into X files, or split into files of Y streamlines.
+
+By default, streamlines to add to each chunk will be chosen randomly.
+Optionally, you can split streamlines...
+    - sequentially (the first n/nb_chunks streamlines in the first chunk and
+      so on).
+    - randomly, but per Quickbundles clusters.
+
+Formerly: scil_split_tractogram.py
+
+positional arguments:
+  in_tractogram         Tractogram input file name.
+  out_prefix            Prefix for the output tractogram; an index will be appended
+                        automatically (e.g., _0.trk), based on the input type.
+
+options:
+  -h, --help            show this help message and exit
+  --out_dir OUT_DIR     Put all output tractograms in a specific directory.
+  --chunk_size CHUNK_SIZE
+                        The maximum number of streamlines per file.
+  --nb_chunks NB_CHUNKS
+                        Divide the file in equal parts.
+  --split_per_cluster   If set, splitting will be done per cluster (computed with
+                        Quickbundles) to ensure that at least some streamlines are
+                        kept from each bundle in each chunk. Else, random splitting is
+                        performed (default).
+  --do_not_randomize    If set, splitting is done sequentially through the original
+                        sft instead of using random indices.
+  --qbx_thresholds t [t ...]
+                        If you choose option '--split_per_cluster', you may set the
+                        QBx threshold value(s) here. Default: [40, 30, 20]
+  --seed SEED           Use a specific random seed for the subsampling.
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_viz_bingham_fit.py.help b/scripts/.hidden/scil_viz_bingham_fit.py.help
new file mode 100644
index 000000000..af1f892cf
--- /dev/null
+++ b/scripts/.hidden/scil_viz_bingham_fit.py.help
@@ -0,0 +1,38 @@
+usage: scil_viz_bingham_fit.py [-h] [--slice_index SLICE_INDEX]
+                               [--win_dims WIDTH HEIGHT]
+                               [--interactor {image,trackball}]
+                               [--axis_name {sagittal,coronal,axial}]
+                               [--silent] [--output OUTPUT]
+                               [-v [{DEBUG,INFO,WARNING}]] [-f]
+                               [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]
+                               [--color_per_lobe]
+                               in_bingham
+
+Visualize a 2-dimensional Bingham volume slice loaded from disk. The volume is
+assumed to be saved from scil_fodf_to_bingham.py.
+
+Given an image of Bingham coefficients, this script displays a slice in a
+given orientation.
+
+positional arguments:
+  in_bingham            Input SH image file.
+
+options:
+  -h, --help            show this help message and exit
+  --slice_index SLICE_INDEX
+                        Index of the slice to visualize along a given axis. Defaults to middle of volume.
+  --win_dims WIDTH HEIGHT
+                        The dimensions for the vtk window. [(768, 768)]
+  --interactor {image,trackball}
+                        Specify interactor mode for vtk window. [trackball]
+  --axis_name {sagittal,coronal,axial}
+                        Name of the axis to visualize. [axial]
+  --silent              Disable interactive visualization.
+  --output OUTPUT       Path to output file.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+  --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}
+                        Name of the sphere used to reconstruct SF. [symmetric362]
+  --color_per_lobe      Color each Bingham distribution with a different color. [False]
diff --git a/scripts/.hidden/scil_viz_bundle.py.help b/scripts/.hidden/scil_viz_bundle.py.help
new file mode 100644
index 000000000..57d58effb
--- /dev/null
+++ b/scripts/.hidden/scil_viz_bundle.py.help
@@ -0,0 +1,56 @@
+usage: scil_viz_bundle.py [-h]
+                          [--random_coloring SEED | --uniform_coloring R G B | --local_coloring | --color_dict JSON | --color_from_streamlines KEY | --color_from_points KEY]
+                          [--shape {line,tube}] [--width WIDTH]
+                          [--subsample SUBSAMPLE] [--downsample DOWNSAMPLE]
+                          [--background R G B] [-v [{DEBUG,INFO,WARNING}]]
+                          in_bundles [in_bundles ...]
+
+Visualize bundles.
+
+Example usages:
+
+# Visualize streamlines as tubes, each bundle with a different color
+>>> scil_viz_bundle.py path_to_bundles/ --shape tube --random_coloring 1337
+
+# Visualize a tractogram with streamlines drawn as lines, colored with
+# their local orientation, but only load 1 in 10 streamlines
+>>> scil_viz_bundle.py tractogram.trk --shape line --subsample 10
+
+# Visualize CSTs as large tubes and color them from a list of colors in a file
+>>> scil_viz_bundle.py path_to_bundles/CST_* --width 0.5
+    --color_dict colors.json
+
+positional arguments:
+  in_bundles            List of tractography files supported by nibabel.
+
+options:
+  -h, --help            show this help message and exit
+  --shape {line,tube}   Display streamlines either as lines or tubes.
+                        [Default: tube]
+  --width WIDTH         Width of tubes or lines representing streamlines.
+                        [Default: 0.25]
+  --subsample SUBSAMPLE
+                        Only load 1 in N streamlines.
+                        [Default: 1]
+  --downsample DOWNSAMPLE
+                        Downsample streamlines to N points.
+                        [Default: None]
+  --background R G B    RGB values [0, 255] of the color of the background.
+                        [Default: [0, 0, 0]]
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+
+Colouring options:
+  --random_coloring SEED
+                        Assign a random color to bundles.
+  --uniform_coloring R G B
+                        Assign a uniform color to streamlines.
+  --local_coloring      Assign coloring to streamlines depending on their local orientations.
+  --color_dict JSON     JSON file containing colors for each bundle.
+                        Bundle filenames are indicated as keys and colors as values.
+                        A 'default' key and value can be included.
+  --color_from_streamlines KEY
+                        Extract a color per streamline from the data_per_streamline property of the tractogram at the specified key.
+  --color_from_points KEY
+                        Extract a color per point from the data_per_point property of the tractogram at the specified key.
diff --git a/scripts/.hidden/scil_viz_bundle_screenshot_mni.py.help b/scripts/.hidden/scil_viz_bundle_screenshot_mni.py.help
new file mode 100644
index 000000000..2eb32fe86
--- /dev/null
+++ b/scripts/.hidden/scil_viz_bundle_screenshot_mni.py.help
@@ -0,0 +1,48 @@
+usage: scil_viz_bundle_screenshot_mni.py [-h]
+                                         [--target_template TARGET_TEMPLATE]
+                                         [--local_coloring | --uniform_coloring R G B | --reference_coloring COLORBAR]
+                                         [--roi ROI [ROI ...]] [--right]
+                                         [--anat_opacity ANAT_OPACITY]
+                                         [--output_suffix OUTPUT_SUFFIX]
+                                         [--out_dir OUT_DIR]
+                                         [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                         in_bundle in_anat
+
+Register a bundle to a template for screenshots using a reference.
+The template can be any MNI152 (any resolution, cropped or not).
+If your in_anat has a skull, select a MNI152 template with a skull and
+vice-versa.
+
+If the bundle is already in MNI152 space, do not use --target_template.
+
+Axial, coronal and sagittal slices are captured.
+Sagittal can be captured from the left (default) or the right.
+
+For the --roi argument: if 1 value is provided, the ROI will be white;
+if 4 values are provided, the ROI will be colored with the RGB values
+provided; if 5 values are provided, it is RGBA (values from 0-255).
+
+positional arguments:
+  in_bundle             Path of the input bundle.
+  in_anat               Path of the reference file (.nii or .nii.gz).
+
+options:
+  -h, --help            show this help message and exit
+  --target_template TARGET_TEMPLATE
+                        Path to the target MNI152 template for registration. If in_anat has a skull, select a MNI152 template with a skull and vice-versa.
+  --local_coloring      Color streamlines using local segments orientation.
+  --uniform_coloring R G B
+                        Color streamlines with uniform coloring.
+  --reference_coloring COLORBAR
+                        Color streamlines with reference coloring (0-255).
+  --roi ROI [ROI ...]   Path to a ROI file (.nii or .nii.gz).
+  --right               Take screenshot from the right instead of the left for the sagittal plane.
+  --anat_opacity ANAT_OPACITY
+                        Set the opacity for the anatomy, use 0 for complete transparency, 1 for opaque. [0.3]
+  --output_suffix OUTPUT_SUFFIX
+                        Add a suffix to the output, else the axis name is used.
+  --out_dir OUT_DIR     Put all images in a specific directory.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_viz_bundle_screenshot_mosaic.py.help b/scripts/.hidden/scil_viz_bundle_screenshot_mosaic.py.help
new file mode 100644
index 000000000..97c2c78ce
--- /dev/null
+++ b/scripts/.hidden/scil_viz_bundle_screenshot_mosaic.py.help
@@ -0,0 +1,49 @@
+usage: scil_viz_bundle_screenshot_mosaic.py [-h] [--uniform_coloring R G B]
+                                            [--random_coloring SEED]
+                                            [--zoom ZOOM] [--ttf TTF]
+                                            [--ttf_size TTF_SIZE]
+                                            [--opacity_background OPACITY_BACKGROUND]
+                                            [--resolution_of_thumbnails RESOLUTION_OF_THUMBNAILS]
+                                            [--light_screenshot]
+                                            [--no_information]
+                                            [--no_bundle_name]
+                                            [--no_streamline_number]
+                                            [--reference REFERENCE]
+                                            [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                            in_volume in_bundles
+                                            [in_bundles ...] out_image
+
+Visualize bundles from a list. The script will output a mosaic (image) with
+screenshots, 6 views per bundle in the list.
+
+positional arguments:
+  in_volume             Volume used as background (e.g. T1, FA, b0).
+  in_bundles            List of tractography files supported by nibabel or binary mask files.
+  out_image             Name of the output image mosaic (e.g. mosaic.jpg, mosaic.png).
+
+options:
+  -h, --help            show this help message and exit
+  --uniform_coloring R G B
+                        Assign a uniform color to streamlines (or ROIs).
+  --random_coloring SEED
+                        Assign a random color to streamlines (or ROIs).
+  --zoom ZOOM           Rendering zoom. A value greater than 1 is a zoom-in,
+                        a value less than 1 is a zoom-out [1.0].
+  --ttf TTF             Path of the true type font to use for legends.
+  --ttf_size TTF_SIZE   Font size (int) to use for the legends [35].
+  --opacity_background OPACITY_BACKGROUND
+                        Opacity of background image, between 0 and 1.0 [0.4].
+  --resolution_of_thumbnails RESOLUTION_OF_THUMBNAILS
+                        Resolution of thumbnails used in mosaic [300].
+  --light_screenshot    Keep only 3 views instead of 6 [False].
+  --no_information      Don't display axis and bundle information [False].
+  --no_bundle_name      Don't display bundle name [False].
+  --no_streamline_number
+                        Don't display the number of streamlines in the bundle [False].
+  --reference REFERENCE
+                        Reference anatomy for tck/vtk/fib/dpy file
+                        support (.nii or .nii.gz).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_viz_connectivity.py.help b/scripts/.hidden/scil_viz_connectivity.py.help
new file mode 100644
index 000000000..e69de29bb
diff --git a/scripts/.hidden/scil_viz_dti_screenshot.py.help b/scripts/.hidden/scil_viz_dti_screenshot.py.help
new file mode 100644
index 000000000..c5f045dc1
--- /dev/null
+++ b/scripts/.hidden/scil_viz_dti_screenshot.py.help
@@ -0,0 +1,30 @@
+usage: scil_viz_dti_screenshot.py [-h] [--shells SHELLS [SHELLS ...]]
+                                  [--out_suffix OUT_SUFFIX]
+                                  [--out_dir OUT_DIR]
+                                  [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                  in_dwi in_bval in_bvec in_template
+
+Register DWI to a template for screenshots.
+The templates are available at http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009
+
+For quick quality control, the MNI template can be downsampled to 2mm iso.
+Axial, coronal and sagittal slices are captured.
+
+positional arguments:
+  in_dwi                Path of the input diffusion volume.
+  in_bval               Path of the bval file, in FSL format.
+  in_bvec               Path of the bvec file, in FSL format.
+  in_template           Path to the target MNI152 template for
+                        registration, use the one provided online.
+
+options:
+  -h, --help            show this help message and exit
+  --shells SHELLS [SHELLS ...]
+                        Shells to use for DTI fit (usually below 1200), b0 must be listed.
+  --out_suffix OUT_SUFFIX
+                        Add a suffix to the output, else the axis name is used.
+  --out_dir OUT_DIR     Put all images in a specific directory.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
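As an illustration, a hypothetical invocation (all file names are
placeholders; per the option description above, the b0 must be listed among
the shells):

>>> scil_viz_dti_screenshot.py dwi.nii.gz dwi.bval dwi.bvec
    mni152_t1_2mm.nii.gz --shells 0 1000 --out_dir qc_screenshots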
diff --git a/scripts/.hidden/scil_viz_fodf.py.help b/scripts/.hidden/scil_viz_fodf.py.help
new file mode 100644
index 000000000..d79004688
--- /dev/null
+++ b/scripts/.hidden/scil_viz_fodf.py.help
@@ -0,0 +1,119 @@
+usage: scil_viz_fodf.py [-h] [--slice_index SLICE_INDEX]
+                        [--win_dims WIDTH HEIGHT]
+                        [--interactor {image,trackball}]
+                        [--axis_name {sagittal,coronal,axial}] [--silent]
+                        [--in_transparency_mask IN_TRANSPARENCY_MASK]
+                        [--output OUTPUT] [-f]
+                        [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
+                        [--sphere {repulsion200,repulsion100,symmetric724,symmetric642,symmetric362,repulsion724}]
+                        [--sph_subdivide SPH_SUBDIVIDE] [--mask MASK]
+                        [--colormap COLORMAP | --color_rgb COLOR_RGB COLOR_RGB COLOR_RGB]
+                        [--scale SCALE] [--radial_scale_off] [--norm_off]
+                        [-v [{DEBUG,INFO,WARNING}]] [--background BACKGROUND]
+                        [--bg_range MIN MAX] [--bg_opacity BG_OPACITY]
+                        [--bg_offset BG_OFFSET]
+                        [--bg_interpolation {nearest,linear}]
+                        [--bg_color BG_COLOR BG_COLOR BG_COLOR]
+                        [--peaks PEAKS]
+                        [--peaks_color PEAKS_COLOR PEAKS_COLOR PEAKS_COLOR]
+                        [--peaks_width PEAKS_WIDTH]
+                        [--peaks_values PEAKS_VALUES | --peaks_length PEAKS_LENGTH]
+                        [--variance VARIANCE] [--variance_k VARIANCE_K]
+                        [--var_color VAR_COLOR VAR_COLOR VAR_COLOR]
+                        in_fodf
+
+Visualize a 2-dimensional fODF slice loaded from disk.
+
+Given an image of SH coefficients, this script displays a slice in a
+given orientation. The user can also add a background on top of which the
+fODF are to be displayed. Using a full SH basis, the script can be used to
+visualize asymmetric fODF. The user can supply a peaks image to visualize
+peaks on top of fODF.
+
+If a transparency mask is given (e.g. a brain mask), all values outside the
+mask's non-zero region are set to full transparency in the saved scene.
+
+!!! CAUTION !!! The script is memory intensive (about 9 kB of allocated RAM
+per voxel, or 9 GB for a 1M-voxel volume) with a sphere interpolated to 362
+points.
+
+positional arguments:
+  in_fodf               Input SH image file.
+
+options:
+  -h, --help            show this help message and exit
+  --slice_index SLICE_INDEX
+                        Index of the slice to visualize along a given axis. Defaults to middle of volume.
+  --win_dims WIDTH HEIGHT
+                        The dimensions for the vtk window. [(768, 768)]
+  --interactor {image,trackball}
+                        Specify interactor mode for vtk window. [trackball]
+  --axis_name {sagittal,coronal,axial}
+                        Name of the axis to visualize. [axial]
+  --silent              Disable interactive visualization.
+  --in_transparency_mask IN_TRANSPARENCY_MASK
+                        Input mask image file.
+  --output OUTPUT       Path to output file.
+  -f                    Force overwriting of the output files.
+  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
+                        Spherical harmonics basis used for the SH coefficients.
+                        Must be either 'descoteaux07', 'tournier07',
+                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
+                            'descoteaux07'       : SH basis from the Descoteaux et al.
+                                                   MRM 2007 paper
+                            'tournier07'         : SH basis from the new Tournier et al.
+                                                   NeuroImage 2019 paper, as in MRtrix 3.
+                            'descoteaux07_legacy': SH basis from the legacy Dipy implementation
+                                                   of the Descoteaux et al. MRM 2007 paper
+                            'tournier07_legacy'  : SH basis from the legacy Tournier et al.
+                                                   NeuroImage 2007 paper.
+  --sphere {repulsion200,repulsion100,symmetric724,symmetric642,symmetric362,repulsion724}
+                        Name of the sphere used to reconstruct SF. [symmetric362]
+  --sph_subdivide SPH_SUBDIVIDE
+                        Number of subdivisions for given sphere. If not supplied, use the given sphere as is.
+  --mask MASK           Optional mask file. Only fODF inside the mask are displayed.
+  --colormap COLORMAP   Colormap for the ODF slicer. If None, then an RGB colormap will be used. [None]
+  --color_rgb COLOR_RGB COLOR_RGB COLOR_RGB
+                        Uniform color for the ODF slicer given as RGB, scaled between 0 and 1. [None]
+  --scale SCALE         Scaling factor for FODF. [0.5]
+  --radial_scale_off    Disable radial scale for ODF slicer.
+  --norm_off            Disable normalization of ODF slicer.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+
+Background arguments:
+  --background BACKGROUND
+                        Background image file. If RGB, values must be between 0 and 255.
+  --bg_range MIN MAX    The range of values mapped to range [0, 1] for background image. [(bg.min(), bg.max())]
+  --bg_opacity BG_OPACITY
+                        The opacity of the background image. Opacity of 0.0 means transparent and 1.0 is completely visible. [1.0]
+  --bg_offset BG_OFFSET
+                        The offset of the background image. [0.5]
+  --bg_interpolation {nearest,linear}
+                        Interpolation mode for the background image. [nearest]
+  --bg_color BG_COLOR BG_COLOR BG_COLOR
+                        The color of the overall background, behind everything. Must be RGB values scaled between 0 and 1. [(0, 0, 0)]
+
+Peaks arguments:
+  --peaks PEAKS         Peaks image file.
+  --peaks_color PEAKS_COLOR PEAKS_COLOR PEAKS_COLOR
+                        Color used for peaks, as RGB values scaled between 0 and 1. If None, then an RGB colormap is used. [None]
+  --peaks_width PEAKS_WIDTH
+                        Width of peaks segments. [1.0]
+
+Peaks scaling arguments:
+  Choose between peaks values and arbitrary length.
+
+  --peaks_values PEAKS_VALUES
+                        Peaks values file.
+  --peaks_length PEAKS_LENGTH
+                        Length of the peaks segments. [0.65]
+
+Variance arguments:
+  For the visualization of fODF uncertainty, the variance is used as follows: mean + k * sqrt(variance), where mean is the input fODF (in_fodf) and k is the scaling factor (variance_k).
+
+  --variance VARIANCE   FODF variance file.
+  --variance_k VARIANCE_K
+                        Scaling factor (k) for the computation of the fODF uncertainty. [1]
+  --var_color VAR_COLOR VAR_COLOR VAR_COLOR
+                        Color of variance outline. Must be RGB values scaled between 0 and 1. [(1, 1, 1)]
diff --git a/scripts/.hidden/scil_viz_gradients_screenshot.py.help b/scripts/.hidden/scil_viz_gradients_screenshot.py.help
new file mode 100644
index 000000000..074c16285
--- /dev/null
+++ b/scripts/.hidden/scil_viz_gradients_screenshot.py.help
@@ -0,0 +1,38 @@
+usage: scil_viz_gradients_screenshot.py [-h]
+                                        (--in_gradient_scheme IN_GRADIENT_SCHEME [IN_GRADIENT_SCHEME ...] | --dipy_sphere {symmetric362,symmetric642,symmetric724,repulsion724,repulsion100,repulsion200})
+                                        [--dis-sym]
+                                        [--out_basename OUT_BASENAME]
+                                        [--res RES] [--dis-sphere]
+                                        [--dis-proj] [--plot_shells]
+                                        [--same-color] [--opacity OPACITY]
+                                        [-v [{DEBUG,INFO,WARNING}]] [-f]
+
+Visualization of directions on a sphere, either from a gradient sampling
+(i.e. a list of b-vectors) or from a Dipy sphere.
+
+options:
+  -h, --help            show this help message and exit
+  --in_gradient_scheme IN_GRADIENT_SCHEME [IN_GRADIENT_SCHEME ...]
+                        Gradient sampling filename. (only accepts .bvec and
+                        .bval together or only .b).
+  --dipy_sphere {symmetric362,symmetric642,symmetric724,repulsion724,repulsion100,repulsion200}
+                        Dipy sphere choice.
+  --dis-sym             Disable antipodal symmetry.
+  --out_basename OUT_BASENAME
+                        Output picture file name without extension (will be
+                        png file(s)).
+  --res RES             Resolution of the output picture(s).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided
+                        level. Default level is warning, default when using -v
+                        is info.
+  -f                    Force overwriting of the output files.
+
+Enable/Disable renderings:
+  --dis-sphere          Disable the rendering of the sphere.
+  --dis-proj            Disable rendering of the projection supershell.
+  --plot_shells         Enable rendering each shell individually.
+
+Rendering options:
+  --same-color          Use the same color for all shells.
+  --opacity OPACITY     Opacity for the shells.
diff --git a/scripts/.hidden/scil_viz_tractogram_seeds.py.help b/scripts/.hidden/scil_viz_tractogram_seeds.py.help
new file mode 100644
index 000000000..7b8e9c702
--- /dev/null
+++ b/scripts/.hidden/scil_viz_tractogram_seeds.py.help
@@ -0,0 +1,21 @@
+usage: scil_viz_tractogram_seeds.py [-h] [--save SAVE]
+                                    [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                    tractogram
+
+Visualize seeds used to generate the tractogram or bundle.
+When tractography was run, each streamline produced by the tracking algorithm
+saved its seeding point (its origin).
+
+The tractogram must have been generated from scil_tracking_local.py or
+scil_tracking_pft.py with the --save_seeds option.
+
+positional arguments:
+  tractogram            Tractogram file (must be .trk).
+
+options:
+  -h, --help            show this help message and exit
+  --save SAVE           If set, save a screenshot of the result in the specified filename.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_viz_tractogram_seeds_3d.py.help b/scripts/.hidden/scil_viz_tractogram_seeds_3d.py.help
new file mode 100644
index 000000000..352dbfcb7
--- /dev/null
+++ b/scripts/.hidden/scil_viz_tractogram_seeds_3d.py.help
@@ -0,0 +1,46 @@
+usage: scil_viz_tractogram_seeds_3d.py [-h] [--tractogram TRACTOGRAM]
+                                       [--colormap COLORMAP]
+                                       [--seed_opacity SEED_OPACITY]
+                                       [--tractogram_shape {line,tube}]
+                                       [--tractogram_opacity TRACTOGRAM_OPACITY]
+                                       [--tractogram_width TRACTOGRAM_WIDTH]
+                                       [--tractogram_color R G B [R G B ...]]
+                                       [--background R G B [R G B ...]]
+                                       [-v [{DEBUG,INFO,WARNING}]]
+                                       in_seed_map
+
+Visualize seeds as 3D points, with heatmaps corresponding to seed density.
+
+Example usages:
+
+>>> scil_viz_tractogram_seeds_3d.py seeds.nii.gz --tractogram tractogram.trk
+
+positional arguments:
+  in_seed_map           Seed density map.
+
+options:
+  -h, --help            show this help message and exit
+  --tractogram TRACTOGRAM
+                        Tractogram corresponding to the seeds.
+  --colormap COLORMAP   Name of the map for the density coloring. Can be any colormap that matplotlib offers.
+                        [Default: bone]
+  --seed_opacity SEED_OPACITY
+                        Opacity of the contour generated.
+                        [Default: 0.5]
+  --tractogram_shape {line,tube}
+                        Display streamlines either as lines or tubes.
+                        [Default: tube]
+  --tractogram_opacity TRACTOGRAM_OPACITY
+                        Opacity of the streamlines.
+                        [Default: 0.5]
+  --tractogram_width TRACTOGRAM_WIDTH
+                        Width of tubes or lines representing streamlines.
+                        [Default: 0.05]
+  --tractogram_color R G B [R G B ...]
+                        Color for the tractogram.
+  --background R G B [R G B ...]
+                        RGB values [0, 255] of the color of the background.
+                        [Default: [0, 0, 0]]
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
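Building on the example above, a hypothetical invocation combining a few of
the documented options (file names are placeholders):

>>> scil_viz_tractogram_seeds_3d.py seeds.nii.gz --tractogram tractogram.trk
    --colormap hot --seed_opacity 0.8 --tractogram_shape line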
diff --git a/scripts/.hidden/scil_viz_volume_histogram.py.help b/scripts/.hidden/scil_viz_volume_histogram.py.help
new file mode 100644
index 000000000..7ca783940
--- /dev/null
+++ b/scripts/.hidden/scil_viz_volume_histogram.py.help
@@ -0,0 +1,30 @@
+usage: scil_viz_volume_histogram.py [-h] [--title TITLE] [--x_label X_LABEL]
+                                    [--colors COLORS] [--show_only]
+                                    [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                    in_metric in_mask n_bins out_png
+
+Script to display a histogram of a metric (FA, MD, etc.) from a binary mask
+(wm mask, vascular mask, etc.).
+These two images must be coregistered with each other.
+
+>>> scil_viz_volume_histogram.py metric.nii.gz mask_bin.nii.gz 8
+    out_filename_image.png
+
+positional arguments:
+  in_metric             Metric map, e.g. FA, MD, ...
+  in_mask               Binary mask used to extract values.
+  n_bins                Number of bins to use for the histogram.
+  out_png               Output filename for the figure.
+
+options:
+  -h, --help            show this help message and exit
+  --show_only           Do not save the figure, only display.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Histogram options:
+  --title TITLE         Use the provided info for the histogram title. [Histogram]
+  --x_label X_LABEL     Use the provided info for the x axis name.
+  --colors COLORS       Use the provided info for the bars color. [#0504aa]
diff --git a/scripts/.hidden/scil_viz_volume_scatterplot.py.help b/scripts/.hidden/scil_viz_volume_scatterplot.py.help
new file mode 100644
index 000000000..0cd1eb402
--- /dev/null
+++ b/scripts/.hidden/scil_viz_volume_scatterplot.py.help
@@ -0,0 +1,94 @@
+usage: scil_viz_volume_scatterplot.py [-h] [--out_dir OUT_DIR] [--thr THR]
+                                      [--not_exclude_zero]
+                                      [--in_bin_mask IN_BIN_MASK | --in_prob_maps IN_PROB_MAPS IN_PROB_MAPS | --in_atlas IN_ATLAS]
+                                      [--atlas_lut ATLAS_LUT]
+                                      [--specific_label SPECIFIC_LABEL [SPECIFIC_LABEL ...]]
+                                      [--in_folder] [--title TITLE]
+                                      [--x_label X_LABEL] [--y_label Y_LABEL]
+                                      [--label LABEL]
+                                      [--label_prob LABEL_PROB]
+                                      [--marker MARKER]
+                                      [--marker_size MARKER_SIZE]
+                                      [--transparency TRANSPARENCY]
+                                      [--dpi DPI] [--colors color1 color2]
+                                      [--show_only]
+                                      [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                      in_x_map in_y_map out_name
+
+Script to display a scatter plot between two maps (e.g. FA and MD, ihMT and MT).
+By default, no mask is applied to the data.
+Different options are available to mask or threshold data:
+  - a binary mask
+  - two probability maps, which can be used to threshold maps with
+    --in_prob_maps. The same threshold is applied to both maps (--thr).
+  - a parcellation, which can be used to plot values for each region of
+    an atlas (--in_atlas) or a subset of regions (--specific_label).
+    The atlas option requires a json file (--atlas_lut) with the indices and
+    names of each label of the atlas, as follows:
+        "1": "lh_A8m",
+        "2": "rh_A8m",
+    The numbers must correspond to the label indices in the json file.
+
+Be careful: you cannot use all of them at the same time.
+
+For a general scatter plot without a mask:
+>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png
+
+For a scatter plot with a mask:
+>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png
+    --in_bin_mask mask_wm.nii.gz
+
+For a tissue probability scatter plot:
+>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png
+    --in_prob_maps wm_map.nii.gz gm_map.nii.gz
+
+For a scatter plot using an atlas:
+>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png
+    --in_atlas atlas.nii.gz --atlas_lut atlas.json
+
+>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png
+    --in_atlas atlas.nii.gz --atlas_lut atlas.json
+    --specific_label 34 67 87
+
+positional arguments:
+  in_x_map              Map on the x axis, FA for example.
+  in_y_map              Map on the y axis, MD for example.
+  out_name              Output filename for the figure without extension.
+
+options:
+  -h, --help            show this help message and exit
+  --out_dir OUT_DIR     Output directory to save the scatter plot.
+  --thr THR             Use to apply a threshold only on probability maps (same for both maps) with the --in_prob_maps option. [0.9]
+  --not_exclude_zero    Keep zero values in data.
+  --in_bin_mask IN_BIN_MASK
+                        Binary mask. Use this option to extract x and y map values from a specific mask or region: a wm_mask or roi_mask for example.
+  --in_prob_maps IN_PROB_MAPS IN_PROB_MAPS
+                        Probability maps, WM and GM for example.
+  --in_atlas IN_ATLAS   Path to the input atlas image.
+  --show_only           Do not save the figure, only display. Not available with the --in_atlas option.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Atlas options:
+  --atlas_lut ATLAS_LUT
+                        Path of the LUT file corresponding to the atlas, used to name the regions of interest.
+  --specific_label SPECIFIC_LABEL [SPECIFIC_LABEL ...]
+                        List of labels to use for the scatter plot. Labels must correspond to the atlas LUT file. [None]
+  --in_folder           Save label plots in subfolder "Label_plots".
+
+Scatter plot options:
+  --title TITLE         Use the provided info for the title name. [Scatter Plot]
+  --x_label X_LABEL     Use the provided info for the x axis name. [x]
+  --y_label Y_LABEL     Use the provided info for the y axis name. [y]
+  --label LABEL         Use the provided info for the legend box corresponding to the mask or the first probability map. [None]
+  --label_prob LABEL_PROB
+                        Use the provided info for the legend box corresponding to the second probability map. [Threshold prob_map 2]
+  --marker MARKER       Use the provided info for the marker shape. [.]
+  --marker_size MARKER_SIZE
+                        Use the provided info for the marker size. [15]
+  --transparency TRANSPARENCY
+                        Use the provided info for the point transparency. [0.4]
+  --dpi DPI             Use the provided info for the dpi resolution.
[300]
+  --colors color1 color2
diff --git a/scripts/.hidden/scil_viz_volume_screenshot.py.help b/scripts/.hidden/scil_viz_volume_screenshot.py.help
new file mode 100644
index 000000000..e59e04640
--- /dev/null
+++ b/scripts/.hidden/scil_viz_volume_screenshot.py.help
@@ -0,0 +1,118 @@
+usage: scil_viz_volume_screenshot.py [-h]
+                                     [--volume_cmap_name VOLUME_CMAP_NAME]
+                                     [--volume_opacity VOLUME_OPACITY]
+                                     [--transparency TRANSPARENCY]
+                                     [--slices SID [SID ...]]
+                                     [--axis {sagittal,coronal,axial}]
+                                     [--size WIDTH HEIGHT]
+                                     [--display_slice_number] [--display_lr]
+                                     [--labelmap LABELMAP]
+                                     [--labelmap_cmap_name LABELMAP_CMAP_NAME]
+                                     [--labelmap_opacity LABELMAP_OPACITY]
+                                     [--overlays OVERLAYS [OVERLAYS ...]]
+                                     [--overlays_as_contours]
+                                     [--overlays_colors R G B [R G B ...]]
+                                     [--overlays_opacity OVERLAYS_OPACITY]
+                                     [--peaks PEAKS [PEAKS ...]]
+                                     [--peaks_width PEAKS_WIDTH]
+                                     [--peaks_opacity PEAKS_OPACITY]
+                                     [-v [{DEBUG,INFO,WARNING}]]
+                                     volume out_fname
+
+Take screenshot(s) of one or more slices in a given image volume along the
+requested axis. If slice indices are not provided, all slices in the volume
+are used. The names of the output images are suffixed with _slice_{id}, with
+id being the slice number in the volume. If a labelmap image is provided (e.g.
+a tissue segmentation map), it is overlaid on the volume slices. Same goes if
+a mask is provided, with the difference that it can be rendered as a
+transparency overlay as well as a contour.
+
+A labelmap image can be provided as the image volume, without requiring it as
+the optional argument if only the former needs to be plotted.
+
+Example:
+>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_annotated.png
+    --display_slice_number --display_lr
+
+>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_masked.png
+    --transparency brainmask.nii.gz
+
+>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial.png
+    --slices 30 40 50 60 70 80 90 100
+
+>>> scil_viz_volume_screenshot.py t1.nii.gz t1_sagittal.png --axis sagittal
+
+>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_plasma_cmap.png
+    --slices 30 40 50 60 70 80 90 100 --volume_cmap_name plasma
+
+>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_overlay.png
+    --slices 30 40 50 60 70 80 90 100 --overlays brain_mask.nii.gz
+
+>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_contour.png
+    --slices 30 40 50 60 70 80 90 100
+    --overlays brain_mask.nii.gz --overlays_as_contours
+
+>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_map.png
+    --slices 30 40 50 60 70 80 90 100 --labelmap tissue_map.nii.gz
+
+>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_viridis_cmap.png
+    --slices 30 40 50 60 70 80 90 100
+    --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis
+
+>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_peaks.png
+    --slices 30 40 50 60 70 80 90 100 --peaks peaks.nii.gz --volume_opacity 0.5
+
+positional arguments:
+  volume                Input 3D Nifti file (.nii/.nii.gz).
+  out_fname             Name of the output image(s). If multiple slices are provided (or none), their index will be appended to the name (e.g. volume.jpg, volume.png becomes volume_slice_0.jpg, volume_slice_0.png).
+
+options:
+  -h, --help            show this help message and exit
+  --transparency TRANSPARENCY
+                        Transparency Nifti image (.nii/.nii.gz). Can either be a binary mask or a scalar image in the range [0, 1].
+  --size WIDTH HEIGHT   Size of the output image. [(768, 768)]
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+
+Slicing:
+  --slices SID [SID ...]
+                        Slice indices to screenshot. If None are supplied, all slices inside the transparency mask are selected.
+  --axis {sagittal,coronal,axial}
+                        Name of the axis to visualize. [axial]
+
+Input overlays:
+  --labelmap LABELMAP   Input labelmap file (.nii/.nii.gz).
+  --overlays OVERLAYS [OVERLAYS ...]
+                        3D Nifti image(s) to overlay (.nii/.nii.gz).
+  --peaks PEAKS [PEAKS ...]
+                        Peaks Nifti image (.nii/.nii.gz).
+
+Volume rendering:
+  --volume_cmap_name VOLUME_CMAP_NAME
+                        Colormap name for the 3D Nifti image data. [None]
+  --volume_opacity VOLUME_OPACITY
+                        Opacity value for the 3D Nifti image data. [1.0]
+  --labelmap_cmap_name LABELMAP_CMAP_NAME
+                        Colormap name for the labelmap image data. [viridis]
+  --labelmap_opacity LABELMAP_OPACITY
+                        Opacity value for the labelmap image data. [0.5]
+
+Peaks rendering:
+  --peaks_width PEAKS_WIDTH
+                        Width of the peaks lines. [3.0]
+  --peaks_opacity PEAKS_OPACITY
+                        Opacity value for the peaks overlay. [1.0]
+
+Overlay rendering:
+  --overlays_as_contours
+                        Display overlay contours and reduce the opacity of their inner region (see the `--overlays_opacity` argument).
+  --overlays_colors R G B [R G B ...]
+                        Colors for the overlays or contours. You may provide a single color for all overlays/contours, or one color for each. Each color is given as three values: R G B
+  --overlays_opacity OVERLAYS_OPACITY
+                        Opacity value for the mask overlays. When combined with `--overlays_as_contours`, this will be the opacity of the region inside the computed contours. [0.5]
+
+Annotations:
+  --display_slice_number
+                        If true, displays the slice number in the upper left corner.
+  --display_lr          If true, add left and right annotations to the images.
diff --git a/scripts/.hidden/scil_viz_volume_screenshot_mosaic.py.help b/scripts/.hidden/scil_viz_volume_screenshot_mosaic.py.help
new file mode 100644
index 000000000..a780553e0
--- /dev/null
+++ b/scripts/.hidden/scil_viz_volume_screenshot_mosaic.py.help
@@ -0,0 +1,96 @@
+usage: scil_viz_volume_screenshot_mosaic.py [-h]
+                                            [--volume_cmap_name VOLUME_CMAP_NAME]
+                                            [--volume_opacity VOLUME_OPACITY]
+                                            [--axis {sagittal,coronal,axial}]
+                                            [--size WIDTH HEIGHT]
+                                            [--labelmap LABELMAP]
+                                            [--labelmap_cmap_name LABELMAP_CMAP_NAME]
+                                            [--labelmap_opacity LABELMAP_OPACITY]
+                                            [--overlays OVERLAYS [OVERLAYS ...]]
+                                            [--overlays_as_contours]
+                                            [--overlays_colors R G B [R G B ...]]
+                                            [--overlays_opacity OVERLAYS_OPACITY]
+                                            [--overlap rWIDTH rHEIGHT]
+                                            [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                            rows cols volume transparency
+                                            out_fname SID [SID ...]
+
+Compose a mosaic of screenshots of the given image volume slices along the
+requested axis. The provided transparency mask (e.g. a brain mask volume) is
+used to set the screenshot values outside the mask's non-zero values to full
+transparency. Additionally, if a labelmap image is provided (e.g. a tissue
+segmentation map), it is overlaid on the volume slices. Also, a series of
+masks can be provided and will be used to generate contours overlaid on each
+volume slice.
+
+A labelmap image can be provided as the image volume, without requiring it as
+the optional argument if only the former needs to be plotted.
+
+The screenshots are overlapped according to the given factors.
+
+The mosaic supports either horizontal, vertical or matrix arrangements.
+
+Example:
+>>> scil_viz_volume_screenshot_mosaic.py 1 8 t1.nii.gz brain_mask.nii.gz
+    mosaic_overlap_t1_axial.png 30 40 50 60 70 80 90 100
+
+>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz
+    mosaic_overlap_t1_axial_plasma_cmap.png 30 40 50 60 70 80 90 100
+    --overlap 0.6 0.5 --volume_cmap_name plasma
+
+>>> scil_viz_volume_screenshot_mosaic.py 2 4 tissues.nii.gz brain_mask.nii.gz
+    mosaic_overlap_tissues_axial_plasma_cmap.png 30 40 50 60 70 80 90 100
+    --volume_cmap_name plasma
+
+>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz
+    mosaic_overlap_t1_sagittal_tissue_viridis_cmap.png
+    30 40 50 60 70 80 90 100 --axis sagittal
+    --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis
+
+>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz
+    mosaic_overlap_t1_sagittal_tissue_contours.png
+    30 40 50 60 70 80 90 100 --axis sagittal
+    --overlays wm_mask.nii.gz gm_mask.nii.gz csf_mask.nii.gz
+
+positional arguments:
+  rows                  The mosaic row count.
+  cols                  The mosaic column count.
+  volume                Input 3D Nifti file (.nii/.nii.gz).
+  transparency          Transparency Nifti image (.nii/.nii.gz). Can either be a binary mask or a scalar image in the range [0, 1].
+  out_fname             Name of the output image (e.g. img.jpg, img.png).
+  SID                   Slice indices to screenshot.
+
+options:
+  -h, --help            show this help message and exit
+  --axis {sagittal,coronal,axial}
+                        Name of the axis to visualize. [axial]
+  --size WIDTH HEIGHT   Size of the output image. [(768, 768)]
+  --overlap rWIDTH rHEIGHT
+                        The overlap factor as a ratio of each image dimension. [(0.6, 0.0)]
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Input overlays:
+  --labelmap LABELMAP   Input labelmap file (.nii/.nii.gz).
+  --overlays OVERLAYS [OVERLAYS ...]
+                        3D Nifti image(s) to overlay (.nii/.nii.gz).
+
+Volume rendering:
+  --volume_cmap_name VOLUME_CMAP_NAME
+                        Colormap name for the 3D Nifti image data. [None]
+  --volume_opacity VOLUME_OPACITY
+                        Opacity value for the 3D Nifti image data. [1.0]
+  --labelmap_cmap_name LABELMAP_CMAP_NAME
+                        Colormap name for the labelmap image data. [viridis]
+  --labelmap_opacity LABELMAP_OPACITY
+                        Opacity value for the labelmap image data. [0.5]
+
+Overlay rendering:
+  --overlays_as_contours
+                        Display overlay contours and reduce the opacity of their inner region (see the `--overlays_opacity` argument).
+  --overlays_colors R G B [R G B ...]
+                        Colors for the overlays or contours. You may provide a single color for all overlays/contours, or one color for each. Each color is given as three values: R G B
+  --overlays_opacity OVERLAYS_OPACITY
+                        Opacity value for the mask overlays. When combined with `--overlays_as_contours`, this will be the opacity of the region inside the computed contours. [0.5]
diff --git a/scripts/.hidden/scil_volume_apply_transform.py.help b/scripts/.hidden/scil_volume_apply_transform.py.help
new file mode 100644
index 000000000..753ce1883
--- /dev/null
+++ b/scripts/.hidden/scil_volume_apply_transform.py.help
@@ -0,0 +1,27 @@
+usage: scil_volume_apply_transform.py [-h] [--inverse] [--keep_dtype]
+                                      [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                      in_file in_target_file in_transfo
+                                      out_name
+
+Transform a Nifti (.nii.gz) volume using an affine/rigid transformation.
+
+For more information on how to use the registration script, follow this link:
+https://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html
+
+Formerly: scil_apply_transform_to_image.py.
+
+positional arguments:
+  in_file               Path of the file to be transformed (nii or nii.gz)
+  in_target_file        Path of the reference target file (.nii.gz).
+  in_transfo            Path of the file containing the 4x4
+                        transformation matrix (.txt, .npy or .mat).
+  out_name              Output filename of the transformed data.
+
+options:
+  -h, --help            show this help message and exit
+  --inverse             Apply the inverse transformation.
+  --keep_dtype          If True, keeps the data_type of the input image (in_file) when saving the output image (out_name).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_volume_b0_synthesis.py.help b/scripts/.hidden/scil_volume_b0_synthesis.py.help
new file mode 100644
index 000000000..0510f7432
--- /dev/null
+++ b/scripts/.hidden/scil_volume_b0_synthesis.py.help
@@ -0,0 +1,34 @@
+usage: scil_volume_b0_synthesis.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                   in_b0 in_b0_mask in_t1 in_t1_mask out_b0
+
+Wrapper for SyNb0, available in Dipy, to run it on a single subject.
+Requires skull-stripped b0 and T1w images as input; the script will normalize
+the T1w's WM to 110, co-register both images, register them to the appropriate
+template, run SyNb0 and then transform the result back to the original space.
+
+SyNb0 is a deep learning model that predicts a synthetic, distortion-free
+b0 image from a distorted b0 and T1w.
+
+This script must be used carefully, as it is meant to be used in an
+environment with the following dependencies already installed (not installed by
+default in Scilpy):
+- tensorflow-addons
+- tensorrt
+- tensorflow
+
+positional arguments:
+  in_b0                 Input b0 image.
+  in_b0_mask            Input b0 mask.
+  in_t1                 Input T1w image.
+  in_t1_mask            Input T1w mask.
+  out_b0                Output b0 image without distortion.
+
+options:
+  -h, --help            show this help message and exit
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+[1] Schilling, Kurt G., et al. "Synthesized b0 for diffusion distortion
+    correction (Synb0-DisCo)." Magnetic resonance imaging 64 (2019): 62-70.
diff --git a/scripts/.hidden/scil_volume_count_non_zero_voxels.py.help b/scripts/.hidden/scil_volume_count_non_zero_voxels.py.help
new file mode 100644
index 000000000..df2606d66
--- /dev/null
+++ b/scripts/.hidden/scil_volume_count_non_zero_voxels.py.help
@@ -0,0 +1,31 @@
+usage: scil_volume_count_non_zero_voxels.py [-h] [--out OUT_FILE] [--stats]
+                                            [--id VALUE_ID]
+                                            [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                            IN_FILE
+
+Count the number of non-zero voxels in an image file.
+
+If you give it an image with more than 3 dimensions, it will summarize the 4th
+(or more) dimension to one voxel, and then find non-zero voxels over this.
+This means that if there is at least one non-zero voxel in the 4th dimension,
+this voxel of the 3D volume will be considered as non-zero.
+
+Formerly: scil_count_non_zero_voxels.py
+
+positional arguments:
+  IN_FILE               Input file name, in nifti format.
+
+options:
+  -h, --help            show this help message and exit
+  --out OUT_FILE        Name of the output file, which will be saved as a text file.
+  --stats               If set, output the value using a stats format. Using this syntax will append
+                        a line to the output file, instead of creating a file with only one line.
+                        This is useful to create a file to be used as the source of data for a graph.
+                        Can be combined with --id
+  --id VALUE_ID         Id of the current count. If used, the value of this argument will be
+                        output (followed by a ":") before the count value.
+                        Mostly useful with --stats.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_volume_crop.py.help b/scripts/.hidden/scil_volume_crop.py.help
new file mode 100644
index 000000000..71c5789c5
--- /dev/null
+++ b/scripts/.hidden/scil_volume_crop.py.help
@@ -0,0 +1,30 @@
+usage: scil_volume_crop.py [-h] [--ignore_voxel_size]
+                           [-v [{DEBUG,INFO,WARNING}]] [-f]
+                           [--input_bbox INPUT_BBOX | --output_bbox OUTPUT_BBOX]
+                           in_image out_image
+
+Crop a volume using a given or an automatically computed bounding box. If a
+previously computed bounding box file is given, the cropping will be applied
+and the affine fixed accordingly.
+
+Warning: This works well on masked volumes (e.g. with FSL-BET) since it is
+looking for non-zero data. Therefore, you should validate the results on
+other types of images that haven't been masked.
+
+Formerly: scil_crop_volume.py
+
+positional arguments:
+  in_image              Path of the nifti file to crop.
+  out_image             Path of the cropped nifti file to write.
+
+options:
+  -h, --help            show this help message and exit
+  --ignore_voxel_size   Ignore voxel size compatibility test between input bounding box and data. Warning, use only if you know what you are doing.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+  --input_bbox INPUT_BBOX
+                        Path of the pickle file from which to take the bounding box to crop input file.
+  --output_bbox OUTPUT_BBOX
+                        Path of the pickle file where to write the computed bounding box. (.pickle extension)
diff --git a/scripts/.hidden/scil_volume_flip.py.help b/scripts/.hidden/scil_volume_flip.py.help
new file mode 100644
index 000000000..54134b2bd
--- /dev/null
+++ b/scripts/.hidden/scil_volume_flip.py.help
@@ -0,0 +1,18 @@
+usage: scil_volume_flip.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
+                           in_image out_image dimension [dimension ...]
+
+Flip the volume according to the specified axis.
+
+Formerly: scil_flip_volume.py
+
+positional arguments:
+  in_image              Path of the input volume (nifti).
+  out_image             Path of the output volume (nifti).
+  dimension             The axes you want to flip, e.g. to flip the x and y axes, use: x y.
+
+options:
+  -h, --help            show this help message and exit
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
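Conceptually, the flip performed above amounts to reversing the data array
along the chosen axes. A minimal nibabel/numpy sketch of the idea (not the
script's actual implementation; in particular, whether the affine is also
adjusted is not specified in this help text):

import nibabel as nib
import numpy as np

# Load the volume and reverse the voxel data along x (axis 0).
img = nib.load('in_volume.nii.gz')
data = np.asanyarray(img.dataobj)
flipped = np.flip(data, axis=0)

# Save with the original affine and header; a full flip may also update the affine.
nib.save(nib.Nifti1Image(flipped, img.affine, img.header), 'out_volume.nii.gz')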
diff --git a/scripts/.hidden/scil_volume_math.py.help b/scripts/.hidden/scil_volume_math.py.help
new file mode 100644
index 000000000..1d8aa074e
--- /dev/null
+++ b/scripts/.hidden/scil_volume_math.py.help
@@ -0,0 +1,176 @@
+usage: scil_volume_math.py [-h] [--data_type DATA_TYPE] [--exclude_background]
+                           [-v [{DEBUG,INFO,WARNING}]] [-f]
+                           {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference,concatenate,dilation,erosion,closing,opening,blur}
+                           in_args [in_args ...] out_image
+
+Performs an operation on a list of images. The supported operations are
+listed below.
+
+This script loads all images into memory and will often crash after a few
+hundred images.
+
+Some operations such as multiplication or addition accept float values as
+parameters instead of images.
+> scil_volume_math.py multiplication img.nii.gz 10 mult_10.nii.gz
+
+Formerly: scil_image_math.py
+
+    lower_threshold: IMG THRESHOLD
+        All values below the threshold will be set to zero.
+        All values above the threshold will be set to one.
+
+    upper_threshold: IMG THRESHOLD
+        All values below the threshold will be set to one.
+        All values above the threshold will be set to zero.
+        Equivalent to lower_threshold followed by an inversion.
+
+    lower_threshold_eq: IMG THRESHOLD
+        All values below the threshold will be set to zero.
+        All values above or equal to the threshold will be set to one.
+
+    upper_threshold_eq: IMG THRESHOLD
+        All values below or equal to the threshold will be set to one.
+        All values above the threshold will be set to zero.
+        Equivalent to lower_threshold followed by an inversion.
+
+    lower_threshold_otsu: IMG
+        All values below or equal to the Otsu threshold will be set to zero.
+        All values above the Otsu threshold will be set to one.
+        (Otsu's method is an algorithm to perform automatic image thresholding
+        of the background.)
+
+    upper_threshold_otsu: IMG
+        All values below the Otsu threshold will be set to one.
+        All values above or equal to the Otsu threshold will be set to zero.
+        Equivalent to lower_threshold_otsu followed by an inversion.
+
+    lower_clip: IMG THRESHOLD
+        All values below the threshold will be set to threshold.
+
+    upper_clip: IMG THRESHOLD
+        All values above the threshold will be set to threshold.
+
+    absolute_value: IMG
+        All negative values will become positive.
+
+    round: IMG
+        Round all decimal values to the closest integer.
+
+    ceil: IMG
+        Ceil all decimal values to the next integer.
+
+    floor: IMG
+        Floor all decimal values to the previous integer.
+
+    normalize_sum: IMG
+        Normalize the image so the sum of all values is one.
+
+    normalize_max: IMG
+        Normalize the image so the maximum value is one.
+
+    log_10: IMG
+        Apply a log (base 10) to all non-zero values of an image.
+
+    log_e: IMG
+        Apply a natural log to all non-zero values of an image.
+
+    convert: IMG
+        Perform no operation, but simply change the data type.
+
+    invert: IMG
+        Operation on a binary image to interchange 0s and 1s in a binary mask.
+
+    addition: IMGs
+        Add multiple images together.
+
+    subtraction: IMG_1 IMG_2
+        Subtract the first image by the second (IMG_1 - IMG_2).
+
+    multiplication: IMGs
+        Multiply multiple images together (danger of underflow and overflow).
+
+    division: IMG_1 IMG_2
+        Divide the first image by the second (danger of underflow and overflow).
+        Zero values are ignored and excluded from the operation.
+
+    mean: IMGs
+        Compute the mean of images.
+        If a single 4D image is provided, average along the last dimension.
+
+    std: IMGs
+        Compute the standard deviation across multiple images.
+        If a single 4D image is provided, compute the STD along the last
+        dimension.
+
+    correlation: IMGs
+        Computes the correlation of the 3x3x3 neighborhood of each voxel, for
+        all pairs of input images. The final image is the average correlation
+        (through all pairs).
+        For a given pair of images
+        - Background is considered as 0. May lead to very high correlations
+        close to the border of the background regions, or very poor ones if the
+        background in both images differ.
+        - Images are zero-padded. For the same reason as above, may lead to
+        very high correlations if you have data close to the border of the
+        image.
+        - NaN values (if a voxel's neighborhood is entirely uniform; std 0) are
+        replaced by
+            - 0 if at least one neighborhood was entirely containing background.
+            - 1 if the voxel's neighborhoods are uniform in both images
+            - 0 if the voxel's neighborhood is uniform in one image, but not
+            the other.
+
+        UPDATE AS OF VERSION 2.0: Random noise was previously added in the
+        process to help avoid NaN values. Now replaced by either 0 or 1 as
+        explained above.
+
+    union: IMGs
+        Operation on binary images to keep voxels that are non-zero in at
+        least one file.
+
+    intersection: IMGs
+        Operation on binary images to keep the voxels that are non-zero
+        in all files.
+
+    difference: IMG_1 IMG_2
+        Operation on binary images to keep voxels from the first file that are
+        not in the second file (non-zeros).
+
+    concatenate: IMGs
+        Concatenate a list of 3D and 4D images into a single 4D image.
+
+    dilation: IMG, VALUE
+        Binary morphological operation to spatially extend the values of an
+        image to their neighbors. VALUE is in voxels: an integer > 0.
+
+    erosion: IMG, VALUE
+        Binary morphological operation to spatially shrink the volume contained
+        in a binary image. VALUE is in voxels: an integer > 0.
+
+    closing: IMG, VALUE
+        Binary morphological operation, dilation followed by an erosion.
+
+    opening: IMG, VALUE
+        Binary morphological operation, erosion followed by a dilation.
+
+    blur: IMG, VALUE
+        Apply a gaussian blur to a single image. VALUE is sigma, the standard
+        deviation of the Gaussian kernel.
+
+
+positional arguments:
+  {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference,concatenate,dilation,erosion,closing,opening,blur}
+                        The type of operation to be performed on the images.
+  in_args               The list of image files or parameters. Refer to each operation's documentation for the expected arguments.
+  out_image             Output image path.
+
+options:
+  -h, --help            show this help message and exit
+  --data_type DATA_TYPE
+                        Data type of the output image. Use the format:
+                        uint8, int16, int/float32, int/float64.
+  --exclude_background  Does not affect the background of the original images.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_volume_remove_outliers_ransac.py.help b/scripts/.hidden/scil_volume_remove_outliers_ransac.py.help
new file mode 100644
index 000000000..572a6734b
--- /dev/null
+++ b/scripts/.hidden/scil_volume_remove_outliers_ransac.py.help
@@ -0,0 +1,26 @@
+usage: scil_volume_remove_outliers_ransac.py [-h] [--min_fit MIN_FIT]
+                                             [--max_iter MAX_ITER]
+                                             [--fit_thr FIT_THR]
+                                             [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                             in_image out_image
+
+Remove outliers from an image using the RANSAC algorithm.
+The RANSAC algorithm parameters are sensitive to the input data.
+
+NOTE: Current default parameters are tuned for ad/md/rd images only.
+
+Formerly: scil_remove_outliers_ransac.py
+
+positional arguments:
+  in_image              Nifti image.
+  out_image             Corrected Nifti image.
+
+options:
+  -h, --help            show this help message and exit
+  --min_fit MIN_FIT     The minimum number of data values required to fit the model. [50]
+  --max_iter MAX_ITER   The maximum number of iterations allowed in the algorithm. [1000]
+  --fit_thr FIT_THR     Threshold value for determining when a data point fits a model. [0.01]
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_volume_resample.py.help b/scripts/.hidden/scil_volume_resample.py.help
new file mode 100644
index 000000000..984352569
--- /dev/null
+++ b/scripts/.hidden/scil_volume_resample.py.help
@@ -0,0 +1,36 @@
+usage: scil_volume_resample.py [-h]
+                               (--ref REF | --volume_size VOLUME_SIZE [VOLUME_SIZE ...] | --voxel_size VOXEL_SIZE [VOXEL_SIZE ...] | --iso_min)
+                               [--interp {nn,lin,quad,cubic}]
+                               [--enforce_dimensions]
+                               [-v [{DEBUG,INFO,WARNING}]] [-f]
+                               in_image out_image
+
+Script to resample a dataset to match the resolution of another
+reference dataset, or to the resolution specified as an argument.
+
+Formerly: scil_resample_volume.py
+
+positional arguments:
+  in_image              Path of the input volume.
+  out_image             Path of the resampled volume.
+
+options:
+  -h, --help            show this help message and exit
+  --ref REF             Reference volume to resample to.
+  --volume_size VOLUME_SIZE [VOLUME_SIZE ...]
+                        Sets the size for the volume. If a single value Y is given, it will resample to a shape of Y x Y x Y.
+  --voxel_size VOXEL_SIZE [VOXEL_SIZE ...]
+                        Sets the voxel size. If a single value Y is given, it will set a voxel size of Y x Y x Y.
+  --iso_min             Resample the volume to R x R x R with R being the smallest current voxel dimension.
+  --interp {nn,lin,quad,cubic}
+                        Interpolation mode.
+                        nn: nearest neighbour
+                        lin: linear
+                        quad: quadratic
+                        cubic: cubic
+                        Defaults to linear.
+  --enforce_dimensions  Enforce the reference volume dimension.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_volume_reshape_to_reference.py.help b/scripts/.hidden/scil_volume_reshape_to_reference.py.help
new file mode 100644
index 000000000..4cf6dfc08
--- /dev/null
+++ b/scripts/.hidden/scil_volume_reshape_to_reference.py.help
@@ -0,0 +1,29 @@
+usage: scil_volume_reshape_to_reference.py [-h]
+                                           [--interpolation {linear,nearest}]
+                                           [--keep_dtype]
+                                           [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                           in_file in_ref_file out_file
+
+Reshape / reslice / resample *.nii or *.nii.gz using a reference.
+This script can be used to align freesurfer/civet output, as .mgz,
+to the original input image.
+
+>>> scil_volume_reshape_to_reference.py wmparc.mgz t1.nii.gz wmparc_t1.nii.gz\
+    --interpolation nearest
+
+Formerly: scil_reshape_to_reference.py
+
+positional arguments:
+  in_file               Path of the image (.nii or .mgz) to be reshaped.
+  in_ref_file           Path of the reference image (.nii).
+  out_file              Output filename of the reshaped image (.nii).
+
+options:
+  -h, --help            show this help message and exit
+  --interpolation {linear,nearest}
+                        Interpolation: "linear" or "nearest". [linear]
+  --keep_dtype          If True, keeps the data_type of the input image (in_file) when saving the output image (out_file).
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_volume_stats_in_ROI.py.help b/scripts/.hidden/scil_volume_stats_in_ROI.py.help
new file mode 100644
index 000000000..0ea80791c
--- /dev/null
+++ b/scripts/.hidden/scil_volume_stats_in_ROI.py.help
@@ -0,0 +1,39 @@
+usage: scil_volume_stats_in_ROI.py [-h]
+                                   (--metrics_dir dir | --metrics file [file ...])
+                                   [--bin] [--normalize_weights]
+                                   [--indent INDENT] [--sort_keys]
+                                   [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                   in_mask
+
+Compute the statistics (mean, std) of scalar maps, which can represent
+diffusion metrics, in a ROI. Prints the results.
+
+The mask can either be a binary mask, or a weighting mask. If the mask is
+a weighting mask it should either contain floats between 0 and 1 or should be
+normalized with --normalize_weights. IMPORTANT: if the mask contains weights
+(and not 0 and 1 exclusively), the standard deviation will also be weighted.
+
+positional arguments:
+  in_mask               Mask volume filename.
+                        Can be a binary mask or a weighted mask.
+
+options:
+  -h, --help            show this help message and exit
+  --bin                 If set, will consider every value of the mask higher than 0 to be
+                        part of the mask (equivalent weighting for every voxel).
+  --normalize_weights   If set, the weights will be normalized to the [0,1] range.
+  -v [{DEBUG,INFO,WARNING}]
+                        Produces verbose output depending on the provided level.
+                        Default level is warning, default when using -v is info.
+  -f                    Force overwriting of the output files.
+
+Metrics input options:
+  --metrics_dir dir     Name of the directory containing metrics files: we will
+                        load all nifti files.
+  --metrics file [file ...]
+                        Metrics nifti filenames. List of the names of the metric files,
+                        in nifti format.
+
+Json options:
+  --indent INDENT       Indent for json pretty print.
+  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_volume_stats_in_labels.py.help b/scripts/.hidden/scil_volume_stats_in_labels.py.help
new file mode 100644
index 000000000..2a70a383e
--- /dev/null
+++ b/scripts/.hidden/scil_volume_stats_in_labels.py.help
@@ -0,0 +1,22 @@
+usage: scil_volume_stats_in_labels.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
+                                      in_labels in_labels_lut in_map
+
+Computes the information from the input map for each cortical region
+(corresponding to an atlas).
+
+Hint: For instance, this script could be useful if you have a seed map from a
+specific bundle, to know from which regions it originated.
+
+Formerly: scil_compute_seed_by_labels.py
+
+positional arguments:
+  in_labels             Path of the input label file.
+  in_labels_lut         Path of the LUT file corresponding to labels, used to name the regions of interest.
+  in_map                Path of the input map file. Expecting a 3D file.
+ +options: + -h, --help show this help message and exit + -v [{DEBUG,INFO,WARNING}] + Produces verbose output depending on the provided level. + Default level is warning, default when using -v is info. + -f Force overwriting of the output files. diff --git a/subprocess b/subprocess new file mode 100644 index 000000000..e69de29bb From 6004993e099dfeebc9945f4e2d819cfc69c22a5f Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Fri, 31 May 2024 14:00:23 -0400 Subject: [PATCH 02/69] use help files only if search_parser argument is provided --- scripts/scil_search_keywords.py | 161 +++++++++++++++++++------------- 1 file changed, 97 insertions(+), 64 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index d65d58f2e..21a42eeef 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -19,7 +19,6 @@ import pathlib import re import subprocess - import numpy as np from scilpy.io.utils import add_verbose_arg @@ -57,77 +56,111 @@ def main(): # Use directory of this script, should work with most installation setups script_dir = pathlib.Path(__file__).parent + hidden_dir = script_dir / '.hidden' matches = [] keywords_regexes = [re.compile('(' + re.escape(kw) + ')', re.IGNORECASE) for kw in args.keywords] + - for script in sorted(script_dir.glob('*.py')): - filename = script.name - if filename == '__init__.py': - continue - - # Skip this script - if filename == pathlib.Path(__file__).name: - continue - - error_msg = "" - if args.search_parser: - # Run the script's argparser, and catch the output in case there - # is an error, such as ModuleNotFoundException. - sub = subprocess.run(['{}'.format(script.absolute()), '--help'], - capture_output=True) - search_text = sub.stdout.decode("utf-8") - if sub.stderr: - # Fall back on the docstring in case of error - error_msg = "There was an error executing script parser, " \ - "searching through docstring instead...\n\n" - search_text = _get_docstring_from_script_path(str(script)) - else: - # Fetch the docstring - search_text = _get_docstring_from_script_path(str(script)) + # Search through the argparser instead of the docstring + if args.search_parser: + #Use precomputed help files + for help_file in sorted(hidden_dir.glob('*.help')): + script_name = pathlib.Path(help_file.stem).stem + with open(help_file, 'r') as f: + search_text = f.read() + # Test intersection of all keywords, either in filename or docstring - if not _test_matching_keywords(args.keywords, [filename, search_text]): - continue - - matches.append(filename) - search_text = search_text or 'No docstring available!' - - display_filename = filename - display_short_info, display_long_info = _split_first_sentence( - search_text) - - # NOTE: It is important to do the formatting before adding color style, - # because python does not ignore ANSI color codes, and will count them - # as characters! - - # Center text, add spacing and make BOLD - header = _make_title(" {} ".format(display_filename)) - footer = _make_title(" End of {} ".format(display_filename)) - - # Highlight found keywords using ANSI color codes - colored_keyword = '{}\\1{}'.format(RED + BOLD, END_COLOR) - for regex in keywords_regexes: - header = regex.sub(colored_keyword, header) - footer = regex.sub(colored_keyword, footer) - display_short_info = regex.sub(colored_keyword, display_short_info) - display_long_info = regex.sub(colored_keyword, display_long_info) - - # Restore BOLD in header/footer after matching keywords, and make sure - # to add a END_COLOR at the end. 
- header = header.replace(END_COLOR, END_COLOR + BOLD) + END_COLOR - footer = footer.replace(END_COLOR, END_COLOR + BOLD) + END_COLOR - - # Print everything - logging.info(header) - if error_msg: - logging.info(RED + BOLD + error_msg + END_COLOR) - logging.info(display_short_info) - logging.debug(display_long_info) - logging.info(footer) - logging.info("\n") + if not _test_matching_keywords(args.keywords, [script_name, search_text]): + continue + + matches.append(script_name) + search_text = search_text or 'No docstring available!' + + display_filename = script_name + display_short_info, display_long_info = _split_first_sentence( + search_text) + + # NOTE: It is important to do the formatting before adding color style, + # because python does not ignore ANSI color codes, and will count them + # as characters! + + # Center text, add spacing and make BOLD + header = _make_title(" {} ".format(display_filename)) + footer = _make_title(" End of {} ".format(display_filename)) + + # Highlight found keywords using ANSI color codes + colored_keyword = '{}\\1{}'.format(RED + BOLD, END_COLOR) + for regex in keywords_regexes: + header = regex.sub(colored_keyword, header) + footer = regex.sub(colored_keyword, footer) + display_short_info = regex.sub(colored_keyword, display_short_info) + display_long_info = regex.sub(colored_keyword, display_long_info) + + # Restore BOLD in header/footer after matching keywords, and make sure + # to add a END_COLOR at the end. + header = header.replace(END_COLOR, END_COLOR + BOLD) + END_COLOR + footer = footer.replace(END_COLOR, END_COLOR + BOLD) + END_COLOR + + # Print everything + logging.info(header) + logging.info(display_short_info) + logging.debug(display_long_info) + logging.info(footer) + logging.info("\n") + + # Search through the docstring instead of the argparser + else: + for script in sorted(script_dir.glob('scripts/*.py')): + filename = script.name + if filename == '__init__.py' or filename =='scil_search_keywords.py': + continue + + search_text = _get_docstring_from_script_path(str(script)) + # Test intersection of all keywords, either in filename or docstring + if not _test_matching_keywords(args.keywords, [filename, search_text]): + continue + + matches.append(filename) + search_text = search_text or 'No docstring available!' + + display_filename = filename + display_short_info, display_long_info = _split_first_sentence( + search_text) + + # NOTE: It is important to do the formatting before adding color style, + # because python does not ignore ANSI color codes, and will count them + # as characters! + + # Center text, add spacing and make BOLD + header = _make_title(" {} ".format(display_filename)) + footer = _make_title(" End of {} ".format(display_filename)) + + # Highlight found keywords using ANSI color codes + colored_keyword = '{}\\1{}'.format(RED + BOLD, END_COLOR) + for regex in keywords_regexes: + header = regex.sub(colored_keyword, header) + footer = regex.sub(colored_keyword, footer) + display_short_info = regex.sub(colored_keyword, display_short_info) + display_long_info = regex.sub(colored_keyword, display_long_info) + + # Restore BOLD in header/footer after matching keywords, and make sure + # to add a END_COLOR at the end. 
+ header = header.replace(END_COLOR, END_COLOR + BOLD) + END_COLOR + footer = footer.replace(END_COLOR, END_COLOR + BOLD) + END_COLOR + + # Print everything + logging.info(header) + logging.info(display_short_info) + logging.debug(display_long_info) + logging.info(footer) + logging.info("\n") + + + if not matches: logging.info(_make_title(' No results found! ')) From 0430fdb5c3aaca511cb767237154647ba2ce6be7 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Tue, 4 Jun 2024 09:35:41 -0400 Subject: [PATCH 03/69] modify scripts path --- scripts/scil_search_keywords.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 21a42eeef..bf5754455 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -61,7 +61,6 @@ def main(): keywords_regexes = [re.compile('(' + re.escape(kw) + ')', re.IGNORECASE) for kw in args.keywords] - # Search through the argparser instead of the docstring if args.search_parser: @@ -113,7 +112,7 @@ def main(): # Search through the docstring instead of the argparser else: - for script in sorted(script_dir.glob('scripts/*.py')): + for script in sorted(script_dir.glob('*.py')): filename = script.name if filename == '__init__.py' or filename =='scil_search_keywords.py': continue From 6311d108c236b90e8470e39a1bdfcaa135e15fdc Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Tue, 4 Jun 2024 13:15:18 -0400 Subject: [PATCH 04/69] add stemming functions --- scripts/scil_search_keywords.py | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index bf5754455..1d5706d23 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -20,15 +20,20 @@ import re import subprocess import numpy as np +import nltk +from nltk.stem import PorterStemmer from scilpy.io.utils import add_verbose_arg +nltk.download('punkt', quiet=True) + RED = '\033[31m' BOLD = '\033[1m' END_COLOR = '\033[0m' SPACING_CHAR = '=' SPACING_LEN = 80 +stemmer = PorterStemmer() def _build_arg_parser(): p = argparse.ArgumentParser(description=__doc__, @@ -54,6 +59,8 @@ def main(): else: logging.getLogger().setLevel(logging.getLevelName(args.verbose)) + stemmed_keywords = _stem_keywords(args.keywords) + # Use directory of this script, should work with most installation setups script_dir = pathlib.Path(__file__).parent hidden_dir = script_dir / '.hidden' @@ -72,7 +79,7 @@ def main(): # Test intersection of all keywords, either in filename or docstring - if not _test_matching_keywords(args.keywords, [script_name, search_text]): + if not _contains_stemmed_keywords(stemmed_keywords, search_text): continue matches.append(script_name) @@ -92,11 +99,8 @@ def main(): # Highlight found keywords using ANSI color codes colored_keyword = '{}\\1{}'.format(RED + BOLD, END_COLOR) - for regex in keywords_regexes: - header = regex.sub(colored_keyword, header) - footer = regex.sub(colored_keyword, footer) - display_short_info = regex.sub(colored_keyword, display_short_info) - display_long_info = regex.sub(colored_keyword, display_long_info) + for keyword in args.keywords: + search_text = re.sub(rf'(?i)\b{re.escape(keyword)}\b', f'{RED + BOLD}\\g<0>{END_COLOR}', search_text) # Restore BOLD in header/footer after matching keywords, and make sure # to add a END_COLOR at the end. 
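A quick illustration of the stemmed matching this patch introduces (the helper functions appear in the next hunk): NLTK's Porter stemmer maps inflected word forms to a common stem, so a query term matches any word that shares its stem. A minimal sketch, assuming nltk is installed; the example words are invented:

    from nltk.stem import PorterStemmer

    stemmer = PorterStemmer()

    # 'tracking' and 'tracked' both reduce to 'track', so a search for
    # 'tracking' will also match a docstring that only says 'tracked'.
    print(stemmer.stem('tracking'))  # -> track
    print(stemmer.stem('tracked'))   # -> track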
@@ -246,6 +250,17 @@ def _split_first_sentence(text):
     remaining = text[split_idx:] if split_idx else ""
     return sentence, remaining
 
+def _stem_keywords(keywords):
+    return [stemmer.stem(keyword) for keyword in keywords]
+
+def _stem_text(text):
+    words = nltk.word_tokenize(text)
+    return ' '.join([stemmer.stem(word) for word in words])
+
+def _contains_stemmed_keywords(stemmed_keywords, text):
+    stemmed_text = _stem_text(text)
+    return all([stem in stemmed_text for stem in stemmed_keywords])
+
 
 if __name__ == '__main__':
     main()

From e520d2febd60d2cda953dbc66b45dd2092f7d078 Mon Sep 17 00:00:00 2001
From: bouj1113
Date: Tue, 4 Jun 2024 14:39:46 -0400
Subject: [PATCH 05/69] modify _contains_stemmed_keywords function

---
 scripts/scil_search_keywords.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py
index 1d5706d23..8a0c3bcfb 100755
--- a/scripts/scil_search_keywords.py
+++ b/scripts/scil_search_keywords.py
@@ -79,7 +79,7 @@ def main():
 
             # Test intersection of all keywords, either in filename or docstring
-            if not _contains_stemmed_keywords(stemmed_keywords, search_text):
+            if not _contains_stemmed_keywords(stemmed_keywords, search_text, script_name):
                 continue
 
@@ -117,14 +117,15 @@ def main():
     # Search through the docstring instead of the argparser
     else:
         for script in sorted(script_dir.glob('*.py')):
-            filename = script.name
-            if filename == '__init__.py' or filename =='scil_search_keywords.py':
+            # Remove the .py extension
+            filename = script.stem
+            if filename == '__init__' or filename == 'scil_search_keywords':
                 continue
-
+
             search_text = _get_docstring_from_script_path(str(script))
 
             # Test intersection of all keywords, either in filename or docstring
-            if not _test_matching_keywords(args.keywords, [filename, search_text]):
+            if not _contains_stemmed_keywords(stemmed_keywords, search_text, filename):
                 continue
 
@@ -257,9 +258,10 @@ def _stem_text(text):
     words = nltk.word_tokenize(text)
     return ' '.join([stemmer.stem(word) for word in words])
 
-def _contains_stemmed_keywords(stemmed_keywords, text):
+def _contains_stemmed_keywords(stemmed_keywords, text, filename):
     stemmed_text = _stem_text(text)
-    return all([stem in stemmed_text for stem in stemmed_keywords])
+    stemmed_filename = _stem_text(filename)
+    return all([stem in stemmed_text or stem in stemmed_filename for stem in stemmed_keywords])

From 127bf0fc7e074259b6fa5da84d0c081835713f89 Mon Sep 17 00:00:00 2001
From: bouj1113
Date: Tue, 4 Jun 2024 15:26:19 -0400
Subject: [PATCH 06/69] regenerate hidden files if not already existing

---
 scripts/scil_search_keywords.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py
index 8a0c3bcfb..54a1f2502 100755
--- a/scripts/scil_search_keywords.py
+++ b/scripts/scil_search_keywords.py
@@ -64,6 +64,16 @@ def main():
     # Use directory of this script, should work with most installation setups
     script_dir = pathlib.Path(__file__).parent
    hidden_dir = script_dir / '.hidden'
+
+    if not hidden_dir.exists():
+        hidden_dir.mkdir()
+        logging.info('This is your first time running this script.\n'
+                     'Generating help files may take a few minutes, please be patient.\n'
+                     'Subsequent searches will be much faster.\n'
+                     'Generating help files....')
+        _generate_help_files()
+
+
@@ -263,6 
+273,14 @@ def _contains_stemmed_keywords(stemmed_keywords,text, filename): stemmed_filename = _stem_text(filename) return all([stem in stemmed_text or stem in stemmed_filename for stem in stemmed_keywords]) +def _generate_help_files(): + """Call the external script generate_help_files to generate help files + """ + script_path = pathlib.Path(__file__).parent.parent / 'scilpy-bot-scripts'/'generate_help_files.py' + #calling the extrernal script generate_help_files + subprocess.run(['python', script_path], check=True) + + if __name__ == '__main__': main() From 000f747bb315c212804702c350c6150246be5b49 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Thu, 6 Jun 2024 14:48:26 -0400 Subject: [PATCH 07/69] Color keywords in the terminal output --- scripts/scil_search_keywords.py | 68 ++++++++++++--------------------- 1 file changed, 25 insertions(+), 43 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 54a1f2502..b4cf77575 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -22,11 +22,14 @@ import numpy as np import nltk from nltk.stem import PorterStemmer +from colorama import init, Fore, Style from scilpy.io.utils import add_verbose_arg nltk.download('punkt', quiet=True) +init(autoreset=True) + RED = '\033[31m' BOLD = '\033[1m' END_COLOR = '\033[0m' @@ -99,29 +102,16 @@ def main(): display_short_info, display_long_info = _split_first_sentence( search_text) - # NOTE: It is important to do the formatting before adding color style, - # because python does not ignore ANSI color codes, and will count them - # as characters! - - # Center text, add spacing and make BOLD - header = _make_title(" {} ".format(display_filename)) - footer = _make_title(" End of {} ".format(display_filename)) - - # Highlight found keywords using ANSI color codes - colored_keyword = '{}\\1{}'.format(RED + BOLD, END_COLOR) + # Highlight found keywords using colorama for keyword in args.keywords: - search_text = re.sub(rf'(?i)\b{re.escape(keyword)}\b', f'{RED + BOLD}\\g<0>{END_COLOR}', search_text) - - # Restore BOLD in header/footer after matching keywords, and make sure - # to add a END_COLOR at the end. - header = header.replace(END_COLOR, END_COLOR + BOLD) + END_COLOR - footer = footer.replace(END_COLOR, END_COLOR + BOLD) + END_COLOR + display_short_info = display_short_info.replace(keyword, f'{Fore.RED}{Style.BRIGHT}{keyword}{Style.RESET_ALL}') + display_long_info = display_long_info.replace(keyword, f'{Fore.RED}{Style.BRIGHT}{keyword}{Style.RESET_ALL}') # Print everything - logging.info(header) + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}") logging.info(display_short_info) logging.debug(display_long_info) - logging.info(footer) + logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") logging.info("\n") # Search through the docstring instead of the argparser @@ -145,34 +135,16 @@ def main(): display_short_info, display_long_info = _split_first_sentence( search_text) - # NOTE: It is important to do the formatting before adding color style, - # because python does not ignore ANSI color codes, and will count them - # as characters! 
- - # Center text, add spacing and make BOLD - header = _make_title(" {} ".format(display_filename)) - footer = _make_title(" End of {} ".format(display_filename)) - - # Highlight found keywords using ANSI color codes - colored_keyword = '{}\\1{}'.format(RED + BOLD, END_COLOR) - for regex in keywords_regexes: - header = regex.sub(colored_keyword, header) - footer = regex.sub(colored_keyword, footer) - display_short_info = regex.sub(colored_keyword, display_short_info) - display_long_info = regex.sub(colored_keyword, display_long_info) - - # Restore BOLD in header/footer after matching keywords, and make sure - # to add a END_COLOR at the end. - header = header.replace(END_COLOR, END_COLOR + BOLD) + END_COLOR - footer = footer.replace(END_COLOR, END_COLOR + BOLD) + END_COLOR + # Highlight found keywords using colorama + display_short_info = _highlight_keywords(display_short_info, stemmed_keywords) + display_long_info = _highlight_keywords(display_long_info, stemmed_keywords) # Print everything - logging.info(header) + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}") logging.info(display_short_info) logging.debug(display_long_info) - logging.info(footer) + logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") logging.info("\n") - if not matches: @@ -180,7 +152,7 @@ def main(): def _make_title(text): - return BOLD + text.center(SPACING_LEN, SPACING_CHAR) + END_COLOR + return f'{Fore.BLUE}{Style.BRIGHT}{text.center(SPACING_LEN, SPACING_CHAR)}{Style.RESET_ALL}' def _test_matching_keywords(keywords, texts): @@ -280,7 +252,17 @@ def _generate_help_files(): #calling the extrernal script generate_help_files subprocess.run(['python', script_path], check=True) - +def _highlight_keywords(text, stemmed_keywords): + """Highlight the stemmed keywords in the given text using colorama.""" + words = text.split() + highlighted_text = [] + for word in words: + stemmed_word = stemmer.stem(word) + if stemmed_word in stemmed_keywords: + highlighted_text.append(f'{Fore.RED}{Style.BRIGHT}{word}{Style.RESET_ALL}') + else: + highlighted_text.append(word) + return ' '.join(highlighted_text) if __name__ == '__main__': main() From 237301c38150321808eca5a473ab66815423152f Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Fri, 7 Jun 2024 11:18:48 -0400 Subject: [PATCH 08/69] generate json_files --- scilpy-bot-scripts/generate_json_files.py | 84 ++ .../json_files/knowledge_base.json | 1187 +++++++++++++++++ 2 files changed, 1271 insertions(+) create mode 100644 scilpy-bot-scripts/generate_json_files.py create mode 100644 scilpy-bot-scripts/json_files/knowledge_base.json diff --git a/scilpy-bot-scripts/generate_json_files.py b/scilpy-bot-scripts/generate_json_files.py new file mode 100644 index 000000000..2c2a1c63b --- /dev/null +++ b/scilpy-bot-scripts/generate_json_files.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import os +import json +import ast +from pathlib import Path + + + +def _get_docstring_from_script_path(script_path): + """Extract a python file's docstring from a filepath. + + Parameters + ---------- + script : str + Path to python file + + Returns + ------- + docstring : str + The file docstring, or an empty string if there was no docstring. 
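+
+    Examples
+    --------
+    Illustrative only; assumes a file my_script.py exists whose module
+    docstring starts with the line "Compute a map.":
+
+    >>> _get_docstring_from_script_path('my_script.py').splitlines()[0]
+    'Compute a map.'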
+ """ + with open(script_path, 'r') as reader: + file_contents = reader.read() + module = ast.parse(file_contents) + docstring = ast.get_docstring(module) or '' + return docstring + + +def _get_help_text_from_file(help_file_path): + with open(help_file_path, 'r') as f: + help_text = f.read() + return help_text + + +def generate_json(knowledge_base_dir, hidden_dir, output_json_dir): + knowledge_base = {'scripts': []} + + for script in sorted(Path(knowledge_base_dir).glob('*.py')): + script_name = script.stem + if script_name in ('__init__','scil_search_keywords'): + continue + + docstring = _get_docstring_from_script_path(str(script)) + help_file_path = Path(hidden_dir) / f'{script_name}.py.help' + + if not help_file_path.exists(): + print(f"Warning: Help file for {script_name} not found in {hidden_dir}") + help_text = '' + else: + help_text = _get_help_text_from_file(help_file_path) + + script_info = { + 'name': script_name, + 'docstring': docstring, + 'help': help_text, + 'synonyms': [], # This can be filled later by lab members + 'keywords': [] # This can be filled later by lab members + } + + knowledge_base['scripts'].append(script_info) + + # Ensure the output directory exists + output_json_dir.mkdir(parents=True, exist_ok=True) + output_json_path = output_json_dir / 'knowledge_base.json' + + with open(output_json_path, 'w') as json_file: + json.dump(knowledge_base, json_file, indent=4) + + print(f"Knowledge base JSON has been generated at {output_json_path}") + + +def main(): + base_dir = Path(__file__).parent.parent + knowledge_base_dir = base_dir/'scripts/' + hidden_dir = knowledge_base_dir / '.hidden' + output_json_dir = base_dir/'scilpy-bot-scripts'/'json_files' + + generate_json(knowledge_base_dir, hidden_dir, output_json_dir) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/scilpy-bot-scripts/json_files/knowledge_base.json b/scilpy-bot-scripts/json_files/knowledge_base.json new file mode 100644 index 000000000..e5172d4c4 --- /dev/null +++ b/scilpy-bot-scripts/json_files/knowledge_base.json @@ -0,0 +1,1187 @@ +{ + "scripts": [ + { + "name": "scil_NODDI_maps", + "docstring": "Compute NODDI [1] maps using AMICO.\nMulti-shell DWI necessary.\n\nFormerly: scil_compute_NODDI.py", + "help": "usage: scil_NODDI_maps.py [-h] [--mask MASK] [--out_dir OUT_DIR]\n [--tolerance tol] [--skip_b0_check]\n [--para_diff PARA_DIFF] [--iso_diff ISO_DIFF]\n [--lambda1 LAMBDA1] [--lambda2 LAMBDA2]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec\n\nCompute NODDI [1] maps using AMICO.\nMulti-shell DWI necessary.\n\nFormerly: scil_compute_NODDI.py\n\npositional arguments:\n in_dwi DWI file acquired with a NODDI compatible protocol (single-shell data not suited).\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-vectors filename, in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Brain mask filename.\n --out_dir OUT_DIR Output directory for the NODDI results. [results]\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). 
Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nModel options:\n --para_diff PARA_DIFF\n Axial diffusivity (AD) in the CC. [0.0017]\n --iso_diff ISO_DIFF Mean diffusivity (MD) in ventricles. [0.003]\n --lambda1 LAMBDA1 First regularization parameter. [0.5]\n --lambda2 LAMBDA2 Second regularization parameter. [0.001]\n\nKernels options:\n --save_kernels DIRECTORY\n Output directory for the COMMIT kernels.\n --load_kernels DIRECTORY\n Input directory where the COMMIT kernels are located.\n --compute_only Compute kernels only, --save_kernels must be used.\n\nReference:\n [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n NODDI: practical in vivo neurite orientation dispersion\n and density imaging of the human brain.\n NeuroImage. 2012 Jul 16;61:1000-16.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_NODDI_priors", + "docstring": "Compute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py", + "help": "usage: scil_NODDI_priors.py [-h] [--fa_min_single_fiber FA_MIN_SINGLE_FIBER]\n [--fa_max_ventricles FA_MAX_VENTRICLES]\n [--md_min_ventricles MD_MIN_VENTRICLES]\n [--roi_radius ROI_RADIUS]\n [--roi_center pos pos pos]\n [--out_txt_1fiber_para FILE]\n [--out_txt_1fiber_perp FILE]\n [--out_mask_1fiber FILE]\n [--out_txt_ventricles FILE]\n [--out_mask_ventricles FILE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_FA in_AD in_RD in_MD\n\nCompute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py\n\npositional arguments:\n in_FA Path to the FA volume.\n in_AD Path to the axial diffusivity (AD) volume.\n in_RD Path to the radial diffusivity (RD) volume.\n in_MD Path to the mean diffusivity (MD) volume.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics options:\n --fa_min_single_fiber FA_MIN_SINGLE_FIBER\n Minimal threshold of FA (voxels above that threshold are considered in \n the single fiber mask). [0.7]\n --fa_max_ventricles FA_MAX_VENTRICLES\n Maximal threshold of FA (voxels under that threshold are considered in \n the ventricles). [0.1]\n --md_min_ventricles MD_MIN_VENTRICLES\n Minimal threshold of MD in mm2/s (voxels above that threshold are considered \n for in the ventricles). [0.003]\n\nRegions options:\n --roi_radius ROI_RADIUS\n Radius of the region used to estimate the priors. The roi will be a cube spanning \n from ROI_CENTER in each direction. [20]\n --roi_center pos pos pos\n Center of the roi of size roi_radius used to estimate the priors; a 3-value coordinate. 
\n If not set, uses the center of the 3D volume.\n\nOutputs:\n --out_txt_1fiber_para FILE\n Output path for the text file containing the single fiber average value of AD.\n If not set, the file will not be saved.\n --out_txt_1fiber_perp FILE\n Output path for the text file containing the single fiber average value of RD.\n If not set, the file will not be saved.\n --out_mask_1fiber FILE\n Output path for single fiber mask. If not set, the mask will not be saved.\n --out_txt_ventricles FILE\n Output path for the text file containing the ventricles average value of MD.\n If not set, the file will not be saved.\n --out_mask_ventricles FILE\n Output path for the ventricule mask.\n If not set, the mask will not be saved.\n\nReference:\n [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n NODDI: practical in vivo neurite orientation dispersion and density\n imaging of the human brain. NeuroImage. 2012 Jul 16;61:1000-16.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_aodf_metrics", + "docstring": "Script to compute various metrics derivated from asymmetric ODF.\n\nThese metrics include the asymmetric peak directions image, a number of fiber\ndirections (nufid) map [1], the asymmetry index (ASI) map [2] and an odd-power\nmap [3].\n\nThe asymmetric peak directions image contains peaks per hemisphere, considering\nantipodal sphere directions as distinct. On a symmetric signal, the number of\nasymmetric peaks extracted is then twice the number of symmetric peaks.\n\nThe nufid map is the asymmetric alternative to NuFO maps. It counts the\nnumber of asymmetric peaks extracted and ranges in [0..N] with N the maximum\nnumber of peaks.\n\nThe asymmetric index is a cosine-based metric in the range [0..1], with 0\ncorresponding to a perfectly symmetric signal and 1 to a perfectly asymmetric\nsignal.\n\nThe odd-power map is also in the range [0..1], with 0 corresponding to a\nperfectly symmetric signal and 1 to a perfectly anti-symmetric signal. It is\ngiven as the ratio of the L2-norm of odd SH coefficients on the L2-norm of all\nSH coefficients.\n\nFormerly: scil_compute_asym_odf_metrics.py", + "help": "usage: scil_aodf_metrics.py [-h] [--mask MASK] [--asi_map ASI_MAP]\n [--odd_power_map ODD_POWER_MAP] [--peaks PEAKS]\n [--peak_values PEAK_VALUES]\n [--peak_indices PEAK_INDICES] [--nufid NUFID]\n [--not_all] [--at A_THRESHOLD] [--rt R_THRESHOLD]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--processes NBR]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh\n\nScript to compute various metrics derivated from asymmetric ODF.\n\nThese metrics include the asymmetric peak directions image, a number of fiber\ndirections (nufid) map [1], the asymmetry index (ASI) map [2] and an odd-power\nmap [3].\n\nThe asymmetric peak directions image contains peaks per hemisphere, considering\nantipodal sphere directions as distinct. On a symmetric signal, the number of\nasymmetric peaks extracted is then twice the number of symmetric peaks.\n\nThe nufid map is the asymmetric alternative to NuFO maps. 
It counts the\nnumber of asymmetric peaks extracted and ranges in [0..N] with N the maximum\nnumber of peaks.\n\nThe asymmetric index is a cosine-based metric in the range [0..1], with 0\ncorresponding to a perfectly symmetric signal and 1 to a perfectly asymmetric\nsignal.\n\nThe odd-power map is also in the range [0..1], with 0 corresponding to a\nperfectly symmetric signal and 1 to a perfectly anti-symmetric signal. It is\ngiven as the ratio of the L2-norm of odd SH coefficients on the L2-norm of all\nSH coefficients.\n\nFormerly: scil_compute_asym_odf_metrics.py\n\npositional arguments:\n in_sh Input SH image.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Optional mask.\n --asi_map ASI_MAP Output asymmetry index (ASI) map.\n --odd_power_map ODD_POWER_MAP\n Output odd power map.\n --peaks PEAKS Output filename for the extracted peaks.\n --peak_values PEAK_VALUES\n Output filename for the extracted peaks values.\n --peak_indices PEAK_INDICES\n Output filename for the generated peaks indices on the sphere.\n --nufid NUFID Output filename for the nufid file.\n --not_all If set, only saves the files specified using the file flags [False].\n --at A_THRESHOLD Absolute threshold on fODF amplitude. This value should be set to\n approximately 1.5 to 2 times the maximum fODF amplitude in isotropic voxels\n (ie. ventricles).\n Use scil_fodf_max_in_ventricles.py to find the maximal value.\n See [Dell'Acqua et al HBM 2013] [0.0].\n --rt R_THRESHOLD Relative threshold on fODF amplitude in percentage [0.1].\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere to use for peak directions estimation [symmetric724].\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] C. Poirier and M. Descoteaux, \"Filtering Methods for Asymmetric ODFs:\nWhere and How Asymmetry Occurs in the White Matter.\" bioRxiv. 2022 Jan 1;\n2022.12.18.520881. doi: https://doi.org/10.1101/2022.12.18.520881\n\n[2] S. Cetin Karayumak, E. \u00d6zarslan, and G. Unal,\n\"Asymmetric Orientation Distribution Functions (AODFs) revealing intravoxel\ngeometry in diffusion MRI,\" Magnetic Resonance Imaging, vol. 49, pp. 145-158,\nJun. 2018, doi: https://doi.org/10.1016/j.mri.2018.03.006.\n\n[3] C. Poirier, E. St-Onge, and M. Descoteaux, \"Investigating the Occurence of\nAsymmetric Patterns in White Matter Fiber Orientation Distribution Functions\"\n[Abstract], In: Proc. Intl. Soc. Mag. Reson. Med. 
29 (2021), 2021 May 15-20,\nVancouver, BC, Abstract number 0865.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bids_validate", + "docstring": "Create a json file from a BIDS dataset detailling all info\nneeded for tractoflow\n- DWI/rev_DWI\n- T1\n- fmap/sbref (based on IntendedFor entity)\n- Freesurfer (optional - could be one per participant\n or one per participant/session)\n\nThe BIDS dataset MUST be homogeneous.\nThe metadata need to be uniform across all participants/sessions/runs\n\nMandatory entity: IntendedFor\nSensitive entities: PhaseEncodingDirection, TotalReadoutTime, direction\n\nFormerly: scil_validate_bids.py", + "help": "usage: scil_bids_validate.py [-h] [--bids_ignore BIDS_IGNORE] [--fs FS]\n [--clean] [--readout READOUT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bids out_json\n\nCreate a json file from a BIDS dataset detailling all info\nneeded for tractoflow\n- DWI/rev_DWI\n- T1\n- fmap/sbref (based on IntendedFor entity)\n- Freesurfer (optional - could be one per participant\n or one per participant/session)\n\nThe BIDS dataset MUST be homogeneous.\nThe metadata need to be uniform across all participants/sessions/runs\n\nMandatory entity: IntendedFor\nSensitive entities: PhaseEncodingDirection, TotalReadoutTime, direction\n\nFormerly: scil_validate_bids.py\n\npositional arguments:\n in_bids Input BIDS folder.\n out_json Output json file.\n\noptions:\n -h, --help show this help message and exit\n --bids_ignore BIDS_IGNORE\n If you want to ignore some subjects or some files, you\n can provide an extra bidsignore file.Check:\n https://github.com/bids-standard/bids-\n validator#bidsignore\n --fs FS Output freesurfer path. It will add keys wmparc and\n aparc+aseg.\n --clean If set, it will remove all the participants that are\n missing any information.\n --readout READOUT Default total readout time value [0.062].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bingham_metrics", + "docstring": "Script to compute fODF lobe-specific metrics derived from a Bingham\ndistribution fit, as described in [1]. Resulting metrics are fiber density\n(FD), fiber spread (FS) and fiber fraction (FF) [2].\n\nThe Bingham coefficients volume comes from scil_fodf_to_bingham.py.\n\nA lobe's FD is the integral of the Bingham function on the sphere. It\nrepresents the density of fibers going through a given voxel for a given\nfODF lobe (fixel). A lobe's FS is the ratio of its FD on its maximum AFD. It\nis at its minimum for a sharp lobe and at its maximum for a wide lobe. A lobe's\nFF is the ratio of its FD on the total FD in the voxel.\n\nUsing 12 threads, the execution takes 10 minutes for FD estimation for a brain\nwith 1mm isotropic resolution. Other metrics take less than a second.\n\nFormerly: scil_compute_lobe_specific_fodf_metrics.py", + "help": "usage: scil_bingham_metrics.py [-h] [--out_fd OUT_FD] [--out_fs OUT_FS]\n [--out_ff OUT_FF] [--not_all] [--mask MASK]\n [--nbr_integration_steps NBR_INTEGRATION_STEPS]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR]\n [-f]\n in_bingham\n\nScript to compute fODF lobe-specific metrics derived from a Bingham\ndistribution fit, as described in [1]. 
Resulting metrics are fiber density\n(FD), fiber spread (FS) and fiber fraction (FF) [2].\n\nThe Bingham coefficients volume comes from scil_fodf_to_bingham.py.\n\nA lobe's FD is the integral of the Bingham function on the sphere. It\nrepresents the density of fibers going through a given voxel for a given\nfODF lobe (fixel). A lobe's FS is the ratio of its FD on its maximum AFD. It\nis at its minimum for a sharp lobe and at its maximum for a wide lobe. A lobe's\nFF is the ratio of its FD on the total FD in the voxel.\n\nUsing 12 threads, the execution takes 10 minutes for FD estimation for a brain\nwith 1mm isotropic resolution. Other metrics take less than a second.\n\nFormerly: scil_compute_lobe_specific_fodf_metrics.py\n\npositional arguments:\n in_bingham Input Bingham nifti image.\n\noptions:\n -h, --help show this help message and exit\n --out_fd OUT_FD Path to output fiber density. [fd.nii.gz]\n --out_fs OUT_FS Path to output fiber spread. [fs.nii.gz]\n --out_ff OUT_FF Path to fiber fraction file. [ff.nii.gz]\n --not_all Do not compute all metrics. Then, please provide the output paths of the files you need.\n --mask MASK Optional mask image. Only voxels inside the mask are computed.\n --nbr_integration_steps NBR_INTEGRATION_STEPS\n Number of integration steps along the theta axis for fiber density estimation. [50]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -f Force overwriting of the output files.\n\n[1] T. W. Riffert, J. Schreiber, A. Anwander, and T. R. Kn\u00f6sche, \u201cBeyond\n fractional anisotropy: Extraction of bundle-specific structural metrics\n from crossing fiber models,\u201d NeuroImage, vol. 100, pp. 176-191, Oct. 2014,\n doi: 10.1016/j.neuroimage.2014.06.015.\n\n[2] J. Schreiber, T. Riffert, A. Anwander, and T. R. Kn\u00f6sche, \u201cPlausibility\n Tracking: A method to evaluate anatomical connectivity and microstructural\n properties along fiber pathways,\u201d NeuroImage, vol. 90, pp. 163-178, Apr.\n 2014, doi: 10.1016/j.neuroimage.2014.01.002.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_btensor_metrics", + "docstring": "Script to compute microstructure metrics using the DIVIDE method. In order to\noperate, the script needs at leats two different types of b-tensor encodings.\nNote that custom encodings are not yet supported, so that only the linear\ntensor encoding (LTE, b_delta = 1), the planar tensor encoding\n(PTE, b_delta = -0.5), the spherical tensor encoding (STE, b_delta = 0) and\nthe cigar shape tensor encoding (b_delta = 0.5) are available. Moreover, all\nof `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names. Thus, this\nscript outputs the results from the DIVIDE fit or direct derivatives:\nmean diffusivity (MD), isotropic mean kurtosis (mk_i), anisotropic mean\nkurtosis (mk_a), total mean kurtosis (mk_t) and finally micro-FA (uFA).\nSpecific names can be specified using the\nfile flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output. 
The order parameter can also be computed from the uFA and a\nprecomputed FA, using separate input parameters.\n\n>>> scil_btensor_metrics.py --in_dwis LTE.nii.gz PTE.nii.gz STE.nii.gz\n --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs LTE.bvec PTE.bvec STE.bvec\n --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nIMPORTANT: If the script does not converge to a solution, it is probably due to\nnoise outside the brain. Thus, it is strongly recommanded to provide a brain\nmask with --mask.\n\nBased on Markus Nilsson, Filip Szczepankiewicz, Bj\u00f6rn Lampinen, Andr\u00e9 Ahlgren,\nJo\u00e3o P. de Almeida Martins, Samo Lasic, Carl-Fredrik Westin,\nand Daniel Topgaard. An open-source framework for analysis of multidimensional\ndiffusion MRI data implemented in MATLAB.\nProc. Intl. Soc. Mag. Reson. Med. (26), Paris, France, 2018.\n\nFormerly: scil_compute_divide.py", + "help": "usage: scil_btensor_metrics.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--mask MASK]\n [--tolerance tol] [--skip_b0_check]\n [--fit_iters FIT_ITERS]\n [--random_iters RANDOM_ITERS]\n [--do_weight_bvals] [--do_weight_pa]\n [--do_multiple_s0] [--op OP] [--fa FA]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n [-f] [--not_all] [--md file] [--ufa file]\n [--mk_i file] [--mk_a file] [--mk_t file]\n\nScript to compute microstructure metrics using the DIVIDE method. In order to\noperate, the script needs at leats two different types of b-tensor encodings.\nNote that custom encodings are not yet supported, so that only the linear\ntensor encoding (LTE, b_delta = 1), the planar tensor encoding\n(PTE, b_delta = -0.5), the spherical tensor encoding (STE, b_delta = 0) and\nthe cigar shape tensor encoding (b_delta = 0.5) are available. Moreover, all\nof `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names. Thus, this\nscript outputs the results from the DIVIDE fit or direct derivatives:\nmean diffusivity (MD), isotropic mean kurtosis (mk_i), anisotropic mean\nkurtosis (mk_a), total mean kurtosis (mk_t) and finally micro-FA (uFA).\nSpecific names can be specified using the\nfile flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output. The order parameter can also be computed from the uFA and a\nprecomputed FA, using separate input parameters.\n\n>>> scil_btensor_metrics.py --in_dwis LTE.nii.gz PTE.nii.gz STE.nii.gz\n --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs LTE.bvec PTE.bvec STE.bvec\n --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nIMPORTANT: If the script does not converge to a solution, it is probably due to\nnoise outside the brain. Thus, it is strongly recommanded to provide a brain\nmask with --mask.\n\nBased on Markus Nilsson, Filip Szczepankiewicz, Bj\u00f6rn Lampinen, Andr\u00e9 Ahlgren,\nJo\u00e3o P. de Almeida Martins, Samo Lasic, Carl-Fredrik Westin,\nand Daniel Topgaard. An open-source framework for analysis of multidimensional\ndiffusion MRI data implemented in MATLAB.\nProc. Intl. Soc. Mag. Reson. Med. 
(26), Paris, France, 2018.\n\nFormerly: scil_compute_divide.py\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as dwi, bval and bvec inputs.\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --fit_iters FIT_ITERS\n The number of time the gamma fit will be done [1]\n --random_iters RANDOM_ITERS\n The number of iterations for the initial parameters search. [50]\n --do_weight_bvals If set, does not do a weighting on the bvalues in the gamma fit.\n --do_weight_pa If set, does not do a powder averaging weighting in the gamma fit.\n --do_multiple_s0 If set, does not take into account multiple baseline signals.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags. (Default: False)\n\nOrder parameter (OP):\n --op OP Output filename for the order parameter. The OP will not be output if this is not given. Computation of the OP also requires a precomputed FA map (given using --fa).\n --fa FA Path to a FA map. Needed for calculating the OP.\n\nFile flags:\n --md file Output filename for the MD.\n --ufa file Output filename for the microscopic FA.\n --mk_i file Output filename for the isotropic mean kurtosis.\n --mk_a file Output filename for the anisotropic mean kurtosis.\n --mk_t file Output filename for the total mean kurtosis.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_clean_qbx_clusters", + "docstring": "Render clusters sequentially to either accept or reject them based on\nvisual inspection. 
Useful for cleaning bundles for RBx, BST or for figures.\nThe VTK window does not handle opacity of streamlines well; this is\nnormal rendering behavior.\nOften used in pair with scil_tractogram_qbx.py.\n\nKey mapping:\n- a/A: accept displayed clusters\n- r/R: reject displayed clusters\n- z/Z: Rewind one element\n- c/C: Stop rendering of the background concatenation of streamlines\n- q/Q: Exit the window early; everything remaining will be rejected",
            "help": "usage: scil_bundle_clean_qbx_clusters.py [-h]\n                                         [--out_accepted_dir OUT_ACCEPTED_DIR]\n                                         [--out_rejected_dir OUT_REJECTED_DIR]\n                                         [--min_cluster_size MIN_CLUSTER_SIZE]\n                                         [--background_opacity BACKGROUND_OPACITY]\n                                         [--background_linewidth BACKGROUND_LINEWIDTH]\n                                         [--clusters_linewidth CLUSTERS_LINEWIDTH]\n                                         [--reference REFERENCE]\n                                         [--no_bbox_check]\n                                         [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                         in_bundles [in_bundles ...]\n                                         out_accepted out_rejected\n\n    Render clusters sequentially to either accept or reject them based on\n    visual inspection. Useful for cleaning bundles for RBx, BST or for figures.\n    The VTK window does not handle opacity of streamlines well; this is\n    normal rendering behavior.\n    Often used in pair with scil_tractogram_qbx.py.\n\n    Key mapping:\n    - a/A: accept displayed clusters\n    - r/R: reject displayed clusters\n    - z/Z: Rewind one element\n    - c/C: Stop rendering of the background concatenation of streamlines\n    - q/Q: Exit the window early; everything remaining will be rejected\n\npositional arguments:\n  in_bundles            List of the clusters filenames.\n  out_accepted          Filename of the concatenated accepted clusters.\n  out_rejected          Filename of the concatenated rejected clusters.\n\noptions:\n  -h, --help            show this help message and exit\n  --out_accepted_dir OUT_ACCEPTED_DIR\n                        Directory to save all accepted clusters separately.\n  --out_rejected_dir OUT_REJECTED_DIR\n                        Directory to save all rejected clusters separately.\n  --min_cluster_size MIN_CLUSTER_SIZE\n                        Minimum cluster size for consideration [1]. Must be at least 1.\n  --background_opacity BACKGROUND_OPACITY\n                        Opacity of the background streamlines. Keep low, between 0 and 0.5 [0.1].\n  --background_linewidth BACKGROUND_LINEWIDTH\n                        Linewidth of the background streamlines [1].\n  --clusters_linewidth CLUSTERS_LINEWIDTH\n                        Linewidth of the current cluster [1].\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  --no_bbox_check       Activate to ignore validity of the bounding box during loading / saving of \n                        tractograms (ignores the presence of invalid streamlines).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_compute_centroid", + "docstring": "Compute a single bundle centroid, using an 'infinite' QuickBundles threshold.\n\nFormerly: scil_compute_centroid.py", + "help": "usage: scil_bundle_compute_centroid.py [-h] [--nb_points NB_POINTS]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_centroid\n\nCompute a single bundle centroid, using an 'infinite' QuickBundles threshold.\n\nFormerly: scil_compute_centroid.py\n\npositional arguments:\n in_bundle Fiber bundle file.\n out_centroid Output centroid streamline filename.\n\noptions:\n -h, --help show this help message and exit\n --nb_points NB_POINTS\n Number of points defining the centroid streamline[20].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_compute_endpoints_map", + "docstring": "Computes the endpoint map of a bundle. The endpoint map is simply a count of\nthe number of streamlines that start or end in each voxel.\n\nThe idea is to estimate the cortical area affected by the bundle (assuming\nstreamlines start/end in the cortex).\n\nNote: If the streamlines are not ordered the head/tail are random and not\nreally two coherent groups. Use the following script to order streamlines:\nscil_tractogram_uniformize_endpoints.py\n\nFormerly: scil_compute_endpoints_map.py", + "help": "usage: scil_bundle_compute_endpoints_map.py [-h] [--swap] [--binary]\n [--nb_points NB_POINTS]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle endpoints_map_head\n endpoints_map_tail\n\nComputes the endpoint map of a bundle. The endpoint map is simply a count of\nthe number of streamlines that start or end in each voxel.\n\nThe idea is to estimate the cortical area affected by the bundle (assuming\nstreamlines start/end in the cortex).\n\nNote: If the streamlines are not ordered the head/tail are random and not\nreally two coherent groups. Use the following script to order streamlines:\nscil_tractogram_uniformize_endpoints.py\n\nFormerly: scil_compute_endpoints_map.py\n\npositional arguments:\n in_bundle Fiber bundle filename.\n endpoints_map_head Output endpoints map head filename.\n endpoints_map_tail Output endpoints map tail filename.\n\noptions:\n -h, --help show this help message and exit\n --swap Swap head<->tail convention. Can be useful when the reference is not in RAS.\n --binary Save outputs as a binary mask instead of a heat map.\n --nb_points NB_POINTS\n Number of points to consider at the extremities of the streamlines. [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_diameter", + "docstring": "Script to estimate the diameter of bundle(s) along their length.\nSee also scil_bundle_shape_measures.py, which prints a quick estimate of\nthe diameter (volume / length). The computation here is more complex and done\nfor each section of the bundle.\n\nThe script expects:\n- bundles with coherent endpoints from scil_tractogram_uniformize_endpoints.py\n- labels maps with around 5-50 points scil_bundle_label_map.py\n <5 is not enough, high risk of bad fit\n >50 is too much, high risk of bad fit\n- bundles that are close to a tube\n without major fanning in a single axis\n fanning is in 2 directions (uniform dispersion) good approximation\n\nThe scripts prints a JSON file with mean/std to be compatible with tractometry.\nWARNING: STD is in fact an ERROR measure from the fit and NOT an STD.\n\nSince the estimation and fit quality is not always intuitive for some bundles\nand the tube with varying diameter is not easy to color/visualize,\nthe script comes with its own VTK rendering to allow exploration of the data.\n(optional).\n\nFormerly: scil_estimate_bundles_diameter.py", + "help": "usage: scil_bundle_diameter.py [-h]\n [--fitting_func {lin_up,lin_down,exp,inv,log}]\n [--show_rendering | --save_rendering OUT_FOLDER]\n [--wireframe] [--error_coloring]\n [--width WIDTH] [--opacity OPACITY]\n [--win_dims WIDTH HEIGHT] [--background R G B]\n [--reference REFERENCE] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...] in_labels\n [in_labels ...]\n\nScript to estimate the diameter of bundle(s) along their length.\nSee also scil_bundle_shape_measures.py, which prints a quick estimate of\nthe diameter (volume / length). The computation here is more complex and done\nfor each section of the bundle.\n\nThe script expects:\n- bundles with coherent endpoints from scil_tractogram_uniformize_endpoints.py\n- labels maps with around 5-50 points scil_bundle_label_map.py\n <5 is not enough, high risk of bad fit\n >50 is too much, high risk of bad fit\n- bundles that are close to a tube\n without major fanning in a single axis\n fanning is in 2 directions (uniform dispersion) good approximation\n\nThe scripts prints a JSON file with mean/std to be compatible with tractometry.\nWARNING: STD is in fact an ERROR measure from the fit and NOT an STD.\n\nSince the estimation and fit quality is not always intuitive for some bundles\nand the tube with varying diameter is not easy to color/visualize,\nthe script comes with its own VTK rendering to allow exploration of the data.\n(optional).\n\nFormerly: scil_estimate_bundles_diameter.py\n\npositional arguments:\n in_bundles List of tractography files.\n in_labels List of labels maps that match the bundles.\n\noptions:\n -h, --help show this help message and exit\n --fitting_func {lin_up,lin_down,exp,inv,log}\n Function to weigh points using their distance.\n [Default: None]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nVisualization options:\n --show_rendering Display VTK window (optional).\n --save_rendering OUT_FOLDER\n Save VTK render in the specified folder (optional)\n --wireframe Use wireframe for the tube rendering.\n --error_coloring Use the fitting error to color the tube.\n --width WIDTH Width of tubes or lines representing streamlines\n [Default: 0.2]\n --opacity OPACITY Opacity for the streamlines rendered with the tube.\n [Default: 0.2]\n --win_dims WIDTH HEIGHT\n The dimensions for the vtk window. [(1920, 1080)]\n --background R G B RBG values [0, 255] of the color of the background.\n [Default: [1, 1, 1]]\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_filter_by_occurence", + "docstring": "Use multiple versions of a same bundle and detect the most probable voxels by\nusing a threshold on the occurence, voxel-wise. With threshold 0.5, this is\na majority vote. This is useful to generate an average representation from\nbundles of a given population.\n\nIf streamlines originate from the same tractogram (ex, to compare various\nbundle clustering techniques), streamline-wise vote is available to find the\nstreamlines most often included in the bundle.\n\nFormerly: scil_perform_majority_vote.py", + "help": "usage: scil_bundle_filter_by_occurence.py [-h] [--ratio_voxels [RATIO_VOXELS]]\n [--ratio_streamlines [RATIO_STREAMLINES]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...]\n output_prefix\n\nUse multiple versions of a same bundle and detect the most probable voxels by\nusing a threshold on the occurence, voxel-wise. With threshold 0.5, this is\na majority vote. This is useful to generate an average representation from\nbundles of a given population.\n\nIf streamlines originate from the same tractogram (ex, to compare various\nbundle clustering techniques), streamline-wise vote is available to find the\nstreamlines most often included in the bundle.\n\nFormerly: scil_perform_majority_vote.py\n\npositional arguments:\n in_bundles Input bundles filename(s). All tractograms must have identical headers.\n output_prefix Output prefix. Ex: my_path/voting_. The suffixes will be: streamlines.trk and voxels.nii.gz\n\noptions:\n -h, --help show this help message and exit\n --ratio_voxels [RATIO_VOXELS]\n Threshold on the ratio of bundles with at least one streamine in a \n given voxel to consider it as part of the 'gold standard'. Default if set: 0.5.\n --ratio_streamlines [RATIO_STREAMLINES]\n If all bundles come from the same tractogram, use this to generate \n a voting for streamlines too. The associated value is the threshold on the ratio of \n bundles including the streamline to consider it as part of the 'gold standard'. [0.5]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_generate_priors", + "docstring": "Generation of priors and enhanced-FOD from an example/template bundle.\nThe bundle must have been cleaned thorougly before use. 
The E-FOD can then\nbe used for bundle-specific tractography, but not for FOD metrics.\n\nFormerly: scil_generate_priors_from_bundle.py", + "help": "usage: scil_bundle_generate_priors.py [-h]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--todi_sigma {0,1,2,3,4}]\n [--sf_threshold SF_THRESHOLD]\n [--out_prefix OUT_PREFIX]\n [--out_dir OUT_DIR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle in_fodf in_mask\n\nGeneration of priors and enhanced-FOD from an example/template bundle.\nThe bundle must have been cleaned thorougly before use. The E-FOD can then\nbe used for bundle-specific tractography, but not for FOD metrics.\n\nFormerly: scil_generate_priors_from_bundle.py\n\npositional arguments:\n in_bundle Input bundle filename.\n in_fodf Input FOD filename.\n in_mask Mask to constrain the TODI spatial smoothing,\n for example a WM mask.\n\noptions:\n -h, --help show this help message and exit\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --todi_sigma {0,1,2,3,4}\n Smooth the orientation histogram.\n --sf_threshold SF_THRESHOLD\n Relative threshold for sf masking (0.0-1.0).\n --out_prefix OUT_PREFIX\n Add a prefix to all output filename, \n default is no prefix.\n --out_dir OUT_DIR Output directory for all generated files,\n default is current directory.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Rheault, Francois, et al. \"Bundle-specific tractography with\n incorporated anatomical and orientational priors.\"\n NeuroImage 186 (2019): 382-398\n \n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_label_map", + "docstring": "Compute the label image (Nifti) from a centroid and tractograms (all\nrepresenting the same bundle). The label image represents the coverage of\nthe bundle, segmented into regions labelled from 0 to --nb_pts, starting from\nthe head, ending in the tail.\n\nEach voxel will have the label of its nearest centroid point.\n\nThe number of labels will be the same as the centroid's number of points.\n\nFormerly: scil_compute_bundle_voxel_label_map.py", + "help": "usage: scil_bundle_label_map.py [-h] [--nb_pts NB_PTS] [--colormap COLORMAP]\n [--new_labelling] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...] in_centroid\n out_dir\n\nCompute the label image (Nifti) from a centroid and tractograms (all\nrepresenting the same bundle). 
The label image represents the coverage of\nthe bundle, segmented into regions labelled from 0 to --nb_pts, starting from\nthe head, ending in the tail.\n\nEach voxel will have the label of its nearest centroid point.\n\nThe number of labels will be the same as the centroid's number of points.\n\nFormerly: scil_compute_bundle_voxel_label_map.py\n\npositional arguments:\n  in_bundles            Fiber bundle file(s).\n  in_centroid           Centroid streamline corresponding to bundle.\n  out_dir               Directory to save all mapping and coloring files:\n                        - correlation_map.nii.gz\n                        - session_x/labels_map.nii.gz\n                        - session_x/distance_map.nii.gz\n                        - session_x/correlation_map.nii.gz\n                        - session_x/labels.trk\n                        - session_x/distance.trk\n                        - session_x/correlation.trk\n                        Where session_x is numbered with each bundle.\n\noptions:\n  -h, --help            show this help message and exit\n  --nb_pts NB_PTS       Number of divisions for the bundles.\n                        Default is the number of points of the centroid.\n  --colormap COLORMAP   Select the colormap for colored trk (data_per_point) [jet].\n  --new_labelling       Use the new labelling method (multi-centroids).\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_mean_fixel_afd", + "docstring": "Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps along a bundle.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof the bundle provided, averaged at every voxel.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_fixel_afd_from_bundles.py", + "help": "usage: scil_bundle_mean_fixel_afd.py [-h] [--length_weighting]\n                                     [--reference REFERENCE]\n                                     [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n                                     [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                     in_bundle in_fodf afd_mean_map\n\nCompute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps along a bundle.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof the bundle provided, averaged at every voxel.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_fixel_afd_from_bundles.py\n\npositional arguments:\n  in_bundle             Path of the bundle file.\n  in_fodf               Path of the fODF volume in spherical harmonics (SH).\n  afd_mean_map          Path of the output mean AFD map.\n\noptions:\n  -h, --help            show this help message and exit\n  --length_weighting    If set, will weigh the AFD values according to segment lengths. [False]\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n                        Spherical harmonics basis used for the SH coefficients. \n                        Must be either 'descoteaux07', 'tournier07', \n                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n                        'descoteaux07'       : SH basis from the Descoteaux et al.\n                                               MRM 2007 paper\n                        'tournier07'         : SH basis from the new Tournier et al.\n                                               NeuroImage 2019 paper, as in MRtrix 3.\n                        'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n                                               of the Descoteaux et al. MRM 2007 paper\n                        'tournier07_legacy'  : SH basis from the legacy Tournier et al.\n                                               NeuroImage 2007 paper.\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. 
\n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nReference:\n    [1] Raffelt, D., Tournier, JD., Rose, S., Ridgway, GR., Henderson, R.,\n        Crozier, S., Salvado, O., & Connelly, A. (2012).\n        Apparent Fibre Density: a novel measure for the analysis of\n        diffusion-weighted magnetic resonance images. NeuroImage, 59(4),\n        3976--3994.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_mean_fixel_afd_from_hdf5", + "docstring": "Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps for every connection within an hdf5 (.h5) file.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof each connection, averaged at every voxel.\n\nPlease use an hdf5 (.h5) file containing decomposed connections.\n\nFormerly: scil_compute_fixel_afd_from_hdf5.py", + "help": "usage: scil_bundle_mean_fixel_afd_from_hdf5.py [-h] [--length_weighting]\n                                               [--processes NBR]\n                                               [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n                                               [-v [{DEBUG,INFO,WARNING}]]\n                                               [-f]\n                                               in_hdf5 in_fodf out_hdf5\n\nCompute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps for every connection within an hdf5 (.h5) file.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof each connection, averaged at every voxel.\n\nPlease use an hdf5 (.h5) file containing decomposed connections.\n\nFormerly: scil_compute_fixel_afd_from_hdf5.py\n\npositional arguments:\n  in_hdf5               HDF5 filename (.h5) containing decomposed connections.\n  in_fodf               Path of the fODF volume in spherical harmonics (SH).\n  out_hdf5              Path of the output HDF5 filename (.h5).\n\noptions:\n  -h, --help            show this help message and exit\n  --length_weighting    If set, will weigh the AFD values according to segment lengths. [False]\n  --processes NBR       Number of sub-processes to start. \n                        Default: [1]\n  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n                        Spherical harmonics basis used for the SH coefficients. \n                        Must be either 'descoteaux07', 'tournier07', \n                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n                        'descoteaux07'       : SH basis from the Descoteaux et al.\n                                               MRM 2007 paper\n                        'tournier07'         : SH basis from the new Tournier et al.\n                                               NeuroImage 2019 paper, as in MRtrix 3.\n                        'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n                                               of the Descoteaux et al. MRM 2007 paper\n                        'tournier07_legacy'  : SH basis from the legacy Tournier et al.\n                                               NeuroImage 2007 paper.\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nReference:\n    [1] Raffelt, D., Tournier, JD., Rose, S., Ridgway, GR., Henderson, R.,\n        Crozier, S., Salvado, O., & Connelly, A. (2012).\n        Apparent Fibre Density: a novel measure for the analysis of\n        diffusion-weighted magnetic resonance images. NeuroImage,\n        59(4), 3976--3994.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_mean_fixel_bingham_metric", + "docstring": "Given a bundle and Bingham coefficients, compute the average Bingham\nmetric at each voxel intersected by the bundle. Intersected voxels are\nfound by computing the intersection between the voxel grid and each streamline\nin the input tractogram.\n\nThis script behaves like scil_compute_mean_fixel_afd_from_bundles.py for fODFs,\nbut here for Bingham distributions. 
These add the unique possibility to capture\nfixel-based fiber spread (FS) and fiber fraction (FF). FD from the Bingham\nshould be \"equivalent\" to the AFD_fixel we are used to.\n\nBingham coefficients volume must come from scil_fodf_to_bingham.py\nand Bingham metrics come from scil_bingham_metrics.py.\n\nBingham metrics are extracted from Bingham distributions fitted to fODF. There\nare as many values per voxel as there are lobes extracted. The value chosen\nfor a given voxel is the one belonging to the lobe best aligned with the\ncurrent streamline segment.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_mean_fixel_obe_metric_from_bundles.py", + "help": "usage: scil_bundle_mean_fixel_bingham_metric.py [-h] [--length_weighting]\n                                                [--max_theta MAX_THETA]\n                                                [--reference REFERENCE]\n                                                [-v [{DEBUG,INFO,WARNING}]]\n                                                [-f]\n                                                in_bundle in_bingham\n                                                in_bingham_metric out_mean_map\n\nGiven a bundle and Bingham coefficients, compute the average Bingham\nmetric at each voxel intersected by the bundle. Intersected voxels are\nfound by computing the intersection between the voxel grid and each streamline\nin the input tractogram.\n\nThis script behaves like scil_compute_mean_fixel_afd_from_bundles.py for fODFs,\nbut here for Bingham distributions. These add the unique possibility to capture\nfixel-based fiber spread (FS) and fiber fraction (FF). FD from the Bingham\nshould be \"equivalent\" to the AFD_fixel we are used to.\n\nBingham coefficients volume must come from scil_fodf_to_bingham.py\nand Bingham metrics come from scil_bingham_metrics.py.\n\nBingham metrics are extracted from Bingham distributions fitted to fODF. There\nare as many values per voxel as there are lobes extracted. The value chosen\nfor a given voxel is the one belonging to the lobe best aligned with the\ncurrent streamline segment.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_mean_fixel_obe_metric_from_bundles.py\n\npositional arguments:\n  in_bundle             Path of the bundle file.\n  in_bingham            Path of the Bingham volume.\n  in_bingham_metric     Path of the Bingham metric (FD, FS, or FF) volume.\n  out_mean_map          Path of the output mean map.\n\noptions:\n  -h, --help            show this help message and exit\n  --length_weighting    If set, will weigh the FD values according to segment lengths.\n  --max_theta MAX_THETA\n                        Maximum angle (in degrees) condition on lobe alignment. [60]\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_mean_std", + "docstring": "Compute mean and std for each metric.\n\n- Default: For the whole bundle. This is achieved by averaging the metric\n  values of all voxels occupied by the bundle.\n- Option --per_point: For all streamline points in the bundle for each metric\n  combination, along the bundle, i.e. 
for each point.\n  **To create label_map and distance_map, see\n  scil_bundle_label_map.py\n\nDensity weighting modifies the contribution of voxels with lower/higher\nstreamline counts to reduce the influence of spurious streamlines.\n\nFormerly: scil_compute_bundle_mean_std_per_point.py or\nscil_compute_bundle_mean_std.py", + "help": "usage: scil_bundle_mean_std.py [-h] [--per_point in_labels | --include_dps]\n                               [--density_weighting]\n                               [--distance_weighting DISTANCE_NII]\n                               [--correlation_weighting CORRELATION_NII]\n                               [--out_json OUT_JSON] [--reference REFERENCE]\n                               [--indent INDENT] [--sort_keys]\n                               [-v [{DEBUG,INFO,WARNING}]]\n                               in_bundle in_metrics [in_metrics ...]\n\nCompute mean and std for each metric.\n\n- Default: For the whole bundle. This is achieved by averaging the metric\n  values of all voxels occupied by the bundle.\n- Option --per_point: For all streamline points in the bundle for each metric\n  combination, along the bundle, i.e. for each point.\n  **To create label_map and distance_map, see\n  scil_bundle_label_map.py\n\nDensity weighting modifies the contribution of voxels with lower/higher\nstreamline counts to reduce the influence of spurious streamlines.\n\nFormerly: scil_compute_bundle_mean_std_per_point.py or\nscil_compute_bundle_mean_std.py\n\npositional arguments:\n  in_bundle             Fiber bundle file to compute statistics on.\n  in_metrics            Nifti file to compute statistics on. Probably some tractometry measure(s) such as FA, MD, RD, ...\n\noptions:\n  -h, --help            show this help message and exit\n  --per_point in_labels\n                        If set, computes the metrics per point instead of on the whole bundle.\n                        You must then give the label map (.nii.gz) of the corresponding fiber bundle.\n  --include_dps         Save values from data_per_streamline.\n                        Currently not offered with option --per_point.\n  --density_weighting   If set, weights statistics by the number of fibers passing through each voxel.\n  --distance_weighting DISTANCE_NII\n                        If set, weights statistics by the inverse of the distance between a streamline and the centroid.\n  --correlation_weighting CORRELATION_NII\n                        If set, weights statistics by the correlation strength between longitudinal data.\n  --out_json OUT_JSON   Path of the output file. If not given, the output is simply printed on screen.\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. 
\n                        Default level is warning, default when using -v is info.\n\nJson options:\n  --indent INDENT       Indent for json pretty print.\n  --sort_keys           Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_pairwise_comparison", + "docstring": "Evaluate pair-wise similarity measures of bundles.\nAll tractograms must be in the same space (aligned to one reference).\n\nFor the voxel representation, the computed similarity measures are:\n    bundle_adjacency_voxels, dice_voxels, w_dice_voxels, density_correlation,\n    volume_overlap, volume_overreach\nThe same measures are also evaluated for the endpoints.\n\nFor the streamline representation, the computed similarity measures are:\n    bundle_adjacency_streamlines, dice_streamlines, streamlines_count_overlap,\n    streamlines_count_overreach\n\nFormerly: scil_evaluate_bundles_pairwise_agreement_measures.py", + "help": "usage: scil_bundle_pairwise_comparison.py [-h] [--streamline_dice]\n                                          [--bundle_adjency_no_overlap]\n                                          [--disable_streamline_distance]\n                                          [--single_compare SINGLE_COMPARE]\n                                          [--keep_tmp] [--ratio]\n                                          [--processes NBR]\n                                          [--reference REFERENCE]\n                                          [--indent INDENT] [--sort_keys]\n                                          [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                          in_bundles [in_bundles ...] out_json\n\nEvaluate pair-wise similarity measures of bundles.\nAll tractograms must be in the same space (aligned to one reference).\n\nFor the voxel representation, the computed similarity measures are:\n    bundle_adjacency_voxels, dice_voxels, w_dice_voxels, density_correlation,\n    volume_overlap, volume_overreach\nThe same measures are also evaluated for the endpoints.\n\nFor the streamline representation, the computed similarity measures are:\n    bundle_adjacency_streamlines, dice_streamlines, streamlines_count_overlap,\n    streamlines_count_overreach\n\nFormerly: scil_evaluate_bundles_pairwise_agreement_measures.py\n\npositional arguments:\n  in_bundles            Path of the input bundles.\n  out_json              Path of the output json file.\n\noptions:\n  -h, --help            show this help message and exit\n  --streamline_dice     Compute streamline-wise dice coefficient.\n                        Tractograms must be identical [False].\n  --bundle_adjency_no_overlap\n                        If set, do not count zeros in the average BA.\n  --disable_streamline_distance\n                        Will not compute the streamlines distance \n                        [False].\n  --single_compare SINGLE_COMPARE\n                        Compare inputs to this single file.\n  --keep_tmp            Will not delete the tmp folder at the end.\n  --ratio               Compute overlap and overreach as a ratio over the\n                        reference tractogram in a Tractometer-style way.\n                        Can only be used if also using the `single_compare` option.\n  --processes NBR       Number of sub-processes to start. \n                        Default: [1]\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nJson options:\n  --indent INDENT       Indent for json pretty print.\n  --sort_keys           Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_reject_outliers", + "docstring": "Clean a bundle (inliers/outliers) using hierarchical clustering.\nhttp://archive.ismrm.org/2015/2844.html\n\nIf spurious streamlines are dense, it is possible they will not be recognized\nas outliers. 
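Experimenting with --alpha (default 0.6) adjusts how aggressively clusters are\npruned; a sketch with hypothetical filenames:\n>>> scil_bundle_reject_outliers.py AF_L.trk AF_L_clean.trk --alpha 0.45 --display_counts\n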
Manual cleaning may be required to overcome this limitation.", + "help": "usage: scil_bundle_reject_outliers.py [-h]\n                                      [--remaining_bundle REMAINING_BUNDLE]\n                                      [--alpha ALPHA] [--display_counts]\n                                      [--indent INDENT] [--sort_keys]\n                                      [--reference REFERENCE]\n                                      [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                      in_bundle out_bundle\n\nClean a bundle (inliers/outliers) using hierarchical clustering.\nhttp://archive.ismrm.org/2015/2844.html\n\nIf spurious streamlines are dense, it is possible they will not be recognized\nas outliers. Manual cleaning may be required to overcome this limitation.\n\npositional arguments:\n  in_bundle             Fiber bundle file to remove outliers from.\n  out_bundle            Fiber bundle without outliers.\n\noptions:\n  -h, --help            show this help message and exit\n  --remaining_bundle REMAINING_BUNDLE\n                        Removed outliers.\n  --alpha ALPHA         Percent of the length of the tree that clusters of individual streamlines will be pruned. [0.6]\n  --display_counts      Print streamline count before and after filtering.\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nJson options:\n  --indent INDENT       Indent for json pretty print.\n  --sort_keys           Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_score_many_bundles_one_tractogram", + "docstring": "This script is intended to score all bundles from a single tractogram. Each\nvalid bundle is compared to its ground truth.\nEx: It was used for the ISMRM 2015 Challenge scoring.\n\nSee also scil_bundle_score_same_bundle_many_segmentations.py to score many\nversions of the same bundle, compared to ONE ground truth / gold standard.\n\nThis script is the second part of script scil_score_tractogram, which also\nsegments the whole-brain tractogram into bundles first.\n\nHere we suppose that the bundles are already segmented and saved as follows:\n    main_dir/\n        segmented_VB/*_VS.trk.\n        segmented_IB/*_*_IC.trk (optional)\n        segmented_WPC/*_wpc.trk (optional)\n        IS.trk OR NC.trk (if segmented_IB is present)\n\nConfig file\n-----------\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\n    - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n\nExample config file:\n{\n  \"Ground_truth_bundle_0\": {\n    \"gt_mask\": \"PATH/bundle0.nii.gz\",\n  }\n}\n\nFormerly: scil_score_bundles.py", + "help": "usage: scil_bundle_score_many_bundles_one_tractogram.py [-h] [--json_prefix p]\n                                                        [--gt_dir DIR]\n                                                        [--indent INDENT]\n                                                        [--sort_keys]\n                                                        [--reference REFERENCE]\n                                                        [--no_bbox_check]\n                                                        [-v [{DEBUG,INFO,WARNING}]]\n                                                        [-f]\n                                                        gt_config bundles_dir\n\nThis script is intended to score all bundles from a single tractogram. 
Each\nvalid bundle is compared to its ground truth.\nEx: It was used for the ISMRM 2015 Challenge scoring.\n\nSee also scil_bundle_score_same_bundle_many_segmentations.py to score many\nversions of the same bundle, compared to ONE ground truth / gold standard.\n\nThis script is the second part of script scil_score_tractogram, which also\nsegments the whole-brain tractogram into bundles first.\n\nHere we suppose that the bundles are already segmented and saved as follows:\n    main_dir/\n        segmented_VB/*_VS.trk.\n        segmented_IB/*_*_IC.trk (optional)\n        segmented_WPC/*_wpc.trk (optional)\n        IS.trk OR NC.trk (if segmented_IB is present)\n\nConfig file\n-----------\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\n    - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n\nExample config file:\n{\n  \"Ground_truth_bundle_0\": {\n    \"gt_mask\": \"PATH/bundle0.nii.gz\",\n  }\n}\n\nFormerly: scil_score_bundles.py\n\nTractometry\n-----------\nGlobal connectivity metrics:\n\n- Computed by default:\n    - VS: valid streamlines, belonging to a bundle (i.e. respecting all the\n      criteria for that bundle; endpoints, limit_mask, gt_mask.).\n    - IS: invalid streamlines. All other streamlines. IS = IC + NC.\n\n- Optional:\n    - WPC: wrong path connections, streamlines connecting correct ROIs but not\n      respecting the other criteria for that bundle. Such streamlines always\n      exist but they are only saved separately if specified in the options.\n      Otherwise, they are merged back with the IS.\n      By definition, WPC are only computed if \"limits masks\" are provided.\n    - IC: invalid connections, streamlines joining an incorrect combination of\n      ROIs. Use carefully, quality depends on the quality of your ROIs and no\n      analysis is done on the shape of the streamlines.\n    - NC: no connections. Invalid streamlines minus invalid connections.\n\n- Fidelity metrics:\n    - OL: Overlap. Percentage of ground truth voxels containing streamline(s)\n      for a given bundle.\n    - OR: Overreach. Amount of voxels containing streamline(s) when they\n      shouldn't, for a given bundle. We compute two versions:\n      OR_pct_vs = divided by the total number of voxels covered by the bundle.\n      (percentage of the voxels touched by VS).\n      Values range between 0 and 100%. Values are not defined when we\n      recovered no streamline for a bundle, but we set the OR_pct_vs to 0\n      in that case.\n      OR_pct_gt = divided by the total size of the ground truth bundle mask.\n      Values could be higher than 100%.\n    - f1 score, which is the same as the Dice score.\n\npositional arguments:\n  gt_config        .json dict configured as specified above.\n  bundles_dir      Directory containing all bundles.\n                   (Ex: Output directory for scil_score_tractogram).\n                   It is expected to contain a file IS.trk and \n                   files segmented_VB/*_VS.trk, with, possibly, files \n                   segmented_WPC/*_wpc.trk and segmented_IC/\n\noptions:\n  -h, --help       show this help message and exit\n  --json_prefix p  Prefix of the output json file. Ex: 'study_x_'.\n                   Suffix will be results.json. 
File will be saved inside bundles_dir.\n  --reference REFERENCE\n                   Reference anatomy for tck/vtk/fib/dpy file\n                   support (.nii or .nii.gz).\n  --no_bbox_check  Activate to ignore validity of the bounding box during loading / saving of \n                   tractograms (ignores the presence of invalid streamlines).\n  -v [{DEBUG,INFO,WARNING}]\n                   Produces verbose output depending on the provided level. \n                   Default level is warning, default when using -v is info.\n  -f               Force overwriting of the output files.\n\nAdditions to gt_config:\n  --gt_dir DIR     Root path of the ground truth files listed in the gt_config.\n                   If not set, filenames in the config file are considered \n                   as absolute paths.\n\nJson options:\n  --indent INDENT  Indent for json pretty print.\n  --sort_keys      Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_score_same_bundle_many_segmentations", + "docstring": "This script is intended to score many versions of the same bundle, compared to\nONE ground truth / gold standard.\n\nSee also scil_bundle_score_many_bundles_one_tractogram.py to score all bundles\nfrom a single tractogram by comparing each valid bundle to its ground truth.\n\nAll tractograms must be in the same space (aligned to one reference).\nThe measures can be applied to a voxel-wise or streamline-wise representation.\n\nA gold standard must be provided for the desired representation.\nA gold standard would be a segmentation from an expert or a group of experts.\nIf only the streamline-wise representation is provided without a voxel-wise\ngold standard, it will be computed from the provided streamlines.\nAt least one of the two representations is required.\n\nThe gold standard tractogram is the tractogram (whole brain most likely) from\nwhich the segmentation is performed.\nThe gold standard tracking mask is the tracking mask used by the tractography\nalgorithm to generate the gold standard tractogram.\n\nThe computed binary classification measures are:\nsensitivity, specificity, precision, accuracy, dice, kappa, youden for both\nthe streamline and voxel representation (if provided).\n\nFormerly: scil_evaluate_bundles_binary_classification_measures.py", + "help": "usage: scil_bundle_score_same_bundle_many_segmentations.py [-h]\n                                                           [--streamlines_measures GOLD_STANDARD_STREAMLINES TRACTOGRAM]\n                                                           [--voxels_measures GOLD_STANDARD_MASK TRACKING MASK]\n                                                           [--processes NBR]\n                                                           [--reference REFERENCE]\n                                                           [--indent INDENT]\n                                                           [--sort_keys]\n                                                           [-v [{DEBUG,INFO,WARNING}]]\n                                                           [-f]\n                                                           in_bundles\n                                                           [in_bundles ...]\n                                                           out_json\n\nThis script is intended to score many versions of the same bundle, compared to\nONE ground truth / gold standard.\n\nSee also scil_bundle_score_many_bundles_one_tractogram.py to score all bundles\nfrom a single tractogram by comparing each valid bundle to its ground truth.\n\nAll tractograms must be in the same space (aligned to one reference).\nThe measures can be applied to a voxel-wise or streamline-wise representation.\n\nA gold standard must be provided for the desired representation.\nA gold standard would be a segmentation from an expert or a group of experts.\nIf only the streamline-wise representation is provided without a voxel-wise\ngold standard, it will be computed from the provided streamlines.\nAt least one of the two representations is required.\n\nThe gold standard tractogram is the tractogram (whole brain most likely) from\nwhich the segmentation is performed.\nThe gold standard tracking mask is the tracking mask used by the tractography\nalgorithm to generate the gold standard 
tractogram.\n\nThe computed binary classification measures are:\nsensitivity, specificity, precision, accuracy, dice, kappa, youden for both\nthe streamline and voxel representation (if provided).\n\nFormerly: scil_evaluate_bundles_binary_classification_measures.py\n\npositional arguments:\n  in_bundles            Path of the input bundles.\n  out_json              Path of the output json.\n\noptions:\n  -h, --help            show this help message and exit\n  --streamlines_measures GOLD_STANDARD_STREAMLINES TRACTOGRAM\n                        The gold standard bundle and the original tractogram.\n  --voxels_measures GOLD_STANDARD_MASK TRACKING MASK\n                        The gold standard mask and the original tracking mask.\n  --processes NBR       Number of sub-processes to start. \n                        Default: [1]\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nJson options:\n  --indent INDENT       Indent for json pretty print.\n  --sort_keys           Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_shape_measures", + "docstring": "Evaluate basic measurements of bundle(s).\n\nThe computed measures are:\n    - volume_info: volume, volume_endpoints\n    - streamlines_info: streamlines_count, avg_length (in mm or in number of\n      points), average step size, min_length, max_length.\n      ** You may also get this information with scil_tractogram_print_info.py.\n    - shape_info: span, curl, diameter, elongation, surface area,\n      irregularity, end surface area, radius, end surface irregularity,\n      mean_curvature, fractal dimension.\n      ** The diameter, here, is a simple estimation using volume / length.\n      For a more complex calculation, see scil_bundle_diameter.py.\n\nWith more than one bundle, the measures are averaged over bundles. All\ntractograms must be in the same space.\n\nThe set average contains the average measures of all input bundles. The\nmeasures that are dependent on the streamline count are weighted by the number\nof streamlines of each bundle. Each of these average measures is computed by\nfirst summing the product of the measure and the streamline count of each\nbundle and dividing the sum by the total number of streamlines. 
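For instance, two bundles of 100 and 300 streamlines with mean lengths of 50 mm\nand 60 mm give a set average length of (100*50 + 300*60) / 400 = 57.5 mm.\n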
Thus, measures\nincluding length and span are essentially averages of all the streamlines.\nOther streamline-related set measures are computed with other set averages,\nwhereas bundle-related measures are computed as an average of all bundles.\nThese measures include volume and surface area.\n\nThe fractal dimension is dependent on the voxel size and the number of voxels.\nIf data comparison is performed, the bundles MUST be in the same resolution.\n\nFormerly: scil_compute_bundle_volume.py or\nscil_evaluate_bundles_individual_measures.py", + "help": "usage: scil_bundle_shape_measures.py [-h] [--out_json OUT_JSON]\n                                     [--group_statistics] [--no_uniformize]\n                                     [--reference REFERENCE] [--processes NBR]\n                                     [--indent INDENT] [--sort_keys]\n                                     [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                     in_bundles [in_bundles ...]\n\nEvaluate basic measurements of bundle(s).\n\nThe computed measures are:\n    - volume_info: volume, volume_endpoints\n    - streamlines_info: streamlines_count, avg_length (in mm or in number of\n      points), average step size, min_length, max_length.\n      ** You may also get this information with scil_tractogram_print_info.py.\n    - shape_info: span, curl, diameter, elongation, surface area,\n      irregularity, end surface area, radius, end surface irregularity,\n      mean_curvature, fractal dimension.\n      ** The diameter, here, is a simple estimation using volume / length.\n      For a more complex calculation, see scil_bundle_diameter.py.\n\nWith more than one bundle, the measures are averaged over bundles. All\ntractograms must be in the same space.\n\nThe set average contains the average measures of all input bundles. The\nmeasures that are dependent on the streamline count are weighted by the number\nof streamlines of each bundle. Each of these average measures is computed by\nfirst summing the product of the measure and the streamline count of each\nbundle and dividing the sum by the total number of streamlines. Thus, measures\nincluding length and span are essentially averages of all the streamlines.\nOther streamline-related set measures are computed with other set averages,\nwhereas bundle-related measures are computed as an average of all bundles.\nThese measures include volume and surface area.\n\nThe fractal dimension is dependent on the voxel size and the number of voxels.\nIf data comparison is performed, the bundles MUST be in the same resolution.\n\nFormerly: scil_compute_bundle_volume.py or\nscil_evaluate_bundles_individual_measures.py\n\npositional arguments:\n  in_bundles            Path of the input bundles.\n\noptions:\n  -h, --help            show this help message and exit\n  --out_json OUT_JSON   Path of the output file. If not given, the output is simply printed on screen.\n  --group_statistics    Show average measures [False].\n  --no_uniformize       Do NOT automatically uniformize endpoints for the endpoints-related metrics.\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  --processes NBR       Number of sub-processes to start. \n                        Default: [1]\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nJson options:\n  --indent INDENT       Indent for json pretty print.\n  --sort_keys           Sort keys in output json.\n\nReferences:\n[1] Fang-Cheng Yeh. 2020.\n    Shape analysis of the human association pathways. 
NeuroImage.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_uniformize_endpoints", + "docstring": "Uniformize streamlines' endpoints according to a defined axis.\nUseful for tractometry or model creation.\n\nThe --auto option will automatically calculate the main orientation.\nIf the input bundle is poorly defined, it is possible the heuristic will be wrong.\n\nThe default is to flip each streamline so that their first point's coordinate\nalong the defined axis is smaller than their last point's (--swap does the opposite).\n\nThe --target_roi option will use the barycenter of the target mask to define\nthe axis. The target mask can be a binary mask or an atlas. If an atlas is\nused, labels are expected in the form of --target_roi atlas.nii.gz 2 3 5:7.\n\nFormerly: scil_uniformize_streamlines_endpoints.py", + "help": "usage: scil_bundle_uniformize_endpoints.py [-h]\n                                           (--axis {x,y,z} | --auto | --centroid tractogram | --target_roi TARGET_ROI [TARGET_ROI ...])\n                                           [--swap] [--reference REFERENCE]\n                                           [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                           in_bundle out_bundle\n\nUniformize streamlines' endpoints according to a defined axis.\nUseful for tractometry or model creation.\n\nThe --auto option will automatically calculate the main orientation.\nIf the input bundle is poorly defined, it is possible the heuristic will be wrong.\n\nThe default is to flip each streamline so that their first point's coordinate\nalong the defined axis is smaller than their last point's (--swap does the opposite).\n\nThe --target_roi option will use the barycenter of the target mask to define\nthe axis. The target mask can be a binary mask or an atlas. If an atlas is\nused, labels are expected in the form of --target_roi atlas.nii.gz 2 3 5:7.\n\nFormerly: scil_uniformize_streamlines_endpoints.py\n\npositional arguments:\n  in_bundle             Input path of the tractography file.\n  out_bundle            Output path of the uniformized file.\n\noptions:\n  -h, --help            show this help message and exit\n  --axis {x,y,z}        Match endpoints of the streamlines along this axis.\n                        SUGGESTION: Commissural = x, Association = y, Projection = z\n  --auto                Match endpoints of the streamlines along an automatically determined axis.\n  --centroid tractogram\n                        Match endpoints of the streamlines to align them to a unique reference streamline (centroid).\n  --target_roi TARGET_ROI [TARGET_ROI ...]\n                        Provide a target ROI: either a binary mask or a label map and the labels to use.\n                        Will align heads to be closest to the mask barycenter.\n                        (atlas: if no labels are provided, all labels will be used.)\n  --swap                Swap head <-> tail convention. Can be useful when the reference is not in RAS.\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_bundle_volume_per_label", + "docstring": "Compute bundle volume per label in mm3. This script supports anisotropic voxel\nresolution. 
Volume is estimated by counting the number of voxels occupied by\neach label and multiplying it by the volume of a single voxel.\n\nThe labels can be obtained by scil_bundle_label_map.py.\n\nThis estimation is typically performed at a resolution around 1mm3.\n\nTo get the volume and other measures directly from the (whole) bundle, use\nscil_bundle_shape_measures.py.\n\nFormerly: scil_compute_bundle_volume_per_label.py", + "help": "usage: scil_bundle_volume_per_label.py [-h] [--indent INDENT] [--sort_keys]\n                                       [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                       voxel_label_map bundle_name\n\nCompute bundle volume per label in mm3. This script supports anisotropic voxel\nresolution. Volume is estimated by counting the number of voxels occupied by\neach label and multiplying it by the volume of a single voxel.\n\nThe labels can be obtained by scil_bundle_label_map.py.\n\nThis estimation is typically performed at a resolution around 1mm3.\n\nTo get the volume and other measures directly from the (whole) bundle, use\nscil_bundle_shape_measures.py.\n\nFormerly: scil_compute_bundle_volume_per_label.py\n\npositional arguments:\n  voxel_label_map  Voxel label map of the fiber bundle.\n  bundle_name      Bundle name.\n\noptions:\n  -h, --help       show this help message and exit\n  -v [{DEBUG,INFO,WARNING}]\n                   Produces verbose output depending on the provided level. \n                   Default level is warning, default when using -v is info.\n  -f               Force overwriting of the output files.\n\nJson options:\n  --indent INDENT  Indent for json pretty print.\n  --sort_keys      Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_connectivity_compare_populations", + "docstring": "Performs a network-based statistical comparison for populations g1 and g2. The\noutput is a matrix of the same size as the input connectivity matrices, with\np-values at each edge.\nAll input matrices must have the same shape (NxN). For paired t-test, both\ngroups must have the same number of observations.\n\nFor example, if you have streamline count weighted matrices for an MCI and a\ncontrol group and you want to investigate differences in their connectomes:\n    >>> scil_connectivity_compare_populations.py pval.npy\n        --g1 MCI/*_sc.npy --g2 CTL/*_sc.npy\n\n--filtering_mask will simply multiply all input\nmatrices by the binary mask before performing the statistical comparison.\nThis reduces the number of\nstatistical tests, which is useful when using --fdr or --bonferroni.\n\nFormerly: scil_compare_connectivity.py", + "help": "usage: scil_connectivity_compare_populations.py [-h] --in_g1 IN_G1 [IN_G1 ...]\n                                                --in_g2 IN_G2 [IN_G2 ...]\n                                                [--tail {left,right,both}]\n                                                [--paired]\n                                                [--fdr | --bonferroni]\n                                                [--p_threshold THRESH OUT_FILE]\n                                                [--filtering_mask FILTERING_MASK]\n                                                [-v [{DEBUG,INFO,WARNING}]]\n                                                [-f]\n                                                out_pval_matrix\n\nPerforms a network-based statistical comparison for populations g1 and g2. The\noutput is a matrix of the same size as the input connectivity matrices, with\np-values at each edge.\nAll input matrices must have the same shape (NxN). For paired t-test, both\ngroups must have the same number of observations.\n\nFor example, if you have streamline count weighted matrices for an MCI and a\ncontrol group and you want to investigate differences in their connectomes:\n    >>> scil_connectivity_compare_populations.py pval.npy\n        --g1 MCI/*_sc.npy --g2 CTL/*_sc.npy\n\n--filtering_mask will simply multiply all input\nmatrices by the binary mask before performing the statistical comparison. 
This reduces the number of\nstatistical tests, which is useful when using --fdr or --bonferroni.\n\nFormerly: scil_compare_connectivity.py\n\npositional arguments:\n  out_pval_matrix       Output matrix (.npy) containing the edges p-value.\n\noptions:\n  -h, --help            show this help message and exit\n  --in_g1 IN_G1 [IN_G1 ...]\n                        List of matrices for the first population (.npy).\n  --in_g2 IN_G2 [IN_G2 ...]\n                        List of matrices for the second population (.npy).\n  --tail {left,right,both}\n                        Enables specification of an alternative hypothesis:\n                        left: mean of g1 < mean of g2,\n                        right: mean of g2 < mean of g1,\n                        both: both means are not equal (default).\n  --paired              Use paired sample t-test instead of population t-test.\n                        --in_g1 and --in_g2 must be ordered the same way.\n  --fdr                 Perform a false discovery rate (FDR) correction for the p-values.\n                        Uses the number of non-zero edges as number of tests (value between 0.01 and 0.1).\n  --bonferroni          Perform a Bonferroni correction for the p-values.\n                        Uses the number of non-zero edges as number of tests.\n  --p_threshold THRESH OUT_FILE\n                        Threshold the final p-value matrix and save the binary matrix (.npy).\n  --filtering_mask FILTERING_MASK\n                        Binary filtering mask (.npy) to apply before computing the measures.\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\n[1] Rubinov, Mikail, and Olaf Sporns. \"Complex network measures of brain\n    connectivity: uses and interpretations.\" Neuroimage 52.3 (2010):\n    1059-1069.\n[2] Zalesky, Andrew, Alex Fornito, and Edward T. Bullmore. \"Network-based\n    statistic: identifying differences in brain networks.\" Neuroimage 53.4\n    (2010): 1197-1207.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_connectivity_compute_matrices", + "docstring": "This script computes a variety of measures in the form of connectivity\nmatrices. This script is made to follow\nscil_tractogram_segment_bundles_for_connectivity.py and\nuses the same labels list as input.\n\nThe script expects a folder containing all relevant bundles following the\nnaming convention LABEL1_LABEL2.trk and a text file containing the list of\nlabels that should be part of the matrices. The ordering of labels in the\nmatrices will follow the same order as the list.\nThis script only generates matrices in the form of arrays, it does not visualize\nor reorder the labels (node).\n\nThe parameter --similarity expects a folder with density maps\n(LABEL1_LABEL2.nii.gz) following the same naming convention as the input\ndirectory.\nThe bundles should be averaged versions in the same space. This will\ncompute the weighted-dice between each node and their homologous average\nversion.\n\nThe parameter --metrics can be used more than once and expects a map (t1, fa,\netc.) in the same space and each will generate a matrix. The average value in\nthe volume occupied by the bundle will be reported in the matrices nodes.\n\nThe parameter --maps can be used more than once and expects a folder with\npre-computed maps (LABEL1_LABEL2.nii.gz) following the same naming convention\nas the input directory. Each will generate a matrix. The average non-zero\nvalue in the map will be reported in the matrices nodes.\n\nThe parameter --lesion_load will compute 3 lesion-related matrices:\nlesion_count.npy, lesion_vol.npy, lesion_sc.npy and put them inside a\nspecified folder. 
They represent the number of lesions, the total volume of\nthe lesion(s) and the total number of streamlines going through the lesion(s) for each\nconnection. Each connection can be seen as a 'bundle' and then something\nsimilar to scil_analyse_lesion_load.py is run for each 'bundle'.\n\nFormerly: scil_compute_connectivity.py", + "help": "usage: scil_connectivity_compute_matrices.py [-h] [--volume OUT_FILE]\n                                             [--streamline_count OUT_FILE]\n                                             [--length OUT_FILE]\n                                             [--similarity IN_FOLDER OUT_FILE]\n                                             [--maps IN_FOLDER OUT_FILE]\n                                             [--metrics IN_FILE OUT_FILE]\n                                             [--lesion_load IN_FILE OUT_DIR]\n                                             [--min_lesion_vol MIN_LESION_VOL]\n                                             [--density_weighting]\n                                             [--no_self_connection]\n                                             [--include_dps OUT_DIR]\n                                             [--force_labels_list FORCE_LABELS_LIST]\n                                             [--processes NBR]\n                                             [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                             in_hdf5 in_labels\n\nThis script computes a variety of measures in the form of connectivity\nmatrices. This script is made to follow\nscil_tractogram_segment_bundles_for_connectivity.py and\nuses the same labels list as input.\n\nThe script expects a folder containing all relevant bundles following the\nnaming convention LABEL1_LABEL2.trk and a text file containing the list of\nlabels that should be part of the matrices. The ordering of labels in the\nmatrices will follow the same order as the list.\nThis script only generates matrices in the form of arrays, it does not visualize\nor reorder the labels (node).\n\nThe parameter --similarity expects a folder with density maps\n(LABEL1_LABEL2.nii.gz) following the same naming convention as the input\ndirectory.\nThe bundles should be averaged versions in the same space. This will\ncompute the weighted-dice between each node and their homologous average\nversion.\n\nThe parameter --metrics can be used more than once and expects a map (t1, fa,\netc.) in the same space and each will generate a matrix. The average value in\nthe volume occupied by the bundle will be reported in the matrices nodes.\n\nThe parameter --maps can be used more than once and expects a folder with\npre-computed maps (LABEL1_LABEL2.nii.gz) following the same naming convention\nas the input directory. Each will generate a matrix. The average non-zero\nvalue in the map will be reported in the matrices nodes.\n\nThe parameter --lesion_load will compute 3 lesion-related matrices:\nlesion_count.npy, lesion_vol.npy, lesion_sc.npy and put them inside a\nspecified folder. They represent the number of lesions, the total volume of\nthe lesion(s) and the total number of streamlines going through the lesion(s) for each\nconnection. 
Each connection can be seen as a 'bundle' and then something\nsimilar to scil_analyse_lesion_load.py is run for each 'bundle'.\n\nFormerly: scil_compute_connectivity.py\n\npositional arguments:\n  in_hdf5               Input filename for the hdf5 container (.h5).\n                        Obtained from scil_tractogram_segment_bundles_for_connectivity.py.\n  in_labels             Labels file name (nifti).\n                        This generates a NxN connectivity matrix.\n\noptions:\n  -h, --help            show this help message and exit\n  --volume OUT_FILE     Output file for the volume weighted matrix (.npy).\n  --streamline_count OUT_FILE\n                        Output file for the streamline count weighted matrix (.npy).\n  --length OUT_FILE     Output file for the length weighted matrix (.npy).\n  --similarity IN_FOLDER OUT_FILE\n                        Input folder containing the averaged bundle density\n                        maps (.nii.gz) and output file for the similarity weighted matrix (.npy).\n  --maps IN_FOLDER OUT_FILE\n                        Input folder containing pre-computed maps (.nii.gz)\n                        and output file for the weighted matrix (.npy).\n  --metrics IN_FILE OUT_FILE\n                        Input (.nii.gz). and output file (.npy) for a metric weighted matrix.\n  --lesion_load IN_FILE OUT_DIR\n                        Input binary mask (.nii.gz) and output directory for all lesion-related matrices.\n  --min_lesion_vol MIN_LESION_VOL\n                        Minimum lesion volume in mm3 [7].\n  --density_weighting   Use density-weighting for the metric weighted matrix.\n  --no_self_connection  Eliminate the diagonal from the matrices.\n  --include_dps OUT_DIR\n                        Save matrices from data_per_streamline in the output directory.\n                        COMMIT-related values will be summed instead of averaged.\n                        Will always overwrite files.\n  --force_labels_list FORCE_LABELS_LIST\n                        Path to a labels list (.txt) in case of missing labels in the atlas.\n  --processes NBR       Number of sub-processes to start. \n                        Default: [1]\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_connectivity_compute_pca", + "docstring": "Script to compute PCA analysis on diffusion metrics. Output returned is all\nsignificant principal components (i.e. with eigenvalues > 1) in a\nconnectivity matrix format. This script can take into account all edges from\nevery subject in a population or only non-zero edges across all subjects.\n\nThe script can take directly as input a connectoflow output folder. Simply use\nthe --input_connectoflow flag. For other types of folder input, the script\nexpects a single folder containing all matrices for all subjects.\nExample:\n          [in_folder]\n          |--- sub-01_ad.npy\n          |--- sub-01_md.npy\n          |--- sub-02_ad.npy\n          |--- sub-02_md.npy\n          |--- ...\n\nThe plots, tables and principal components matrices will be saved in the\ndesignated output folder. If you want to move back your\nprincipal components matrices in your connectoflow output, you can use a\nsimilar bash command for all principal components:\nfor sub in `cat list_id.txt`;\ndo\n    cp out_folder/${sub}_PC1.npy connectoflow_output/$sub/Compute_Connectivity/\ndone\n\nInterpretation of resulting principal components can be done by evaluating the\nloading values for each metric. A value near 0 means that this metric doesn't\ncontribute to this specific component whereas high positive or negative values\nmean a larger contribution. Components can then be labeled based on which\nmetric contributes the most. 
For example, a principal component showing a\nhigh loading for afd_fixel and near 0 loading for all other metrics can be\ninterpreted as axonal density (see Gagnon et al. 2022 for this specific example\nor ref [3] for an introduction to PCA).\n\nEXAMPLE USAGE:\nscil_connectivity_compute_pca.py input_folder/ output_folder/\n    --metrics ad fa md rd [...] --list_ids list_ids.txt", + "help": "usage: scil_connectivity_compute_pca.py [-h] --metrics METRICS [METRICS ...]\n                                        --list_ids FILE [--not_only_common]\n                                        [--input_connectoflow]\n                                        [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                        in_folder out_folder\n\nScript to compute PCA analysis on diffusion metrics. Output returned is all\nsignificant principal components (i.e. with eigenvalues > 1) in a\nconnectivity matrix format. This script can take into account all edges from\nevery subject in a population or only non-zero edges across all subjects.\n\nThe script can take directly as input a connectoflow output folder. Simply use\nthe --input_connectoflow flag. For other types of folder input, the script\nexpects a single folder containing all matrices for all subjects.\nExample:\n          [in_folder]\n          |--- sub-01_ad.npy\n          |--- sub-01_md.npy\n          |--- sub-02_ad.npy\n          |--- sub-02_md.npy\n          |--- ...\n\nThe plots, tables and principal components matrices will be saved in the\ndesignated output folder. If you want to move back your\nprincipal components matrices in your connectoflow output, you can use a\nsimilar bash command for all principal components:\nfor sub in `cat list_id.txt`;\ndo\n    cp out_folder/${sub}_PC1.npy connectoflow_output/$sub/Compute_Connectivity/\ndone\n\nInterpretation of resulting principal components can be done by evaluating the\nloading values for each metric. A value near 0 means that this metric doesn't\ncontribute to this specific component whereas high positive or negative values\nmean a larger contribution. Components can then be labeled based on which\nmetric contributes the most. For example, a principal component showing a\nhigh loading for afd_fixel and near 0 loading for all other metrics can be\ninterpreted as axonal density (see Gagnon et al. 2022 for this specific example\nor ref [3] for an introduction to PCA).\n\nEXAMPLE USAGE:\nscil_connectivity_compute_pca.py input_folder/ output_folder/\n    --metrics ad fa md rd [...] --list_ids list_ids.txt\n\npositional arguments:\n  in_folder             Path to the input folder. See explanation above for its expected organization.\n  out_folder            Path to the output folder to export graphs, tables and principal \n                        components matrices.\n\noptions:\n  -h, --help            show this help message and exit\n  --metrics METRICS [METRICS ...]\n                        Suffixes of all metrics to include in PCA analysis (ex: ad md fa rd). \n                        They must be immediately followed by the .npy extension.\n  --list_ids FILE       Path to a .txt file containing a list of all ids.\n  --not_only_common     If true, will include all edges from all subjects and not only \n                        common edges (not recommended).\n  --input_connectoflow  If true, the script will assume the input folder is a Connectoflow output.\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\n[1] Chamberland M, Raven EP, Genc S, Duffy K, Descoteaux M, Parker GD, Tax CMW,\n    Jones DK. Dimensionality reduction of diffusion MRI measures for improved\n    tractometry of the human brain. Neuroimage. 2019 Oct 15;200:89-100.\n    doi: 10.1016/j.neuroimage.2019.06.020. 
Epub 2019 Jun 20. PMID: 31228638;\n    PMCID: PMC6711466.\n[2] Gagnon A., Grenier G., Bocti C., Gillet V., Lepage J.-F., Baccarelli A. A.,\n    Posner J., Descoteaux M., Takser L. (2022). White matter microstructural\n    variability linked to differential attentional skills and impulsive behavior\n    in a pediatric population. Cerebral Cortex.\n    https://doi.org/10.1093/cercor/bhac180\n[3] https://towardsdatascience.com/what-are-pca-loadings-and-biplots-9a7897f2e559\n    \n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_connectivity_filter", + "docstring": "Script to facilitate filtering of connectivity matrices.\nThe same could be achieved through a complex sequence of\nscil_connectivity_math.py.\n\nCan be used with any connectivity matrix from\nscil_connectivity_compute_matrices.py.\n\nFor example, a simple filtering (Jasmeen style) would be:\nscil_connectivity_filter.py out_mask.npy\n    --greater_than */sc.npy 1 0.90\n    --lower_than */sim.npy 2 0.90\n    --greater_than */len.npy 40 0.90 -v;\n\nThis will result in a binary mask where each node with a value of 1 represents\na node where at least 90% of the population has at least 1 streamline,\n90% of the population is similar to the average (2mm) and 90% of the\npopulation has at least 40mm of average streamline length.\n\nAll operations are strictly > or <; there is no >= or <=.\n\n--greater_than or --lower_than expect the same convention:\n    MATRICES_LIST VALUE_THR POPULATION_PERC\nIt is strongly recommended (but not enforced) that the same number of\nconnectivity matrices is used for each condition.\n\nThis script performs an intersection of all conditions, meaning that all\nconditions must be met in order not to be filtered.\nIf the user wants to manually handle the requirements, --keep_condition_count\ncan be used and manually binarized using scil_connectivity_math.py.\n\nFormerly: scil_filter_connectivity.py", + "help": "usage: scil_connectivity_filter.py [-h] [--lower_than [LOWER_THAN ...]]\n                                   [--greater_than [GREATER_THAN ...]]\n                                   [--keep_condition_count] [--inverse_mask]\n                                   [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                   out_matrix_mask\n\nScript to facilitate filtering of connectivity matrices.\nThe same could be achieved through a complex sequence of\nscil_connectivity_math.py.\n\nCan be used with any connectivity matrix from\nscil_connectivity_compute_matrices.py.\n\nFor example, a simple filtering (Jasmeen style) would be:\nscil_connectivity_filter.py out_mask.npy\n    --greater_than */sc.npy 1 0.90\n    --lower_than */sim.npy 2 0.90\n    --greater_than */len.npy 40 0.90 -v;\n\nThis will result in a binary mask where each node with a value of 1 represents\na node where at least 90% of the population has at least 1 streamline,\n90% of the population is similar to the average (2mm) and 90% of the\npopulation has at least 40mm of average streamline length.\n\nAll operations are strictly > or <; there is no >= or <=.\n\n--greater_than or --lower_than expect the same convention:\n    MATRICES_LIST VALUE_THR POPULATION_PERC\nIt is strongly recommended (but not enforced) that the same number of\nconnectivity matrices is used for each condition.\n\nThis script performs an intersection of all conditions, meaning that all\nconditions must be met in order not to be filtered.\nIf the user wants to manually handle the requirements, --keep_condition_count\ncan be used and manually binarized using scil_connectivity_math.py.\n\nFormerly: scil_filter_connectivity.py\n\npositional arguments:\n  out_matrix_mask       Output mask (matrix) resulting from the provided conditions 
(.npy).\n\noptions:\n  -h, --help            show this help message and exit\n  --lower_than [LOWER_THAN ...]\n                        Lower than condition using the VALUE_THR in at least POPULATION_PERC (from MATRICES_LIST).\n                        See description for more details.\n  --greater_than [GREATER_THAN ...]\n                        Greater than condition using the VALUE_THR in at least POPULATION_PERC (from MATRICES_LIST).\n                        See description for more details.\n  --keep_condition_count\n                        Report the number of condition(s) that pass/fail rather than a binary mask.\n  --inverse_mask        Invert the final mask: 0 where all conditions are respected and 1 where at least one fails.\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_connectivity_graph_measures", + "docstring": "Evaluate graph theory measures from connectivity matrices.\nA length weighted and a streamline count weighted matrix are required since\nsome measures require one or the other.\n\nThis script evaluates the measures one subject at a time. To generate a\npopulation dictionary (similarly to other scil_connectivity_*.py scripts), use\nthe --append_json option as well as using the same output filename.\n>>> for i in hcp/*/; do scil_connectivity_graph_measures.py ${i}/sc_prob.npy\n    ${i}/len_prob.npy hcp_prob.json --append_json --avg_node_wise; done\n\nSome measures output one value per node, the default behavior is to list\nthem all. To obtain only the average, use the\n--avg_node_wise option.\n\nThe computed connectivity measures are:\ncentrality, modularity, assortativity, participation, clustering,\nnodal_strength, local_efficiency, global_efficiency, density, rich_club,\npath_length, edge_count, omega, sigma\n\nFor more details about the measures, please refer to\n- https://sites.google.com/site/bctnet/measures\n- https://github.com/aestrivex/bctpy/wiki\n\nThis script is under the GNU GPLv3 license, for more detail please refer to\nhttps://www.gnu.org/licenses/gpl-3.0.en.html\n\nFormerly: scil_evaluate_connectivity_graph_measures.py", + "help": "usage: scil_connectivity_graph_measures.py [-h]\n                                           [--filtering_mask FILTERING_MASK]\n                                           [--avg_node_wise] [--append_json]\n                                           [--small_world] [--indent INDENT]\n                                           [--sort_keys]\n                                           [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                           in_conn_matrix in_length_matrix\n                                           out_json\n\nEvaluate graph theory measures from connectivity matrices.\nA length weighted and a streamline count weighted matrix are required since\nsome measures require one or the other.\n\nThis script evaluates the measures one subject at a time. To generate a\npopulation dictionary (similarly to other scil_connectivity_*.py scripts), use\nthe --append_json option as well as using the same output filename.\n>>> for i in hcp/*/; do scil_connectivity_graph_measures.py ${i}/sc_prob.npy\n    ${i}/len_prob.npy hcp_prob.json --append_json --avg_node_wise; done\n\nSome measures output one value per node, the default behavior is to list\nthem all. 
To obtain only the average use the\n--avg_node_wise option.\n\nThe computed connectivity measures are:\ncentrality, modularity, assortativity, participation, clustering,\nnodal_strength, local_efficiency, global_efficiency, density, rich_club,\npath_length, edge_count, omega, sigma\n\nFor more details about the measures, please refer to\n- https://sites.google.com/site/bctnet/measures\n- https://github.com/aestrivex/bctpy/wiki\n\nThis script is under the GNU GPLv3 license, for more detail please refer to\nhttps://www.gnu.org/licenses/gpl-3.0.en.html\n\nFormerly: scil_evaluate_connectivity_graph_measures.py\n\npositional arguments:\n in_conn_matrix Input connectivity matrix (.npy).\n Typically a streamline count weighted matrix.\n in_length_matrix Input length weighted matrix (.npy).\n out_json Path of the output json.\n\noptions:\n -h, --help show this help message and exit\n --filtering_mask FILTERING_MASK\n Binary filtering mask to apply before computing the measures.\n --avg_node_wise Return a single value for node-wise measures.\n --append_json If the file already exists, will append to the dictionary.\n --small_world Compute measure related to small worldness (omega and sigma).\n This option is much slower.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\n[1] Rubinov, Mikail, and Olaf Sporns. \"Complex network measures of brain\n connectivity: uses and interpretations.\" Neuroimage 52.3 (2010):\n 1059-1069.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_connectivity_hdf5_average_density_map", + "docstring": "Compute a density map for each connection from a hdf5 file.\nTypically use after scil_tractogram_segment_bundles_for_connectivity.py in\norder to obtain the average density map of each connection to allow the use\nof --similarity in scil_connectivity_compute_matrices.py.\n\nThis script is parallelized, but will run much slower on non-SSD if too many\nprocesses are used. The output is a directory containing the thousands of\nconnections:\nout_dir/\n |-- LABEL1_LABEL1.nii.gz\n |-- LABEL1_LABEL2.nii.gz\n |-- [...]\n |-- LABEL90_LABEL90.nii.gz\n\nFormerly: scil_compute_hdf5_average_density_map.py", + "help": "usage: scil_connectivity_hdf5_average_density_map.py [-h] [--binary]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_hdf5 [in_hdf5 ...]\n out_dir\n\nCompute a density map for each connection from a hdf5 file.\nTypically use after scil_tractogram_segment_bundles_for_connectivity.py in\norder to obtain the average density map of each connection to allow the use\nof --similarity in scil_connectivity_compute_matrices.py.\n\nThis script is parallelized, but will run much slower on non-SSD if too many\nprocesses are used. The output is a directory containing the thousands of\nconnections:\nout_dir/\n |-- LABEL1_LABEL1.nii.gz\n |-- LABEL1_LABEL2.nii.gz\n |-- [...]\n |-- LABEL90_LABEL90.nii.gz\n\nFormerly: scil_compute_hdf5_average_density_map.py\n\npositional arguments:\n in_hdf5 List of HDF5 filenames (.h5) from scil_tractogram_segment_bundles_for_connectivity.py.\n out_dir Path of the output directory.\n\noptions:\n -h, --help show this help message and exit\n --binary Binarize density maps before the population average.\n --processes NBR Number of sub-processes to start. 
\n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_connectivity_math", + "docstring": "Performs an operation on a list of matrices. The supported operations are\nlisted below.\n\nSome operations such as multiplication or addition accept float value as\nparameters instead of matrices.\n> scil_connectivity_math.py multiplication mat.npy 10 mult_10.npy", + "help": "usage: scil_connectivity_math.py [-h] [--data_type DATA_TYPE]\n [--exclude_background]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference}\n in_matrices [in_matrices ...] out_matrix\n\nPerforms an operation on a list of matrices. The supported operations are\nlisted below.\n\nSome operations such as multiplication or addition accept float value as\nparameters instead of matrices.\n> scil_connectivity_math.py multiplication mat.npy 10 mult_10.npy\n\n lower_threshold: MAT THRESHOLD\n All values below the threshold will be set to zero.\n All values above the threshold will be set to one.\n \n upper_threshold: MAT THRESHOLD\n All values below the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_eq: MAT THRESHOLD\n All values below the threshold will be set to zero.\n All values above or equal the threshold will be set to one.\n \n upper_threshold_eq: MAT THRESHOLD\n All values below or equal the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_otsu: MAT\n All values below or equal to the Otsu threshold will be set to zero.\n All values above the Otsu threshold will be set to one.\n (Otsu's method is an algorithm to perform automatic matrix thresholding\n of the background.)\n \n upper_threshold_otsu: MAT\n All values below the Otsu threshold will be set to one.\n All values above or equal to the Otsu threshold will be set to zero.\n Equivalent to lower_threshold_otsu followed by an inversion.\n \n lower_clip: MAT THRESHOLD\n All values below the threshold will be set to threshold.\n \n upper_clip: MAT THRESHOLD\n All values above the threshold will be set to threshold.\n \n absolute_value: MAT\n All negative values will become positive.\n \n round: MAT\n Round all decimal values to the closest integer.\n \n ceil: MAT\n Ceil all decimal values to the next integer.\n \n floor: MAT\n Floor all decimal values to the previous integer.\n \n normalize_sum: MAT\n Normalize the matrix so the sum of all values is one.\n \n normalize_max: MAT\n Normalize the matrix so the maximum value is one.\n \n log_10: MAT\n Apply a log (base 10) to all non zeros values of an matrix.\n \n log_e: MAT\n Apply a natural log to all non zeros values of an matrix.\n \n convert: MAT\n Perform no operation, but simply change the data type.\n \n invert: MAT\n Operation on binary matrix to interchange 0s and 1s in a binary mask.\n \n addition: MATs\n Add multiple matrices together.\n \n subtraction: MAT_1 MAT_2\n Subtract first 
matrix by the second (MAT_1 - MAT_2).\n \n multiplication: MATs\n Multiply multiple matrices together (danger of underflow and overflow)\n \n division: MAT_1 MAT_2\n Divide first matrix by the second (danger of underflow and overflow)\n Zero values are ignored and excluded from the operation.\n \n mean: MATs\n Compute the mean of matrices.\n If a single 4D matrix is provided, average along the last dimension.\n \n std: MATs\n Compute the standard deviation across multiple matrices.\n If a single 4D matrix is provided, compute the STD along the last\n dimension.\n \n correlation: MATs\n Computes the correlation of the 3x3x3 neighborhood of each voxel, for\n all pairs of input matrices. The final matrix is the average correlation\n (through all pairs).\n For a given pair of matrices:\n - Background is considered as 0. May lead to very high correlations\n close to the border of the background regions, or very poor ones if the\n background in both matrices differs.\n - Images are zero-padded. For the same reason as above, may lead to\n very high correlations if you have data close to the border of the\n matrix.\n - NaN values (if a voxel's neighborhood is entirely uniform; std 0) are\n replaced by\n - 0 if at least one neighborhood contained only background.\n - 1 if the voxel's neighborhoods are uniform in both matrices\n - 0 if the voxel's neighborhood is uniform in one matrix, but not\n the other.\n\n UPDATE AS OF VERSION 2.0: Random noise was previously added in the\n process to help avoid NaN values. Now replaced by either 0 or 1 as\n explained above.\n \n union: MATs\n Operation on binary matrix to keep voxels that are non-zero in at\n least one file.\n \n intersection: MATs\n Operation on binary matrix to keep the voxels that are non-zero\n and present in all files.\n \n difference: MAT_1 MAT_2\n Operation on binary matrix to keep voxels from the first file that are\n not in the second file (non-zeros).\n \n\npositional arguments:\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference}\n The type of operation to be performed on the matrices.\n in_matrices The list of matrix files or parameters.\n out_matrix Output matrix path.\n\noptions:\n -h, --help show this help message and exit\n --data_type DATA_TYPE\n Data type of the output image. Use the format: uint8, float16, int32.\n --exclude_background Does not affect the background of the original matrices.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_connectivity_normalize", + "docstring": "Normalize a connectivity matrix coming from\nscil_tractogram_segment_bundles_for_connectivity.py.\n3 categories of normalization are available:\n-- Edge attributes\n - length: Multiply each edge by the average bundle length.\n Compensate for far away connections when using interface seeding.\n Cannot be used with inverse_length.\n\n - inverse_length: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n Cannot be used with length.\n\n - bundle_volume: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n\n-- Node attributes (Mutually exclusive)\n - parcel_volume: Divide each edge by the sum of node volume.\n Compensate for the likelihood of ending in the node.\n Compensate seeding bias when using interface seeding.\n\n - parcel_surface: Divide each edge by the sum of the node surface.\n Compensate for the likelihood of ending in the node.\n Compensate for seeding bias when using interface seeding.\n\n-- Matrix scaling (Mutually exclusive)\n - max_at_one: Maximum value of the matrix will be set to one.\n - sum_to_one: Ensure the sum of all edges weight is one\n - log_10: Apply a base 10 logarithm to all edges weight\n\nThe volume and length matrix should come from the\nscil_tractogram_segment_bundles_for_connectivity.py script.\n\nA review of the type of normalization is available in:\nColon-Perez, Luis M., et al. \"Dimensionless, scale-invariant, edge weight\nmetric for the study of complex structural networks.\" PLOS one 10.7 (2015).\n\nHowever, the proposed weighting of edge presented in this publication is not\nimplemented.\n\nFormerly: scil_normalize_connectivity.py", + "help": "usage: scil_connectivity_normalize.py [-h]\n [--length LENGTH_MATRIX | --inverse_length LENGTH_MATRIX]\n [--bundle_volume VOLUME_MATRIX]\n [--parcel_volume ATLAS LABELS_LIST | --parcel_surface ATLAS LABELS_LIST]\n [--max_at_one | --sum_to_one | --log_10]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_matrix out_matrix\n\nNormalize a connectivity matrix coming from\nscil_tractogram_segment_bundles_for_connectivity.py.\n3 categories of normalization are available:\n-- Edge attributes\n - length: Multiply each edge by the average bundle length.\n Compensate for far away connections when using interface seeding.\n Cannot be used with inverse_length.\n\n - inverse_length: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n Cannot be used with length.\n\n - bundle_volume: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n\n-- Node attributes (Mutually exclusive)\n - parcel_volume: Divide each edge by the sum of node volume.\n Compensate for the likelihood of ending in the node.\n Compensate seeding bias when using interface seeding.\n\n - parcel_surface: Divide each edge by the sum of the node surface.\n Compensate for the likelihood of ending in the node.\n Compensate for seeding bias when using interface seeding.\n\n-- Matrix scaling (Mutually exclusive)\n - max_at_one: Maximum value of the matrix will be set to one.\n - sum_to_one: Ensure the sum of all edges weight is one\n - log_10: Apply a base 10 logarithm to all edges weight\n\nThe volume and length matrix should 
come from the\nscil_tractogram_segment_bundles_for_connectivity.py script.\n\nA review of the type of normalization is available in:\nColon-Perez, Luis M., et al. \"Dimensionless, scale-invariant, edge weight\nmetric for the study of complex structural networks.\" PLOS one 10.7 (2015).\n\nHowever, the edge weighting proposed in this publication is not\nimplemented.\n\nFormerly: scil_normalize_connectivity.py\n\npositional arguments:\n in_matrix Input connectivity matrix. This is typically a streamline_count matrix (.npy).\n out_matrix Output normalized matrix (.npy).\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nEdge-wise options:\n --length LENGTH_MATRIX\n Length matrix used for edge-wise multiplication.\n --inverse_length LENGTH_MATRIX\n Length matrix used for edge-wise division.\n --bundle_volume VOLUME_MATRIX\n Volume matrix used for edge-wise division.\n --parcel_volume ATLAS LABELS_LIST\n Atlas and labels list for edge-wise division.\n --parcel_surface ATLAS LABELS_LIST\n Atlas and labels list for edge-wise division.\n\nScaling options:\n --max_at_one Scale matrix with maximum value at one.\n --sum_to_one Scale matrix with sum of all elements at one.\n --log_10 Apply a base 10 logarithm to the matrix.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_connectivity_pairwise_agreement", + "docstring": "Evaluate pair-wise similarity measures of connectivity matrices.\n\nThe computed similarity measures are:\nsum of squared differences and Pearson correlation coefficient\n\nFormerly: scil_evaluate_connectivity_pairwaise_agreement_measures.py", + "help": "usage: scil_connectivity_pairwise_agreement.py [-h] [--single_compare matrix]\n [--normalize] [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_matrices [in_matrices ...]\n out_json\n\nEvaluate pair-wise similarity measures of connectivity matrices.\n\nThe computed similarity measures are:\nsum of squared differences and Pearson correlation coefficient\n\nFormerly: scil_evaluate_connectivity_pairwaise_agreement_measures.py\n\npositional arguments:\n in_matrices Path of the input matrices.\n out_json Path of the output json file.\n\noptions:\n -h, --help show this help message and exit\n --single_compare matrix\n Compare inputs to this single file.\n (Else, compute all pairs in in_matrices).\n --normalize If set, will normalize all matrices from zero to one.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_connectivity_print_filenames", + "docstring": "Output the list of filenames using the coordinates from a binary connectivity\nmatrix. 
Typically used to move around files that are considered valid after\nthe scil_connectivity_filter.py script.\n\nExample:\n# Keep connections with more than 1000 streamlines for 100% of a population\nscil_connectivity_filter.py filtering_mask.npy\n --greater_than */streamlines_count.npy 1000 1.0\nscil_connectivity_print_filenames.py filtering_mask.npy\n labels_list.txt pass.txt\nfor file in $(cat pass.txt);\n do mv ${SOMEWHERE}/${file} ${SOMEWHERE_ELSE}/;\ndone\n\nFormerly: scil_print_connectivity_filenames.py", + "help": "usage: scil_connectivity_print_filenames.py [-h] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_matrix labels_list out_txt\n\nOutput the list of filenames using the coordinates from a binary connectivity\nmatrix. Typically used to move around files that are considered valid after\nthe scil_connectivity_filter.py script.\n\nExample:\n# Keep connections with more than 1000 streamlines for 100% of a population\nscil_connectivity_filter.py filtering_mask.npy\n --greater_than */streamlines_count.npy 1000 1.0\nscil_connectivity_print_filenames.py filtering_mask.npy\n labels_list.txt pass.txt\nfor file in $(cat pass.txt);\n do mv ${SOMEWHERE}/${file} ${SOMEWHERE_ELSE}/;\ndone\n\nFormerly: scil_print_connectivity_filenames.py\n\npositional arguments:\n in_matrix Binary matrix in numpy (.npy) format.\n Typically from scil_connectivity_filter.py\n labels_list List saved by the decomposition script.\n out_txt Output text file containing all filenames.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_connectivity_reorder_rois", + "docstring": "Re-order one or many connectivity matrices using a text file format.\nThe first row contains the (x) and the second row the (y); they must be space separated.\nThe resulting matrix does not have to be square (supports an unequal number of\nx and y).\n\nThe values refer to the coordinates (starting at 0) in the matrix, but if the\n--labels_list parameter is used, the values will refer to the label which will\nbe converted to the appropriate coordinates. This file must be the same as the\none provided to scil_tractogram_segment_bundles_for_connectivity.py.\n\nTo subsequently use scil_visualize_connectivity.py with a lookup table, you\nmust use a label-based reordering json and use --labels_list.\n\nYou can also use the Optimal Leaf Ordering (OLO) algorithm to transform a\nsparse matrix into an ordering that reduces the matrix bandwidth. The output\nfile can then be re-used with --in_ordering. 
Only one input can be used with\nthis option; we recommend an average streamline count or volume matrix.\n\nFormerly: scil_reorder_connectivity.py", + "help": "usage: scil_connectivity_reorder_rois.py [-h]\n (--in_ordering IN_ORDERING | --optimal_leaf_ordering OUT_FILE)\n [--out_suffix OUT_SUFFIX]\n [--out_dir OUT_DIR]\n [--labels_list LABELS_LIST]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_matrices [in_matrices ...]\n\nRe-order one or many connectivity matrices using a text file format.\nThe first row contains the (x) and the second row the (y); they must be space separated.\nThe resulting matrix does not have to be square (supports an unequal number of\nx and y).\n\nThe values refer to the coordinates (starting at 0) in the matrix, but if the\n--labels_list parameter is used, the values will refer to the label which will\nbe converted to the appropriate coordinates. This file must be the same as the\none provided to scil_tractogram_segment_bundles_for_connectivity.py.\n\nTo subsequently use scil_visualize_connectivity.py with a lookup table, you\nmust use a label-based reordering json and use --labels_list.\n\nYou can also use the Optimal Leaf Ordering (OLO) algorithm to transform a\nsparse matrix into an ordering that reduces the matrix bandwidth. The output\nfile can then be re-used with --in_ordering. Only one input can be used with\nthis option; we recommend an average streamline count or volume matrix.\n\nFormerly: scil_reorder_connectivity.py\n\npositional arguments:\n in_matrices Connectivity matrices in .npy or .txt format.\n\noptions:\n -h, --help show this help message and exit\n --in_ordering IN_ORDERING\n Txt file with the first row as x and second as y.\n --optimal_leaf_ordering OUT_FILE\n Output a text file with an ordering that aligns structures along the diagonal.\n --out_suffix OUT_SUFFIX\n Suffix for the output matrix filename.\n --out_dir OUT_DIR Output directory for the re-ordered matrices.\n --labels_list LABELS_LIST\n List saved by the decomposition script;\n --in_ordering must contain labels rather than coordinates (.txt).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Rubinov, Mikail, and Olaf Sporns. \"Complex network measures of brain\n connectivity: uses and interpretations.\" Neuroimage 52.3 (2010):\n 1059-1069.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_denoising_nlmeans", + "docstring": "Script to denoise a dataset with the Non Local Means algorithm.\n\nFormerly: scil_run_nlmeans.py", + "help": "usage: scil_denoising_nlmeans.py [-h] [--mask] [--sigma float] [--log LOGFILE]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_image out_image number_coils\n\nScript to denoise a dataset with the Non Local Means algorithm.\n\nFormerly: scil_run_nlmeans.py\n\npositional arguments:\n in_image Path of the image file to denoise.\n out_image Path to save the denoised image file.\n number_coils Number of receiver coils of the scanner.\n Use number_coils=1 in the case of a SENSE (GE, Philips) reconstruction and \n number_coils >= 1 for GRAPPA reconstruction (Siemens). number_coils=4 works well for the 1.5T\n in Sherbrooke. Use number_coils=0 if the noise is considered Gaussian distributed.\n\noptions:\n -h, --help show this help message and exit\n --mask Path to a binary mask. 
Only the data inside the mask will be used for computations\n --sigma float The standard deviation of the noise to use instead of computing it automatically.\n --log LOGFILE If supplied, name of the text file to store the logs.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dki_metrics", + "docstring": "Script to compute the Diffusion Kurtosis Imaging (DKI) and Mean Signal DKI\n(MSDKI) metrics. DKI is a multi-shell diffusion model. The input DWI needs\nto be multi-shell, i.e. multi-bvalued.\n\nSince the diffusion kurtosis model involves the estimation of a large number\nof parameters and since the non-Gaussian components of the diffusion signal\nare more sensitive to artefacts, you should really denoise your DWI volume\nbefore using this DKI script (e.g. scil_denoising_nlmeans.py). Moreover, to\nremove biases due to fiber dispersion, fiber crossings and other mesoscopic\nproperties of the underlying tissue, MSDKI does a powder-average of DWI for all\ndirections, thus removing the orientational dependencies and creating an\nalternative mean kurtosis map.\n\nDKI is also known to be vulnerable to artefacted voxels induced by the\nlow radial diffusivities of aligned white matter (CC, CST voxels). Since it is\nvery hard to capture non-Gaussian information due to the low decays in radial\ndirection, its kurtosis estimates have very low robustness.\nNoisy kurtosis estimates tend to be negative and its absolute values can have\norder of magnitudes higher than the typical kurtosis values. Consequently,\nthese negative kurtosis values will heavily propagate to the mean and radial\nkurtosis metrics. This is well-reported in [Rafael Henriques MSc thesis 2012,\nchapter 3]. Two ways to overcome this issue: i) compute the kurtosis values\nfrom powder-averaged MSDKI, and ii) perform 3D Gaussian smoothing. On\npowder-averaged signal decays, you don't have this low diffusivity issue and\nyour kurtosis estimates have much higher precision (additionally they are\nindependent to the fODF).\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section. If --not_all is set, only the metrics specified\nexplicitly by the flags will be output.\n\nThis script directly comes from the DIPY example gallery and references\ntherein.\n[1] examples_built/reconst_dki/#example-reconst-dki\n[2] examples_built/reconst_msdki/#example-reconst-msdki\n\nFormerly: scil_compute_kurtosis_metrics.py", + "help": "usage: scil_dki_metrics.py [-h] [--mask MASK] [--tolerance tol]\n [--skip_b0_check] [--min_k MIN_K] [--max_k MAX_K]\n [--smooth SMOOTH] [--not_all] [--ak file]\n [--mk file] [--rk file] [--msk file]\n [--dki_fa file] [--dki_md file] [--dki_ad file]\n [--dki_rd file] [--dki_residual file] [--msd file]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec\n\nScript to compute the Diffusion Kurtosis Imaging (DKI) and Mean Signal DKI\n(MSDKI) metrics. DKI is a multi-shell diffusion model. The input DWI needs\nto be multi-shell, i.e. 
multi-bvalued.\n\nSince the diffusion kurtosis model involves the estimation of a large number\nof parameters and since the non-Gaussian components of the diffusion signal\nare more sensitive to artefacts, you should really denoise your DWI volume\nbefore using this DKI script (e.g. scil_denoising_nlmeans.py). Moreover, to\nremove biases due to fiber dispersion, fiber crossings and other mesoscopic\nproperties of the underlying tissue, MSDKI does a powder-average of DWI for all\ndirections, thus removing the orientational dependencies and creating an\nalternative mean kurtosis map.\n\nDKI is also known to be vulnerable to artefacted voxels induced by the\nlow radial diffusivities of aligned white matter (CC, CST voxels). Since it is\nvery hard to capture non-Gaussian information due to the low decays in radial\ndirection, its kurtosis estimates have very low robustness.\nNoisy kurtosis estimates tend to be negative and its absolute values can have\norder of magnitudes higher than the typical kurtosis values. Consequently,\nthese negative kurtosis values will heavily propagate to the mean and radial\nkurtosis metrics. This is well-reported in [Rafael Henriques MSc thesis 2012,\nchapter 3]. Two ways to overcome this issue: i) compute the kurtosis values\nfrom powder-averaged MSDKI, and ii) perform 3D Gaussian smoothing. On\npowder-averaged signal decays, you don't have this low diffusivity issue and\nyour kurtosis estimates have much higher precision (additionally they are\nindependent to the fODF).\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section. If --not_all is set, only the metrics specified\nexplicitly by the flags will be output.\n\nThis script directly comes from the DIPY example gallery and references\ntherein.\n[1] examples_built/reconst_dki/#example-reconst-dki\n[2] examples_built/reconst_msdki/#example-reconst-msdki\n\nFormerly: scil_compute_kurtosis_metrics.py\n\npositional arguments:\n in_dwi Path of the input multi-shell DWI dataset.\n in_bval Path of the b-value file, in FSL format.\n in_bvec Path of the b-vector file, in FSL format.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computations and reconstruction.\n [Default: None]\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --min_k MIN_K Minimum kurtosis value in the output maps \n (ak, mk, rk). In theory, -3/7 is the min kurtosis \n limit for regions that consist of water confined \n to spherical pores (see DIPY example and \n documentation) [Default: 0.0].\n --max_k MAX_K Maximum kurtosis value in the output maps \n (ak, mk, rk). In theory, 10 is the max kurtosis\n limit for regions that consist of water confined\n to spherical pores (see DIPY example and \n documentation) [Default: 3.0].\n --smooth SMOOTH Smooth input DWI with a 3D Gaussian filter with \n full-width-half-max (fwhm). 
Kurtosis fitting is \n sensitive and outliers occur easily. According to\n tests on HCP, CB_Brain, Penthera3T, this smoothing\n is thus turned ON by default with fwhm=2.5. \n [Default: 2.5].\n --not_all If set, will only save the metrics explicitly \n specified using the other metrics flags. \n [Default: not set].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics files flags:\n --ak file Output filename for the axial kurtosis.\n --mk file Output filename for the mean kurtosis.\n --rk file Output filename for the radial kurtosis.\n --msk file Output filename for the mean signal kurtosis.\n --dki_fa file Output filename for the fractional anisotropy from DKI.\n --dki_md file Output filename for the mean diffusivity from DKI.\n --dki_ad file Output filename for the axial diffusivity from DKI.\n --dki_rd file Output filename for the radial diffusivity from DKI.\n\nQuality control files flags:\n --dki_residual file Output filename for the map of the residual of the tensor fit.\n Note. In previous versions, the resulting map was normalized. \n It is not anymore.\n --msd file Output filename for the mean signal diffusion (powder-average).\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dti_convert_tensors", + "docstring": "Conversion of tensors (the 6 values from the triangular matrix) between various\nsoftware standards. We cannot discover the input format type, user must know\nhow the tensors were created.", + "help": "usage: scil_dti_convert_tensors.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file out_file in_format out_format\n\nConversion of tensors (the 6 values from the triangular matrix) between various\nsoftware standards. We cannot discover the input format type, user must know\nhow the tensors were created.\n\n Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639\n \n MRTRIX's order is : [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]\n Shape: [i, j , k, 6].\n Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html\n \n ANTS's order ('nifti format') is : [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].\n Shape: [i, j , k, 1, 6] (Careful, file is 5D).\n Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software\n \n FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide\n (Also used for the Fibernavigator)\n \n\npositional arguments:\n in_file Input tensors filename.\n out_file Output tensors filename.\n in_format Input format. Choices: ['fsl', 'nifti', 'mrtrix', 'dipy']\n out_format Output format. Choices: ['fsl', 'nifti', 'mrtrix', 'dipy']\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dti_metrics", + "docstring": "Script to compute all of the Diffusion Tensor Imaging (DTI) metrics.\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section.\n\nIf --not_all is set, only the metrics specified explicitly by the flags\nwill be output. 
The available metrics are:\n\nfractional anisotropy (FA), geodesic anisotropy (GA), axial diffusivity (AD),\nradial diffusivity (RD), mean diffusivity (MD), mode, red-green-blue colored\nFA (rgb), principal tensor e-vector and tensor coefficients (dxx, dxy, dxz,\ndyy, dyz, dzz).\n\nFor all the quality control metrics such as residual, physically implausible\nsignals, pulsation and misalignment artifacts, see\n[J-D Tournier, S. Mori, A. Leemans. Diffusion Tensor Imaging and Beyond.\nMRM 2011].\n\nFormerly: scil_compute_dti_metrics.py", + "help": "usage: scil_dti_metrics.py [-h] [-f] [--mask MASK] [--method method_name]\n [--not_all] [--ad file] [--evecs file]\n [--evals file] [--fa file] [--ga file] [--md file]\n [--mode file] [--norm file] [--rgb file]\n [--rd file] [--tensor file]\n [--tensor_format {fsl,nifti,mrtrix,dipy}]\n [--non-physical file] [--pulsation string]\n [--residual file] [--b0_threshold thr]\n [--skip_b0_check] [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nScript to compute all of the Diffusion Tensor Imaging (DTI) metrics.\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section.\n\nIf --not_all is set, only the metrics specified explicitly by the flags\nwill be output. The available metrics are:\n\nfractional anisotropy (FA), geodesic anisotropy (GA), axial diffusivity (AD),\nradial diffusivity (RD), mean diffusivity (MD), mode, red-green-blue colored\nFA (rgb), principal tensor e-vector and tensor coefficients (dxx, dxy, dxz,\ndyy, dyz, dzz).\n\nFor all the quality control metrics such as residual, physically implausible\nsignals, pulsation and misalignment artifacts, see\n[J-D Tournier, S. Mori, A. Leemans. Diffusion Tensor Imaging and Beyond.\nMRM 2011].\n\nFormerly: scil_compute_dti_metrics.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computations and reconstruction. (Default: None)\n --method method_name Tensor fit method.\n WLS for weighted least squares\n LS for ordinary least squares\n NLLS for non-linear least-squares\n restore for RESTORE robust tensor fitting. (Default: WLS)\n --not_all If set, will only save the metrics explicitly specified using the other metrics flags. (Default: not set).\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n\nMetrics files flags:\n --ad file Output filename for the axial diffusivity.\n --evecs file Output filename for the eigenvectors of the tensor.\n --evals file Output filename for the eigenvalues of the tensor.\n --fa file Output filename for the fractional anisotropy.\n --ga file Output filename for the geodesic anisotropy.\n --md file Output filename for the mean diffusivity.\n --mode file Output filename for the mode.\n --norm file Output filename for the tensor norm.\n --rgb file Output filename for the colored fractional anisotropy.\n --rd file Output filename for the radial diffusivity.\n --tensor file Output filename for the tensor coefficients.\n --tensor_format {fsl,nifti,mrtrix,dipy}\n Format used for the tensors saved in --tensor file.(default: fsl)\n \n Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639\n \n MRTRIX's order is : [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]\n Shape: [i, j , k, 6].\n Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html\n \n ANTS's order ('nifti format') is : [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].\n Shape: [i, j , k, 1, 6] (Careful, file is 5D).\n Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software\n \n FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide\n (Also used for the Fibernavigator)\n \n\nQuality control files flags:\n --non-physical file Output filename for the voxels with physically implausible signals \n where the mean of b=0 images is below one or more diffusion-weighted images.\n --pulsation string Standard deviation map across all diffusion-weighted images and across b=0 images if more than one is available.\n Shows pulsation and misalignment artifacts.\n --residual file Output filename for the map of the residual of the tensor fit.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dwi_apply_bias_field", + "docstring": "Apply bias field correction to DWI. This script doesn't compute the bias\nfield itself. It ONLY applies an existing bias field. Please use the ANTs\nN4BiasFieldCorrection executable to compute the bias field.\n\nFormerly: scil_apply_bias_field_on_dwi.py", + "help": "usage: scil_dwi_apply_bias_field.py [-h] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bias_field out_name\n\nApply bias field correction to DWI. This script doesn't compute the bias\nfield itself. It ONLY applies an existing bias field. Please use the ANTs\nN4BiasFieldCorrection executable to compute the bias field.\n\nFormerly: scil_apply_bias_field_on_dwi.py\n\npositional arguments:\n in_dwi DWI Nifti image.\n in_bias_field Bias field Nifti image.\n out_name Corrected DWI Nifti image.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Apply bias field correction only in the region defined by the mask.\n If this is not given, the bias field is still only applied only in non-background data \n (i.e. where the dwi is not 0).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dwi_compute_snr", + "docstring": "Script to compute signal to noise ratio (SNR) in a region of interest (ROI)\nof a DWI volume.\n\nIt will compute the SNR for all DWI volumes of the input image separately.\nThe output will contain the SNR which is the ratio of\nmean(signal) / std(noise).\nThe mean of the signal is computed inside the mask.\nThe standard deviation of the noise is estimated inside the noise_mask\nor inside the same mask if a noise_map is provided.\nIf it's not supplied, it will be estimated using the data outside the brain,\ncomputed with Dipy's median_otsu.\n\nIf verbose is True, the SNR for every DWI volume will be output.\n\nThis works best in a well-defined ROI such as the corpus callosum.\nIt is heavily dependent on the ROI and its quality.\n\nWe highly recommend using a noise_map if you can acquire one.\nSee refs [1, 2] that describe the noise map acquisition.\n[1] St-Jean, et al (2016). Non Local Spatial and Angular Matching...\n https://doi.org/10.1016/j.media.2016.02.010\n[2] Reymbaut, et al (2021). Magic DIAMOND...\n https://doi.org/10.1016/j.media.2021.101988\n\nFormerly: scil_snr_in_roi.py", + "help": "usage: scil_dwi_compute_snr.py [-h]\n [--noise_mask NOISE_MASK | --noise_map NOISE_MAP]\n [--b0_thr B0_THR] [--out_basename OUT_BASENAME]\n [--split_shells] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_mask\n\nScript to compute signal to noise ratio (SNR) in a region of interest (ROI)\nof a DWI volume.\n\nIt will compute the SNR for all DWI volumes of the input image separately.\nThe output will contain the SNR which is the ratio of\nmean(signal) / std(noise).\nThe mean of the signal is computed inside the mask.\nThe standard deviation of the noise is estimated inside the noise_mask\nor inside the same mask if a noise_map is provided.\nIf it's not supplied, it will be estimated using the data outside the brain,\ncomputed with Dipy's median_otsu.\n\nIf verbose is True, the SNR for every DWI volume will be output.\n\nThis works best in a well-defined ROI such as the corpus callosum.\nIt is heavily dependent on the ROI and its quality.\n\nWe highly recommend using a noise_map if you can acquire one.\nSee refs [1, 2] that describe the noise map acquisition.\n[1] St-Jean, et al (2016). Non Local Spatial and Angular Matching...\n https://doi.org/10.1016/j.media.2016.02.010\n[2] Reymbaut, et al (2021). Magic DIAMOND...\n https://doi.org/10.1016/j.media.2021.101988\n\nFormerly: scil_snr_in_roi.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n in_mask Binary mask of the region used to estimate SNR.\n\noptions:\n -h, --help show this help message and exit\n --b0_thr B0_THR All b-values with values less than or equal to b0_thr are considered as b0s i.e. without diffusion weighting. [0.0]\n --out_basename OUT_BASENAME\n Path and prefix for the various saved files.\n --split_shells SNR will be split into shells.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMasks options:\n --noise_mask NOISE_MASK\n Binary mask used to estimate the noise from the DWI.\n --noise_map NOISE_MAP\n Noise map.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dwi_concatenate", + "docstring": "Concatenate DWI, bval and bvecs together. File must be specified in matching\norder. Default data type will be the same as the first input DWI.\n\nFormerly: scil_concatenate_dwi.py", + "help": "usage: scil_dwi_concatenate.py [-h] [--in_dwis IN_DWIS [IN_DWIS ...]]\n [--in_bvals IN_BVALS [IN_BVALS ...]]\n [--in_bvecs IN_BVECS [IN_BVECS ...]]\n [--data_type DATA_TYPE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dwi out_bval out_bvec\n\nConcatenate DWI, bval and bvecs together. File must be specified in matching\norder. Default data type will be the same as the first input DWI.\n\nFormerly: scil_concatenate_dwi.py\n\npositional arguments:\n out_dwi The name of the output DWI file.\n out_bval The name of the output b-values file (.bval).\n out_bvec The name of the output b-vectors file (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n The DWI file (.nii) to concatenate.\n --in_bvals IN_BVALS [IN_BVALS ...]\n The b-values files in FSL format (.bval).\n --in_bvecs IN_BVECS [IN_BVECS ...]\n The b-vectors files in FSL format (.bvec).\n --data_type DATA_TYPE\n Data type of the output image. Use the format: uint8, int16, int/float32, int/float64.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dwi_convert_FDF", + "docstring": "Converts a Varian FDF file or directory to a nifti file.\nIf the procpar contains diffusion information, it will be saved as bval and\nbvec in the same folder as the output file.\n\nex: scil_dwi_convert_FDF.py semsdw/b0_folder/ semsdw/dwi_folder/ dwi.nii.gz --bval dwi.bval --bvec dwi.bvec -f\n\nFormerly: scil_convert_fdf.py", + "help": "usage: scil_dwi_convert_FDF.py [-h] [--bval BVAL] [--bvec BVEC]\n [--flip dimension [dimension ...]]\n [--swap dimension [dimension ...]]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_b0_path in_dwi_path out_path\n\nConverts a Varian FDF file or directory to a nifti file.\nIf the procpar contains diffusion information, it will be saved as bval and\nbvec in the same folder as the output file.\n\nex: scil_dwi_convert_FDF.py semsdw/b0_folder/ semsdw/dwi_folder/ dwi.nii.gz --bval dwi.bval --bvec dwi.bvec -f\n\nFormerly: scil_convert_fdf.py\n\npositional arguments:\n in_b0_path Path to the b0 FDF file or folder to convert.\n in_dwi_path Path to the DWI FDF file or folder to convert.\n out_path Path to the nifti file to write on disk.\n\noptions:\n -h, --help show this help message and exit\n --bval BVAL Path to the bval file to write on disk.\n --bvec BVEC Path to the bvec file to write on disk.\n --flip dimension [dimension ...]\n The axes you want to flip. eg: to flip the x and y axes use: x y. [None]\n --swap dimension [dimension ...]\n The axes you want to swap. eg: to swap the x and y axes use: x y. [None]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dwi_detect_volume_outliers", + "docstring": "This script simply finds the 3 closest angular neighbors of each direction\n(per shell) and computes the voxel-wise correlation.\nIf the angles or correlations to neighbors are below the shell average (by\nargs.std_scale x STD), it will flag the volume as a potential outlier.\n\nThis script supports multi-shell data, but each shell is independent and detected\nusing the --b0_threshold parameter.\n\nThis script can be run before any processing to identify potential problems\nbefore launching pre-processing.", + "help": "usage: scil_dwi_detect_volume_outliers.py [-h] [--std_scale STD_SCALE]\n [--b0_threshold thr]\n [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nThis script simply finds the 3 closest angular neighbors of each direction\n(per shell) and computes the voxel-wise correlation.\nIf the angles or correlations to neighbors are below the shell average (by\nargs.std_scale x STD), it will flag the volume as a potential outlier.\n\nThis script supports multi-shell data, but each shell is independent and detected\nusing the --b0_threshold parameter.\n\nThis script can be run before any processing to identify potential problems\nbefore launching pre-processing.\n\npositional arguments:\n in_dwi The DWI file (.nii) to check.\n in_bval The b-values file in FSL format (.bval).\n in_bvec The b-vectors file in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --std_scale STD_SCALE\n How many deviations from the mean are required to be considered an outlier. [2.0]\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dwi_extract_b0", + "docstring": "Extract B0s from DWI, based on the bval and bvec information.\n\nThe default behavior is to save the first b0 of the series.\n\nFormerly: scil_extract_b0.py", + "help": "usage: scil_dwi_extract_b0.py [-h]\n [--all | --mean | --cluster-mean | --cluster-first]\n [--block-size INT] [--single-image]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_b0\n\nExtract B0s from DWI, based on the bval and bvec information.\n\nThe default behavior is to save the first b0 of the series.\n\nFormerly: scil_extract_b0.py\n\npositional arguments:\n in_dwi DWI Nifti image.\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-vectors filename, in FSL format (.bvec).\n out_b0 Output b0 file(s).\n\noptions:\n -h, --help show this help message and exit\n --block-size INT, -s INT\n Load the data using this block size. 
Useful\n when the data is too large to be loaded in memory.\n --single-image If output b0 volume has multiple time points, only outputs a single \n image instead of a numbered series of images.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nOptions in the case of multiple b0s.:\n --all Extract all b0s. Index number will be appended to the output file.\n --mean Extract mean b0.\n --cluster-mean Extract mean of each continuous cluster of b0s.\n --cluster-first Extract first b0 of each continuous cluster of b0s.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dwi_extract_shell", + "docstring": "Extracts the DWI volumes that are on specific b-value shells. Many shells\ncan be extracted at once by specifying multiple b-values. The extracted\nvolumes are in the same order as in the original file.\n\nIf the b-values of a shell are not all identical, use the --tolerance\nargument to adjust the accepted interval. For example, a b-value of 2000\nand a tolerance of 20 will extract all volumes with a b-values from 1980 to\n2020.\n\nFiles that are too large to be loaded in memory can still be processed by\nsetting the --block-size argument. A block size of X means that X DWI volumes\nare loaded at a time for processing.\n\nFormerly: scil_extract_dwi_shell.py", + "help": "usage: scil_dwi_extract_shell.py [-h] [--out_indices OUT_INDICES]\n [--block-size INT] [--tolerance INT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_bvals_to_extract\n [in_bvals_to_extract ...] out_dwi out_bval\n out_bvec\n\nExtracts the DWI volumes that are on specific b-value shells. Many shells\ncan be extracted at once by specifying multiple b-values. The extracted\nvolumes are in the same order as in the original file.\n\nIf the b-values of a shell are not all identical, use the --tolerance\nargument to adjust the accepted interval. For example, a b-value of 2000\nand a tolerance of 20 will extract all volumes with a b-values from 1980 to\n2020.\n\nFiles that are too large to be loaded in memory can still be processed by\nsetting the --block-size argument. A block size of X means that X DWI volumes\nare loaded at a time for processing.\n\nFormerly: scil_extract_dwi_shell.py\n\npositional arguments:\n in_dwi The DW image file to split.\n in_bval The b-values file in FSL format (.bval).\n in_bvec The b-vectors file in FSL format (.bvec).\n in_bvals_to_extract The list of b-values to extract. 
For example 0 2000.\n out_dwi The name of the output DWI file.\n out_bval The name of the output b-value file (.bval).\n out_bvec The name of the output b-vector file (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --out_indices OUT_INDICES\n Optional filename for valid indices in input dwi volume\n --block-size INT, -s INT\n Loads the data using this block size. Useful\n when the data is too large to be loaded in memory.\n --tolerance INT, -t INT\n The tolerated gap between the b-values to extract\n and the actual b-values. [20]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dwi_powder_average", + "docstring": "Script to compute powder average (mean diffusion weighted image) from set of\ndiffusion images.\n\nBy default will output an average image calculated from all images with\nnon-zero bvalue.\n\nSpecify --bvalue to output an image for a single shell\n\nScript currently does not take into account the diffusion gradient directions\nbeing averaged.\n\nFormerly: scil_compute_powder_average.py", + "help": "usage: scil_dwi_powder_average.py [-h] [-f] [--mask file] [--b0_thr B0_THR]\n [--shells SHELLS [SHELLS ...]]\n [--shell_thr SHELL_THR]\n [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval out_avg\n\nScript to compute powder average (mean diffusion weighted image) from set of\ndiffusion images.\n\nBy default will output an average image calculated from all images with\nnon-zero bvalue.\n\nSpecify --bvalue to output an image for a single shell\n\nScript currently does not take into account the diffusion gradient directions\nbeing averaged.\n\nFormerly: scil_compute_powder_average.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n out_avg Path of the output file.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --mask file Path to a binary mask.\n Only data inside the mask will be used for powder avg. (Default: None)\n --b0_thr B0_THR Exclude b0 volumes from powder average with bvalue less than specified threshold.\n (Default: remove volumes with bvalue < 50\n --shells SHELLS [SHELLS ...]\n bvalue (shells) to include in powder average passed as a list \n (e.g. --shells 1000 2000). If not specified will include all volumes with a non-zero bvalue.\n --shell_thr SHELL_THR\n Include volumes with bvalue +- the specified threshold.\n (Default: [50]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dwi_prepare_eddy_command", + "docstring": "Prepare a typical command for eddy and create the necessary files. When using\nmultiple acquisitions and/or opposite phase directions, images, b-values and\nb-vectors should be merged together using scil_dwi_concatenate.py. 
If using\ntopup prior to calling this script, images should be concatenated in the same\norder as the b0s used with prepare_topup.\n\nFormerly: scil_prepare_eddy_command.py", + "help": "usage: scil_dwi_prepare_eddy_command.py [-h] [--n_reverse N_REVERSE]\n [--topup TOPUP]\n [--topup_params TOPUP_PARAMS]\n [--eddy_cmd {eddy_openmp,eddy_cuda,eddy_cuda8.0,eddy_cuda9.1,eddy_cuda10.2,eddy,eddy_cpu}]\n [--b0_thr B0_THR]\n [--encoding_direction {x,y,z}]\n [--readout READOUT]\n [--slice_drop_correction]\n [--lsr_resampling]\n [--out_directory OUT_DIRECTORY]\n [--out_prefix OUT_PREFIX]\n [--out_script] [--fix_seed]\n [--eddy_options EDDY_OPTIONS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bvals in_bvecs in_mask\n\nPrepare a typical command for eddy and create the necessary files. When using\nmultiple acquisitions and/or opposite phase directions, images, b-values and\nb-vectors should be merged together using scil_dwi_concatenate.py. If using\ntopup prior to calling this script, images should be concatenated in the same\norder as the b0s used with prepare_topup.\n\nFormerly: scil_prepare_eddy_command.py\n\npositional arguments:\n in_dwi Input DWI Nifti image. If using multiple acquisition and/or opposite phase directions, please merge in the same order as for prepare_topup using scil_dwi_concatenate.py.\n in_bvals Input b-values file in FSL format.\n in_bvecs Input b-vectors file in FSL format.\n in_mask Binary brain mask.\n\noptions:\n -h, --help show this help message and exit\n --n_reverse N_REVERSE\n Number of reverse phase volumes included in the DWI image [0].\n --topup TOPUP Topup output name. If given, apply topup during eddy.\n Should be the same as --out_prefix from scil_dwi_prepare_topup_command.py.\n --topup_params TOPUP_PARAMS\n Parameters file (typically named acqparams) used to run topup.\n --eddy_cmd {eddy_openmp,eddy_cuda,eddy_cuda8.0,eddy_cuda9.1,eddy_cuda10.2,eddy,eddy_cpu}\n Eddy command [eddy_openmp].\n --b0_thr B0_THR All b-values with values less than or equal to b0_thr are considered\n as b0s i.e. without diffusion weighting [20].\n --encoding_direction {x,y,z}\n Acquisition direction, default is AP-PA [y].\n --readout READOUT Total readout time from the DICOM metadata [0.062].\n --slice_drop_correction\n If set, will activate eddy's outlier correction,\n which includes slice drop correction.\n --lsr_resampling Perform least-square resampling, allowing eddy to combine forward and reverse phase acquisitions for better reconstruction. Only works if directions and b-values are identical in both phase direction.\n --out_directory OUT_DIRECTORY\n Output directory for eddy files [.].\n --out_prefix OUT_PREFIX\n Prefix of the eddy-corrected DWI [dwi_eddy_corrected].\n --out_script If set, will output a .sh script (eddy.sh).\n else, will output the lines to the terminal [False].\n --fix_seed If set, will use the fixed seed strategy for eddy.\n Enhances reproducibility.\n --eddy_options EDDY_OPTIONS\n Additional options you want to use to run eddy.\n Add these options using quotes (i.e. \"--ol_nstd=6 --mb=4\").\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dwi_prepare_topup_command", + "docstring": "Prepare a typical command for topup and create the necessary files.\nThe reversed b0 must be in a different file.\n\nFormerly: scil_prepare_topup_command.py", + "help": "usage: scil_dwi_prepare_topup_command.py [-h] [--config CONFIG] [--synb0]\n [--encoding_direction {x,y,z}]\n [--readout READOUT]\n [--out_b0s OUT_B0S]\n [--out_directory OUT_DIRECTORY]\n [--out_prefix OUT_PREFIX]\n [--out_params OUT_PARAMS]\n [--out_script]\n [--topup_options TOPUP_OPTIONS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_forward_b0 in_reverse_b0\n\nPrepare a typical command for topup and create the necessary files.\nThe reversed b0 must be in a different file.\n\nFormerly: scil_prepare_topup_command.py\n\npositional arguments:\n in_forward_b0 Input b0 Nifti image with forward phase encoding.\n in_reverse_b0 Input b0 Nifti image with reversed phase encoding.\n\noptions:\n -h, --help show this help message and exit\n --config CONFIG Topup config file [b02b0.cnf].\n --synb0 If set, will use SyNb0 custom acqparams file.\n --encoding_direction {x,y,z}\n Acquisition direction of the forward b0 image, default is AP [y].\n --readout READOUT Total readout time from the DICOM metadata [0.062].\n --out_b0s OUT_B0S Output fused b0 file [fused_b0s.nii.gz].\n --out_directory OUT_DIRECTORY\n Output directory for topup files [.].\n --out_prefix OUT_PREFIX\n Prefix of the topup results [topup_results].\n --out_params OUT_PARAMS\n Filename for the acquisition parameters file [acqparams.txt].\n --out_script If set, will output a .sh script (topup.sh).\n else, will output the lines to the terminal [False].\n --topup_options TOPUP_OPTIONS\n Additional options you want to use to run topup.\n Add these options using quotes (i.e. \"--fwhm=6 --miter=4\").\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dwi_reorder_philips", + "docstring": "Re-order gradients according to the original table (Philips).\nThis script is not needed for version 5.6 and higher.\n\nFormerly: scil_reorder_dwi_philips.py", + "help": "usage: scil_dwi_reorder_philips.py [-h] [--json JSON]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_table\n out_basename\n\nRe-order gradients according to the original table (Philips).\nThis script is not needed for version 5.6 and higher.\n\nFormerly: scil_reorder_dwi_philips.py\n\npositional arguments:\n in_dwi Input dwi file.\n in_bval Input bval FSL format.\n in_bvec Input bvec FSL format.\n in_table Original Philips table - first line is skipped.\n out_basename Basename of the output file.\n\noptions:\n -h, --help show this help message and exit\n --json JSON If you give a json file, it will check if you need to reorder your Philips dwi.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dwi_split_by_indices", + "docstring": "Splits the DWI image at certain indices along the last dimension (b-values).\nMany indices can be given at once by specifying multiple values. 
The split\nvolumes are in the same order as in the original file. Also outputs the\ncorresponding .bval and .bvec files.\n\nThis script can be useful for splitting images at places where a b-value\nextraction does not work. For instance, if one wants to split the x first\nb-1500s from the rest of the b-1500s in an image, simply put x as an index.\n\nFormerly: scil_split_image.py", + "help": "usage: scil_dwi_split_by_indices.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_basename\n split_indices [split_indices ...]\n\nSplits the DWI image at certain indices along the last dimension (b-values).\nMany indices can be given at once by specifying multiple values. The split\nvolumes are in the same order as in the original file. Also outputs the\ncorresponding .bval and .bvec files.\n\nThis script can be useful for splitting images at places where a b-value\nextraction does not work. For instance, if one wants to split the x first\nb-1500s from the rest of the b-1500s in an image, simply put x as an index.\n\nFormerly: scil_split_image.py\n\npositional arguments:\n in_dwi The DW image file to split.\n in_bval The b-values file in FSL format (.bval).\n in_bvec The b-vectors file in FSL format (.bvec).\n out_basename The basename of the output files. Index numbers will be appended to out_basename. For example, if split_indices were 3 10, the files would be saved as out_basename_0_2, out_basename_3_10, out_basename_11_20, where the size of the last dimension is 21 in this example.\n split_indices The list of indices at which to split the image. For example 3 10. This would split the image into three parts, such as [:3], [3:10], [10:]. Indices must be in increasing order.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_dwi_to_sh", + "docstring": "Script to compute the SH coefficients directly on the raw DWI signal.\n\nFormerly: scil_compute_sh_from_signal.py", + "help": "usage: scil_dwi_to_sh.py [-h] [--sh_order SH_ORDER]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--smooth SMOOTH] [--use_attenuation] [--mask MASK]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_sh\n\nScript to compute the SH coefficients directly on the raw DWI signal.\n\nFormerly: scil_compute_sh_from_signal.py\n\npositional arguments:\n in_dwi Path of the dwi volume.\n in_bval Path of the b-value file, in FSL format.\n in_bvec Path of the b-vector file, in FSL format.\n out_sh Name of the output SH file to save.\n\noptions:\n -h, --help show this help message and exit\n --sh_order SH_ORDER SH order to fit (int). [4]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. 
MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --smooth SMOOTH Lambda-regularization coefficient in the SH fit (float). [0.006]\n --use_attenuation If set, will use signal attenuation before fitting the SH (i.e. divide by the b0).\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computations and reconstruction. \n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_fodf_max_in_ventricles", + "docstring": "Script to compute the maximum fODF in the ventricles. The ventricles are\nestimated from an MD and FA threshold.\n\nThis allows clipping the noise of the fODF using an absolute threshold.\n\nFormerly: scil_compute_fodf_max_in_ventricles.py", + "help": "usage: scil_fodf_max_in_ventricles.py [-h] [--fa_threshold FA_THRESHOLD]\n [--md_threshold MD_THRESHOLD]\n [--max_value_output file]\n [--mask_output file] [--small_dims]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n fODFs FA MD\n\nScript to compute the maximum fODF in the ventricles. The ventricles are\nestimated from an MD and FA threshold.\n\nThis allows clipping the noise of the fODF using an absolute threshold.\n\nFormerly: scil_compute_fodf_max_in_ventricles.py\n\npositional arguments:\n fODFs Path of the fODF volume in spherical harmonics (SH).\n FA Path to the FA volume.\n MD Path to the mean diffusivity (MD) volume.\n\noptions:\n -h, --help show this help message and exit\n --fa_threshold FA_THRESHOLD\n Maximal threshold of FA (voxels under that threshold are considered \n for evaluation). [0.1]\n --md_threshold MD_THRESHOLD\n Minimal threshold of MD in mm2/s (voxels above that threshold are \n considered for evaluation). [0.003]\n --max_value_output file\n Output path for the text file containing the value. If not set, the \n file will not be saved.\n --mask_output file Output path for the ventricle mask. If not set, the mask \n will not be saved.\n --small_dims If set, takes the full range of data to search the max fodf amplitude \n in ventricles. Useful when the data has small dimensions.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. 
MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Dell'Acqua, Flavio, et al. \"Can spherical deconvolution provide more\n information than fiber orientations? Hindrance modulated orientational\n anisotropy, a true-tract specific index to characterize white matter\n diffusion.\" Human brain mapping 34.10 (2013): 2464-2483.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_fodf_memsmt", + "docstring": "Script to compute multi-encoding multi-shell multi-tissue (memsmt)\nConstrained Spherical Deconvolution ODFs.\n\nIn order to operate, the script only needs the data from one type of b-tensor\nencoding. However, giving only a spherical one will not produce good fODFs, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest to always use at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombinaison with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable.\n\nAll of `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\n>>> scil_fodf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_fodf.py", + "help": "usage: scil_fodf_memsmt.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--sh_order int] [--mask MASK]\n [--tolerance tol] [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--not_all] [--wm_out_fODF file]\n [--gm_out_fODF file] [--csf_out_fODF file]\n [--vf file] [--vf_rgb file]\n in_wm_frf in_gm_frf in_csf_frf\n\nScript to compute multi-encoding multi-shell multi-tissue (memsmt)\nConstrained Spherical Deconvolution ODFs.\n\nIn order to operate, the script only needs the data from one type of b-tensor\nencoding. However, giving only a spherical one will not produce good fODFs, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. 
We thus suggest to always use at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombinaison with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable.\n\nAll of `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\n>>> scil_fodf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_fodf.py\n\npositional arguments:\n in_wm_frf Text file of WM response function.\n in_gm_frf Text file of GM response function.\n in_csf_frf Text file of CSF response function.\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as dwi, bval and bvec inputs.\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. 
\n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags. (Default: False)\n\nFile flags:\n --wm_out_fODF file Output filename for the WM fODF coefficients.\n --gm_out_fODF file Output filename for the GM fODF coefficients.\n --csf_out_fODF file Output filename for the CSF fODF coefficients.\n --vf file Output filename for the volume fractions map.\n --vf_rgb file Output filename for the volume fractions map in rgb.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_fodf_metrics", + "docstring": "Script to compute the maximum Apparent Fiber Density (AFD), the fiber ODF\norientations, values and indices (peaks, peak_values, peak_indices), the Number\nof Fiber Orientations (NuFO) maps from fiber ODFs and the RGB map.\n\nAFD_max map is the maximal fODF amplitude for each voxel.\n\nNuFO is the number of maxima of the fODF with an ABSOLUTE amplitude above\nthe threshold set using --at, AND an amplitude above the RELATIVE threshold\nset using --rt.\n\nThe --at argument should be set to a value which is 1.5 times the maximal\nvalue of the fODF in the ventricles. This can be obtained with the\nscil_fodf_max_in_ventricles.py script.\n\nIf the --abs_peaks_and_values argument is set, the peaks are all normalized\nand the peak_values are equal to the actual fODF amplitude of the peaks. By\ndefault, the script max-normalizes the peak_values for each voxel and\nmultiplies the peaks by peak_values.\n\nBy default, will output all possible files, using default names. Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Raffelt et al. NeuroImage 2012] and [Dell'Acqua et al HBM 2013] for the\ndefinitions.\n\nFormerly: scil_compute_fodf_metrics.py", + "help": "usage: scil_fodf_metrics.py [-h] [--sphere string] [--mask] [--at A_THRESHOLD]\n [--rt R_THRESHOLD] [--abs_peaks_and_values]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR] [-f]\n [--not_all] [--afd_max file] [--afd_total file]\n [--afd_sum file] [--nufo file] [--rgb file]\n [--peaks file] [--peak_values file]\n [--peak_indices file]\n in_fODF\n\nScript to compute the maximum Apparent Fiber Density (AFD), the fiber ODF\norientations, values and indices (peaks, peak_values, peak_indices), the Number\nof Fiber Orientations (NuFO) maps from fiber ODFs and the RGB map.\n\nAFD_max map is the maximal fODF amplitude for each voxel.\n\nNuFO is the number of maxima of the fODF with an ABSOLUTE amplitude above\nthe threshold set using --at, AND an amplitude above the RELATIVE threshold\nset using --rt.\n\nThe --at argument should be set to a value which is 1.5 times the maximal\nvalue of the fODF in the ventricles. This can be obtained with the\nscil_fodf_max_in_ventricles.py script.\n\nIf the --abs_peaks_and_values argument is set, the peaks are all normalized\nand the peak_values are equal to the actual fODF amplitude of the peaks. By\ndefault, the script max-normalizes the peak_values for each voxel and\nmultiplies the peaks by peak_values.\n\nBy default, will output all possible files, using default names. 
Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Raffelt et al. NeuroImage 2012] and [Dell'Acqua et al HBM 2013] for the\ndefinitions.\n\nFormerly: scil_compute_fodf_metrics.py\n\npositional arguments:\n in_fODF Path of the fODF volume in spherical harmonics (SH).\n\noptions:\n -h, --help show this help message and exit\n --sphere string Discrete sphere to use in the processing [repulsion724].\n --mask Path to a binary mask. Only the data inside the mask\n will be used for computations and reconstruction [None].\n --at A_THRESHOLD Absolute threshold on fODF amplitude. This value should be set to\n approximately 1.5 to 2 times the maximum fODF amplitude in isotropic voxels\n (i.e. ventricles).\n Use scil_fodf_max_in_ventricles.py to find the maximal value.\n See [Dell'Acqua et al HBM 2013] [0.0].\n --rt R_THRESHOLD Relative threshold on fODF amplitude in percentage [0.1].\n --abs_peaks_and_values\n If set, the peak_values are not max-normalized for each voxel, \n but rather they keep the actual fODF amplitude of the peaks. \n Also, the peaks are given as unit directions instead of being proportional to peak_values. [False]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags [False].\n\nFile flags:\n --afd_max file Output filename for the AFD_max map.\n --afd_total file Output filename for the AFD_total map (SH coeff = 0).\n --afd_sum file Output filename for the sum of all peak contributions\n (sum of fODF lobes on the sphere).\n --nufo file Output filename for the NuFO map.\n --rgb file Output filename for the RGB map.\n --peaks file Output filename for the extracted peaks.\n --peak_values file Output filename for the extracted peak values.\n --peak_indices file Output filename for the generated peak indices on the sphere.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_fodf_msmt", + "docstring": "Script to compute Multishell Multi-tissue Constrained Spherical Deconvolution\nODFs.\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical\ndeconvolution for improved analysis of multi-shell diffusion\nMRI data. 
Neuroimage (2014)\n\nFormerly: scil_compute_msmt_fodf.py", + "help": "usage: scil_fodf_msmt.py [-h] [--sh_order int] [--mask] [--tolerance tol]\n [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [--not_all] [--wm_out_fODF file]\n [--gm_out_fODF file] [--csf_out_fODF file]\n [--vf file] [--vf_rgb file]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_wm_frf in_gm_frf in_csf_frf\n\nScript to compute Multishell Multi-tissue Constrained Spherical Deconvolution\nODFs.\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical\ndeconvolution for improved analysis of multi-shell diffusion\nMRI data. Neuroimage (2014)\n\nFormerly: scil_compute_msmt_fodf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bval file, in FSL format.\n in_bvec Path of the bvec file, in FSL format.\n in_wm_frf Text file of WM response function.\n in_gm_frf Text file of GM response function.\n in_csf_frf Text file of CSF response function.\n\noptions:\n -h, --help show this help message and exit\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --not_all If set, only saves the files specified using the file flags. (Default: False)\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nFile flags:\n --wm_out_fODF file Output filename for the WM fODF coefficients.\n --gm_out_fODF file Output filename for the GM fODF coefficients.\n --csf_out_fODF file Output filename for the CSF fODF coefficients.\n --vf file Output filename for the volume fractions map.\n --vf_rgb file Output filename for the volume fractions map in rgb.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_fodf_ssst", + "docstring": "Script to compute Constrained Spherical Deconvolution (CSD) fiber ODFs.\n\nSee [Tournier et al. NeuroImage 2007]\n\nFormerly: scil_compute_ssst_fodf.py", + "help": "usage: scil_fodf_ssst.py [-h] [--sh_order int] [--mask] [--b0_threshold thr]\n [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec frf_file out_fODF\n\nScript to compute Constrained Spherical Deconvolution (CSD) fiber ODFs.\n\nSee [Tournier et al. NeuroImage 2007]\n\nFormerly: scil_compute_ssst_fodf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n frf_file Path of the FRF file\n out_fODF Output path for the fiber ODF coefficients.\n\noptions:\n -h, --help show this help message and exit\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask Path to a binary mask. Only the data inside the mask will be used \n for computations and reconstruction.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_fodf_to_bingham", + "docstring": "Script for fitting a Bingham distribution to each fODF lobe, as described\nin [1].\n\nThe Bingham fit is saved, with each Bingham distribution described by 7\ncoefficients (for example, for a maximum number of lobes of 5, the number\nof coefficients is 7 x 5 = 35 -- less than the number of coefficients for\nSH of maximum order 8).\n\nUsing 12 threads, the execution takes approximately 30 minutes for a brain with\n1mm isotropic resolution.\n\nFormerly: scil_fit_bingham_to_fodf.py", + "help": "usage: scil_fodf_to_bingham.py [-h] [--max_lobes MAX_LOBES] [--at AT]\n [--rt RT] [--min_sep_angle MIN_SEP_ANGLE]\n [--max_fit_angle MAX_FIT_ANGLE] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR]\n [-f]\n in_sh out_bingham\n\nScript for fitting a Bingham distribution to each fODF lobe, as described\nin [1].\n\nThe Bingham fit is saved, with each Bingham distribution described by 7\ncoefficients (for example, for a maximum number of lobes of 5, the number\nof coefficients is 7 x 5 = 35 -- less than the number of coefficients for\nSH of maximum order 8).\n\nUsing 12 threads, the execution takes approximately 30 minutes for a brain with\n1mm isotropic resolution.\n\nFormerly: scil_fit_bingham_to_fodf.py\n\npositional arguments:\n in_sh Input SH image.\n out_bingham Output Bingham functions image.\n\noptions:\n -h, --help show this help message and exit\n --max_lobes MAX_LOBES\n Maximum number of lobes per voxel to extract. [5]\n --at AT Absolute threshold for peaks extraction. [0.0]\n --rt RT Relative threshold for peaks extraction. [0.1]\n --min_sep_angle MIN_SEP_ANGLE\n Minimum separation angle between two peaks. [25.0]\n --max_fit_angle MAX_FIT_ANGLE\n Maximum distance in degrees around a peak direction for fitting the Bingham function. [15.0]\n --mask MASK Optional mask file. Only SH inside the mask are fitted.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -f Force overwriting of the output files.\n\n[1] T. W. Riffert, J. Schreiber, A. Anwander, and T. R. Kn\u00f6sche, \u201cBeyond\n fractional anisotropy: Extraction of bundle-specific structural metrics\n from crossing fiber models,\u201d NeuroImage, vol. 100, pp. 176-191, Oct. 2014,\n doi: 10.1016/j.neuroimage.2014.06.015.\n\n[2] J. Schreiber, T. Riffert, A. Anwander, and T. R. Kn\u00f6sche, \u201cPlausibility\n Tracking: A method to evaluate anatomical connectivity and microstructural\n properties along fiber pathways,\u201d NeuroImage, vol. 90, pp. 
163-178, Apr.\n 2014, doi: 10.1016/j.neuroimage.2014.01.002.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_freewater_maps", + "docstring": "Compute Free Water maps [1] using AMICO.\nThis script supports both single and multi-shell data.\n\nFormerly: scil_compute_freewater.py", + "help": "usage: scil_freewater_maps.py [-h] [--mask MASK] [--out_dir OUT_DIR]\n [--b_thr B_THR] [--para_diff PARA_DIFF]\n [--iso_diff ISO_DIFF]\n [--perp_diff_min PERP_DIFF_MIN]\n [--perp_diff_max PERP_DIFF_MAX]\n [--lambda1 LAMBDA1] [--lambda2 LAMBDA2]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--mouse] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec\n\nCompute Free Water maps [1] using AMICO.\nThis script supports both single and multi-shell data.\n\nFormerly: scil_compute_freewater.py\n\npositional arguments:\n in_dwi DWI file.\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-vectors filename, in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Brain mask filename.\n --out_dir OUT_DIR Output directory for the Free Water results. [results]\n --b_thr B_THR Limit value to consider that a b-value is on an\n existing shell. Above this limit, the b-value is\n placed on a new shell. This includes b0s values.\n --mouse If set, use mouse fitting profile.\n --processes NBR Number of sub-processes to start. Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n\nModel options:\n --para_diff PARA_DIFF\n Axial diffusivity (AD) in the CC. [0.0015]\n --iso_diff ISO_DIFF Mean diffusivity (MD) in ventricles. [0.003]\n --perp_diff_min PERP_DIFF_MIN\n Radial diffusivity (RD) minimum. [0.0001]\n --perp_diff_max PERP_DIFF_MAX\n Radial diffusivity (RD) maximum. [0.0007]\n --lambda1 LAMBDA1 First regularization parameter. [0.0]\n --lambda2 LAMBDA2 Second regularization parameter. [0.25]\n\nKernels options:\n --save_kernels DIRECTORY\n Output directory for the COMMIT kernels.\n --load_kernels DIRECTORY\n Input directory where the COMMIT kernels are located.\n --compute_only Compute kernels only, --save_kernels must be used.\n\nReference:\n [1] Pasternak 0, Sochen N, Gur Y, Intrator N, Assaf Y.\n Free water elimination and mapping from diffusion mri.\n Magn Reson Med. 62 (3) (2009) 717-730.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_freewater_priors", + "docstring": "Synonym for scil_NODDI_priors.py", + "help": "usage: scil_freewater_priors.py [-h]\n [--fa_min_single_fiber FA_MIN_SINGLE_FIBER]\n [--fa_max_ventricles FA_MAX_VENTRICLES]\n [--md_min_ventricles MD_MIN_VENTRICLES]\n [--roi_radius ROI_RADIUS]\n [--roi_center pos pos pos]\n [--out_txt_1fiber_para FILE]\n [--out_txt_1fiber_perp FILE]\n [--out_mask_1fiber FILE]\n [--out_txt_ventricles FILE]\n [--out_mask_ventricles FILE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_FA in_AD in_RD in_MD\n\nCompute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py\n\npositional arguments:\n in_FA Path to the FA volume.\n in_AD Path to the axial diffusivity (AD) volume.\n in_RD Path to the radial diffusivity (RD) volume.\n in_MD Path to the mean diffusivity (MD) volume.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics options:\n --fa_min_single_fiber FA_MIN_SINGLE_FIBER\n Minimal threshold of FA (voxels above that threshold are considered in \n the single fiber mask). [0.7]\n --fa_max_ventricles FA_MAX_VENTRICLES\n Maximal threshold of FA (voxels under that threshold are considered in \n the ventricles). [0.1]\n --md_min_ventricles MD_MIN_VENTRICLES\n Minimal threshold of MD in mm2/s (voxels above that threshold are considered \n for in the ventricles). [0.003]\n\nRegions options:\n --roi_radius ROI_RADIUS\n Radius of the region used to estimate the priors. The roi will be a cube spanning \n from ROI_CENTER in each direction. [20]\n --roi_center pos pos pos\n Center of the roi of size roi_radius used to estimate the priors; a 3-value coordinate. \n If not set, uses the center of the 3D volume.\n\nOutputs:\n --out_txt_1fiber_para FILE\n Output path for the text file containing the single fiber average value of AD.\n If not set, the file will not be saved.\n --out_txt_1fiber_perp FILE\n Output path for the text file containing the single fiber average value of RD.\n If not set, the file will not be saved.\n --out_mask_1fiber FILE\n Output path for single fiber mask. If not set, the mask will not be saved.\n --out_txt_ventricles FILE\n Output path for the text file containing the ventricles average value of MD.\n If not set, the file will not be saved.\n --out_mask_ventricles FILE\n Output path for the ventricule mask.\n If not set, the mask will not be saved.\n\nReference:\n [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n NODDI: practical in vivo neurite orientation dispersion and density\n imaging of the human brain. NeuroImage. 2012 Jul 16;61:1000-16.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_frf_mean", + "docstring": "Compute the mean Fiber Response Function from a set of individually\ncomputed Response Functions.\n\nThe FRF files are obtained from scil_frf_ssst.py, scil_frf_msmt.py in the\ncase of multi-shell data or scil_frf_memsmt.py in the case of multi-encoding\nmulti-shell data.\n\nFormerly: scil_compute_mean_frf.py", + "help": "usage: scil_frf_mean.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n list [list ...] file\n\nCompute the mean Fiber Response Function from a set of individually\ncomputed Response Functions.\n\nThe FRF files are obtained from scil_frf_ssst.py, scil_frf_msmt.py in the\ncase of multi-shell data or scil_frf_memsmt.py in the case of multi-encoding\nmulti-shell data.\n\nFormerly: scil_compute_mean_frf.py\n\npositional arguments:\n list List of FRF filepaths.\n file Path of the output mean FRF file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_frf_memsmt", + "docstring": "Script to estimate response functions for multi-encoding multi-shell\nmulti-tissue (memsmt) constrained spherical deconvolution. In order to operate,\nthe script only needs the data from one type of b-tensor encoding. However,\ngiving only a spherical one will not produce good fiber response functions, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. 
We thus suggest to always use at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombinaison with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable. Moreover, all of `--in_dwis`, `--in_bvals`, `--in_bvecs` and\n`--in_bdeltas` must have the same number of arguments. Be sure to keep the\nsame order of encodings throughout all these inputs and to set `--in_bdeltas`\naccordingly (IMPORTANT).\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n\nIn the wm, we compute the response function in each voxels where\nthe FA is superior at threshold_fa_wm.\n\nIn the gm (or csf), we compute the response function in each voxels where\nthe FA is below at threshold_fa_gm (or threshold_fa_csf) and where\nthe MD is below threshold_md_gm (or threshold_md_csf).\n\n>>> scil_frf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_frf.py", + "help": "usage: scil_frf_memsmt.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--mask MASK]\n [--mask_wm MASK_WM] [--mask_gm MASK_GM]\n [--mask_csf MASK_CSF] [--fa_thr_wm FA_THR_WM]\n [--fa_thr_gm FA_THR_GM] [--fa_thr_csf FA_THR_CSF]\n [--md_thr_gm MD_THR_GM] [--md_thr_csf MD_THR_CSF]\n [--min_nvox MIN_NVOX] [--tolerance tol]\n [--skip_b0_check] [--dti_bval_limit DTI_BVAL_LIMIT]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--wm_frf_mask file] [--gm_frf_mask file]\n [--csf_frf_mask file] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n out_wm_frf out_gm_frf out_csf_frf\n\nScript to estimate response functions for multi-encoding multi-shell\nmulti-tissue (memsmt) constrained spherical deconvolution. In order to operate,\nthe script only needs the data from one type of b-tensor encoding. However,\ngiving only a spherical one will not produce good fiber response functions, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest to always use at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombinaison with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable. Moreover, all of `--in_dwis`, `--in_bvals`, `--in_bvecs` and\n`--in_bdeltas` must have the same number of arguments. 
Be sure to keep the\nsame order of encodings throughout all these inputs and to set `--in_bdeltas`\naccordingly (IMPORTANT).\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n\nIn the wm, we compute the response function in each voxels where\nthe FA is superior at threshold_fa_wm.\n\nIn the gm (or csf), we compute the response function in each voxels where\nthe FA is below at threshold_fa_gm (or threshold_fa_csf) and where\nthe MD is below threshold_md_gm (or threshold_md_csf).\n\n>>> scil_frf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_frf.py\n\npositional arguments:\n out_wm_frf Path to the output WM frf file, in .txt format.\n out_gm_frf Path to the output GM frf file, in .txt format.\n out_csf_frf Path to the output CSF frf file, in .txt format.\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as \n dwi, bval and bvec inputs.\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for\n computations and reconstruction. Useful if no tissue masks are available.\n --mask_wm MASK_WM Path to the input WM mask file, used to improve the final WM frf mask.\n --mask_gm MASK_GM Path to the input GM mask file, used to improve the final GM frf mask.\n --mask_csf MASK_CSF Path to the input CSF mask file, used to improve the final CSF frf mask.\n --fa_thr_wm FA_THR_WM\n If supplied, use this threshold to select single WM fiber voxels from \n the FA inside the WM mask defined by mask_wm. \n Each voxel above this threshold will be selected. [0.7]\n --fa_thr_gm FA_THR_GM\n If supplied, use this threshold to select GM voxels from the FA inside \n the GM mask defined by mask_gm. \n Each voxel below this threshold will be selected. [0.2]\n --fa_thr_csf FA_THR_CSF\n If supplied, use this threshold to select CSF voxels from the FA inside \n the CSF mask defined by mask_csf. \n Each voxel below this threshold will be selected. [0.1]\n --md_thr_gm MD_THR_GM\n If supplied, use this threshold to select GM voxels from the MD inside \n the GM mask defined by mask_gm. \n Each voxel below this threshold will be selected. [0.0007]\n --md_thr_csf MD_THR_CSF\n If supplied, use this threshold to select CSF voxels from the MD inside \n the CSF mask defined by mask_csf. \n Each voxel below this threshold will be selected. [0.003]\n --min_nvox MIN_NVOX Minimal number of voxels needed for each tissue masks in order to \n proceed to frf estimation. [100]\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. 
To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --dti_bval_limit DTI_BVAL_LIMIT\n The highest b-value taken for the DTI model. [1200]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to estimate the \n response functions. The roi will be a cuboid spanning from the middle of \n the volume in each direction with the different radii. The type is either \n an int (e.g. --roi_radii 10) or an array-like (3,) (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the cuboid roi using roi_radii. \n [center of the 3D volume] (e.g. --roi_center 66 79 79)\n --wm_frf_mask file Path to the output WM frf mask file, the voxels used to compute the WM frf.\n --gm_frf_mask file Path to the output GM frf mask file, the voxels used to compute the GM frf.\n --csf_frf_mask file Path to the output CSF frf mask file, the voxels used to compute the CSF frf.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_frf_msmt", + "docstring": "Compute response functions for multi-shell multi-tissue (MSMT) constrained\nspherical deconvolution from DWI data.\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n - In the wm, we compute the response function in each voxel where the FA is\n superior at threshold_fa_wm.\n - In the gm (or csf), we compute the response function in each voxel where\n the FA is below at threshold_fa_gm (or threshold_fa_csf) and where the MD\n is below threshold_md_gm (or threshold_md_csf).\n\nWe output one response function file for each tissue, containing the response\nfunction for each b-value (arranged by lines). These are saved as the diagonal\nof the axis-symmetric diffusion tensor (3 e-values) and a mean b0 value.\nFor example, a typical wm_frf is [15e-4, 4e-4, 4e-4, 700], where the tensor\ne-values are (15,4,4)x10^-4 mm^2/s and the mean b0 is 700.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical deconvolution\nfor improved analysis of multi-shell diffusion MRI data. 
Neuroimage (2014)\n\nFormerly: scil_compute_msmt_frf.py", + "help": "usage: scil_frf_msmt.py [-h] [--mask MASK] [--mask_wm MASK_WM]\n [--mask_gm MASK_GM] [--mask_csf MASK_CSF]\n [--fa_thr_wm FA_THR_WM] [--fa_thr_gm FA_THR_GM]\n [--fa_thr_csf FA_THR_CSF] [--md_thr_gm MD_THR_GM]\n [--md_thr_csf MD_THR_CSF] [--min_nvox MIN_NVOX]\n [--tolerance TOLERANCE] [--skip_b0_check]\n [--dti_bval_limit DTI_BVAL_LIMIT]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--wm_frf_mask file] [--gm_frf_mask file]\n [--csf_frf_mask file] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_wm_frf out_gm_frf\n out_csf_frf\n\nCompute response functions for multi-shell multi-tissue (MSMT) constrained\nspherical deconvolution from DWI data.\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n - In the wm, we compute the response function in each voxel where the FA is\n superior at threshold_fa_wm.\n - In the gm (or csf), we compute the response function in each voxel where\n the FA is below at threshold_fa_gm (or threshold_fa_csf) and where the MD\n is below threshold_md_gm (or threshold_md_csf).\n\nWe output one response function file for each tissue, containing the response\nfunction for each b-value (arranged by lines). These are saved as the diagonal\nof the axis-symmetric diffusion tensor (3 e-values) and a mean b0 value.\nFor example, a typical wm_frf is [15e-4, 4e-4, 4e-4, 700], where the tensor\ne-values are (15,4,4)x10^-4 mm^2/s and the mean b0 is 700.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical deconvolution\nfor improved analysis of multi-shell diffusion MRI data. Neuroimage (2014)\n\nFormerly: scil_compute_msmt_frf.py\n\npositional arguments:\n in_dwi Path to the input diffusion volume.\n in_bval Path to the bval file, in FSL format.\n in_bvec Path to the bvec file, in FSL format.\n out_wm_frf Path to the output WM frf file, in .txt format.\n out_gm_frf Path to the output GM frf file, in .txt format.\n out_csf_frf Path to the output CSF frf file, in .txt format.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to a binary mask. Only the data inside the mask\n will be used for computations and reconstruction.\n Useful if no tissue masks are available.\n --mask_wm MASK_WM Path to the input WM mask file, used to improve the\n final WM frf mask.\n --mask_gm MASK_GM Path to the input GM mask file, used to improve the\n final GM frf mask.\n --mask_csf MASK_CSF Path to the input CSF mask file, used to improve the\n final CSF frf mask.\n --fa_thr_wm FA_THR_WM\n If supplied, use this threshold to select single WM\n fiber voxels from the FA inside the WM mask defined by\n mask_wm. Each voxel above this threshold will be\n selected. 
[0.7]\n --fa_thr_gm FA_THR_GM\n If supplied, use this threshold to select GM voxels\n from the FA inside the GM mask defined by mask_gm.\n Each voxel below this threshold will be selected.\n [0.2]\n --fa_thr_csf FA_THR_CSF\n If supplied, use this threshold to select CSF voxels\n from the FA inside the CSF mask defined by mask_csf.\n Each voxel below this threshold will be selected.\n [0.1]\n --md_thr_gm MD_THR_GM\n If supplied, use this threshold to select GM voxels\n from the MD inside the GM mask defined by mask_gm.\n Each voxel below this threshold will be selected.\n [0.0007]\n --md_thr_csf MD_THR_CSF\n If supplied, use this threshold to select CSF voxels\n from the MD inside the CSF mask defined by mask_csf.\n Each voxel below this threshold will be selected.\n [0.003]\n --min_nvox MIN_NVOX Minimal number of voxels needed for each tissue masks\n in order to proceed to frf estimation. [100]\n --tolerance TOLERANCE\n The tolerated gap between the b-values to extract and\n the current b-value. [20]\n --skip_b0_check By default, we supervise that at least one b0 exists\n in your data (i.e. b-values below the default\n --tolerance). Use this option to allow continuing even\n if the minimum b-value is suspiciously high. Use with\n care, and only if you understand your data.\n --dti_bval_limit DTI_BVAL_LIMIT\n The highest b-value taken for the DTI model. [1200]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to\n estimate the response functions. The roi will be a\n cuboid spanning from the middle of the volume in each\n direction with the different radii. The type is either\n an int (e.g. --roi_radii 10) or an array-like (3,)\n (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the cuboid roi\n using roi_radii. [center of the 3D volume] (e.g.\n --roi_center 66 79 79)\n --wm_frf_mask file Path to the output WM frf mask file, the voxels used\n to compute the WM frf.\n --gm_frf_mask file Path to the output GM frf mask file, the voxels used\n to compute the GM frf.\n --csf_frf_mask file Path to the output CSF frf mask file, the voxels used\n to compute the CSF frf.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_frf_set_diffusivities", + "docstring": "Replace the fiber response function in the FRF file.\nUse this script when you want to use a fixed response function\nand keep the mean b0.\n\nThe FRF file is obtained from scil_frf_ssst.py or scil_frf_msmt.py in the case\nof multi-shell data.\n\nFormerly: scil_set_response_function.py", + "help": "usage: scil_frf_set_diffusivities.py [-h] [--no_factor]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n input new_frf output\n\nReplace the fiber response function in the FRF file.\nUse this script when you want to use a fixed response function\nand keep the mean b0.\n\nThe FRF file is obtained from scil_frf_ssst.py or scil_frf_msmt.py in the case\nof multi-shell data.\n\nFormerly: scil_set_response_function.py\n\npositional arguments:\n input Path of the FRF file.\n new_frf New response function given as a tuple. We will replace the \n response function in frf_file with this fiber response \n function x 10**-4 (e.g. 15,4,4). \n If multi-shell, write the first shell,then the second shell, \n and the third, etc. (e.g. 
15,4,4,13,5,5,12,5,5).\n output Path of the new FRF file.\n\noptions:\n -h, --help show this help message and exit\n --no_factor If supplied, the fiber response function is\n evaluated without the x 10**-4 factor. [False].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_frf_ssst", + "docstring": "Compute a single Fiber Response Function from a DWI.\n\nA DTI fit is made, and voxels containing a single fiber population are\nfound using a threshold on the FA.\n\nFormerly: scil_compute_ssst_frf.py", + "help": "usage: scil_frf_ssst.py [-h] [--mask MASK] [--mask_wm MASK_WM]\n [--fa_thresh FA_THRESH]\n [--min_fa_thresh MIN_FA_THRESH] [--min_nvox MIN_NVOX]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec frf_file\n\nCompute a single Fiber Response Function from a DWI.\n\nA DTI fit is made, and voxels containing a single fiber population are\nfound using a threshold on the FA.\n\nFormerly: scil_compute_ssst_frf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n frf_file Path to the output FRF file, in .txt format, saved by Numpy.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to a binary mask. Only the data inside the mask will be used \n for computations and reconstruction. Useful if no white matter mask \n is available.\n --mask_wm MASK_WM Path to a binary white matter mask. Only the data inside this mask \n and above the threshold defined by --fa_thresh will be used to estimate the \n fiber response function.\n --fa_thresh FA_THRESH\n If supplied, use this threshold as the initial threshold to select \n single fiber voxels. [0.7]\n --min_fa_thresh MIN_FA_THRESH\n If supplied, this is the minimal value that will be tried when looking \n for single fiber voxels. [0.5]\n --min_nvox MIN_NVOX Minimal number of voxels needing to be identified as single fiber voxels \n in the automatic estimation. [300]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to estimate the \n response functions. The roi will be a cuboid spanning from the middle of \n the volume in each direction with the different radii. The type is either \n an int (e.g. --roi_radii 10) or an array-like (3,) (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the roi of size roi_radius. [center of the 3D volume]\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences: [1] Tournier et al. NeuroImage 2007\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_get_version", + "docstring": "Give you information about your current scilpy installation.\nThis is useful for non-developers to give you the information\nneeded to reproduce your results, or to help debugging.\n\nIf you are experiencing a bug, please run this script and\nsend the output to the scilpy developers.", + "help": "usage: scil_get_version.py [-h] [--show_dependencies]\n [-v [{DEBUG,INFO,WARNING}]]\n\nGive you information about your current scilpy installation.\nThis is useful for non-developers to give you the information\nneeded to reproduce your results, or to help debugging.\n\nIf you are experiencing a bug, please run this script and\nsend the output to the scilpy developers.\n\noptions:\n -h, --help show this help message and exit\n --show_dependencies Show the dependencies of scilpy.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_gradients_apply_transform", + "docstring": "Transform bvecs using an affine/rigid transformation.\n\nFormerly: scil_apply_transform_to_bvecs.py.", + "help": "usage: scil_gradients_apply_transform.py [-h] [--inverse]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bvecs in_transfo out_bvecs\n\nTransform bvecs using an affine/rigid transformation.\n\nFormerly: scil_apply_transform_to_bvecs.py.\n\npositional arguments:\n in_bvecs Path of the bvec file, in FSL format\n in_transfo Path of the file containing the 4x4 \n transformation, matrix (.txt, .npy or .mat).\n out_bvecs Output filename of the transformed bvecs.\n\noptions:\n -h, --help show this help message and exit\n --inverse Apply the inverse transformation.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_gradients_convert", + "docstring": "Script to convert gradient tables between FSL and MRtrix formats.\n\nFormerly: scil_convert_gradients_mrtrix_to_fsl.py or\nscil_convert_gradients_fsl_to_mrtrix.py", + "help": "usage: scil_gradients_convert.py [-h] (--input_fsl | --input_mrtrix) [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n GRADIENT_FILE(S) [GRADIENT_FILE(S) ...]\n output\n\nScript to convert gradient tables between FSL and MRtrix formats.\n\nFormerly: scil_convert_gradients_mrtrix_to_fsl.py or\nscil_convert_gradients_fsl_to_mrtrix.py\n\npositional arguments:\n GRADIENT_FILE(S) Path(s) to the gradient file(s). Either FSL (.bval, .bvec) or MRtrix (.b).\n output Basename of output without extension. Extension(s) will be added automatically (.b for MRtrix, .bval/.bvec for FSL.\n\noptions:\n -h, --help show this help message and exit\n --input_fsl FSL format.\n --input_mrtrix MRtrix format.\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_gradients_generate_sampling", + "docstring": "Generate multi-shell gradient sampling with various processing options. 
Helps\naccelerate gradients, optimize duty cycle and avoid artefacts.\n\nMulti-shell gradient sampling is generated as in [1]. The bvecs are then\nflipped to maximize spread for eddy current correction, b0s are interleaved at\nequal spacing and the non-b0 samples are finally shuffled to minimize the total\ndiffusion gradient amplitude over a few TR.\n\nFormerly: scil_generate_gradient_sampling.py", + "help": "usage: scil_gradients_generate_sampling.py [-h] [--eddy] [--duty]\n [--no_b0_start NO_B0_START | --b0_every B0_EVERY]\n [--b0_end] [--b0_value B0_VALUE]\n [--b0_philips]\n (--bvals bvals [bvals ...] | --b_lin_max B_LIN_MAX | --q_lin_max Q_LIN_MAX)\n (--fsl | --mrtrix)\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n nb_samples_per_shell\n [nb_samples_per_shell ...]\n out_basename\n\nGenerate multi-shell gradient sampling with various processing options. Helps\naccelerate gradients, optimize duty cycle and avoid artefacts.\n\nMulti-shell gradient sampling is generated as in [1]. The bvecs are then\nflipped to maximize spread for eddy current correction, b0s are interleaved at\nequal spacing and the non-b0 samples are finally shuffled to minimize the total\ndiffusion gradient amplitude over a few TR.\n\nFormerly: scil_generate_gradient_sampling.py\n\npositional arguments:\n nb_samples_per_shell Number of samples on each non b0 shell. \n If multishell, provide a number per shell.\n out_basename Gradient sampling output basename (don't include extension).\n Please add options --fsl and/or --mrtrix below.\n\noptions:\n -h, --help show this help message and exit\n --eddy If set, we apply eddy optimization.\n B-vectors are flipped to be well spread without symmetry.\n --duty If set, we apply duty cycle optimization. \n B-vectors are shuffled to reduce consecutive colinearity in the samples. [False]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nb0 acquisitions:\n Default if you add no option is to have a b0 at the start.\n\n --no_b0_start NO_B0_START\n If set, do not add a b0 at the beginning. \n --b0_every B0_EVERY Interleave a b0 every n=b0_every values. Starts after the first b0 \n (cannot be used with --no_b0_start). Must be an integer >= 1.\n --b0_end If set, adds a b0 as last sample.\n --b0_value B0_VALUE b-value of the b0s. [0.0]\n --b0_philips If set, replaces values of b0s bvecs by existing bvecs for Philips handling.\n\nNon-b0 acquisitions:\n --bvals bvals [bvals ...]\n bval of each non-b0 shell.\n --b_lin_max B_LIN_MAX\n b-max for linear bval distribution in *b*.\n --q_lin_max Q_LIN_MAX\n b-max for linear bval distribution in *q*; \n the square root of b-values will be linearly distributed..\n\nSave as:\n --fsl Save in FSL format (.bvec/.bval).\n --mrtrix Save in MRtrix format (.b).\n\nReferences: [1] Emmanuel Caruyer, Christophe Lenglet, Guillermo Sapiro,\nRachid Deriche. Design of multishell gradient sampling with uniform coverage\nin diffusion MRI. Magnetic Resonance in Medicine, Wiley, 2013, 69 (6),\npp. 1534-1540. \n \n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_gradients_modify_axes", + "docstring": "Flip (ex, x --> -x) or swap (ex, x <-> y) chosen axes of the gradient sampling\nmatrix. 
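The eddy optimization mentioned above (bvecs flipped so they are well spread without symmetry) can be pictured with a small greedy sketch. This illustrates only the idea of sign selection; it is not the Caruyer et al. [1] sampling method the script implements, and the directions below are toy data.

    import numpy as np

    rng = np.random.default_rng(0)
    dirs = rng.normal(size=(30, 3))
    dirs /= np.linalg.norm(dirs, axis=1, keepdims=True)

    # Greedily pick the sign of each direction so the running mean stays
    # close to zero, i.e. the signed set covers the full sphere.
    signed = [dirs[0]]
    for d in dirs[1:]:
        mean = np.mean(signed, axis=0)
        signed.append(-d if np.dot(mean, d) > 0 else d)
    signed = np.array(signed)
    print(np.linalg.norm(signed.mean(axis=0)))  # small value -> well spread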
Result will be saved in the same format as input gradient sampling\nfile.\n\nFormerly: scil_flip_gradients.py or scil_swap_gradient_axis.py", + "help": "usage: scil_gradients_modify_axes.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                     in_gradient_sampling_file\n                                     out_gradient_sampling_file\n                                     {1,2,3,-1,-2,-3} {1,2,3,-1,-2,-3}\n                                     {1,2,3,-1,-2,-3}\n\nFlip (ex, x --> -x) or swap (ex, x <-> y) chosen axes of the gradient sampling\nmatrix. Result will be saved in the same format as input gradient sampling\nfile.\n\nFormerly: scil_flip_gradients.py or scil_swap_gradient_axis.py\n\npositional arguments:\n  in_gradient_sampling_file\n                        Path to gradient sampling file. (.bvec or .b)\n  out_gradient_sampling_file\n                        Where to save the flipped gradient sampling file. Extension (.bvec or .b) must be the same as in_gradient_sampling_file.\n  {1,2,3,-1,-2,-3}      The final order of the axes, compared to original order: x=1 y=2 z=3.\n                        Ex: to only flip y: 1 -2 3.\n                        Ex: to only swap x and y: 2 1 3.\n                        Ex: to first flip x, then permute all three axes: 3 -1 2.\n\noptions:\n  -h, --help            show this help message and exit\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_gradients_round_bvals", + "docstring": "Select b-values on specific b-value shells.\n\nWith the --tolerance argument, this is useful for sampling schemes where\nb-values of a shell are not all identical. Adjust the tolerance to vary the\naccepted interval around the targeted b-value.\n\nFor example, a b-value of 2000 and a tolerance of 20 will select all b-values\nbetween [1980, 2020] and round them to the value of 2000.\n\n>> scil_gradients_round_bvals.py bvals 0 1000 2000 newbvals --tolerance 20\n\nFormerly: scil_resample_bvals.py", + "help": "usage: scil_gradients_round_bvals.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                     in_bval shells [shells ...] out_bval\n                                     tolerance\n\nSelect b-values on specific b-value shells.\n\nWith the --tolerance argument, this is useful for sampling schemes where\nb-values of a shell are not all identical. Adjust the tolerance to vary the\naccepted interval around the targeted b-value.\n\nFor example, a b-value of 2000 and a tolerance of 20 will select all b-values\nbetween [1980, 2020] and round them to the value of 2000.\n\n>> scil_gradients_round_bvals.py bvals 0 1000 2000 newbvals --tolerance 20\n\nFormerly: scil_resample_bvals.py\n\npositional arguments:\n  in_bval               The b-values in FSL format.\n  shells                The list of expected shells. For example 0 1000 2000.\n                        All b-values in the b_val file should correspond to one given shell (up to the tolerance).\n  out_bval              The name of the output b-values.\n  tolerance             The tolerated gap between the b-values to extract and the \n                        actual b-values. Expecting an integer value. Comparison is \n                        strict: a b-value of 1010 with a tolerance of 10 is NOT \n                        included in shell 1000. Suggestion: 20.\n\noptions:\n  -h, --help            show this help message and exit\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_gradients_validate_correct", + "docstring": "Detect sign flips and/or axes swaps in the gradients table from a fiber\ncoherence index [1]. 
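The strict tolerance rule described for scil_gradients_round_bvals (the gap must be strictly below the tolerance) can be sketched in a few NumPy lines; the b-values below are toy data.

    import numpy as np

    bvals = np.array([0, 5, 995, 1005, 1990, 2010])
    shells = np.array([0, 1000, 2000])
    tolerance = 20

    # Assign each b-value to its closest shell; strict comparison, as in the
    # help text (a gap equal to the tolerance is rejected).
    diff = np.abs(bvals[:, None] - shells[None, :])
    closest = np.argmin(diff, axis=1)
    if np.any(diff[np.arange(len(bvals)), closest] >= tolerance):
        raise ValueError("Some b-values do not fit any shell within tolerance.")
    rounded = shells[closest]
    print(rounded)  # [   0    0 1000 1000 2000 2000]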
The script takes as input the principal direction(s)\nat each voxel, the b-vectors and the fractional anisotropy map and outputs\na corrected b-vectors file.\n\nA typical pipeline could be:\n>>> scil_dti_metrics.py dwi.nii.gz bval bvec --not_all --fa fa.nii.gz\n --evecs peaks.nii.gz\n>>> scil_gradients_validate_correct.py bvec peaks_v1.nii.gz fa.nii.gz bvec_corr\n\nNote that peaks_v1.nii.gz is the file containing the direction associated\nto the highest eigenvalue at each voxel.\n\nIt is also possible to use a file containing multiple principal directions per\nvoxel, given that they are sorted by decreasing amplitude. In that case, the\nfirst direction (with the highest amplitude) will be chosen for validation.\nOnly 4D data is supported, so the directions must be stored in a single\ndimension. For example, peaks.nii.gz from scil_fodf_metrics.py could be used.\n\nFormerly: scil_validate_and_correct_bvecs.py", + "help": "usage: scil_gradients_validate_correct.py [-h] [--mask MASK]\n [--fa_threshold FA_THRESHOLD]\n [--column_wise]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bvec in_peaks in_FA out_bvec\n\nDetect sign flips and/or axes swaps in the gradients table from a fiber\ncoherence index [1]. The script takes as input the principal direction(s)\nat each voxel, the b-vectors and the fractional anisotropy map and outputs\na corrected b-vectors file.\n\nA typical pipeline could be:\n>>> scil_dti_metrics.py dwi.nii.gz bval bvec --not_all --fa fa.nii.gz\n --evecs peaks.nii.gz\n>>> scil_gradients_validate_correct.py bvec peaks_v1.nii.gz fa.nii.gz bvec_corr\n\nNote that peaks_v1.nii.gz is the file containing the direction associated\nto the highest eigenvalue at each voxel.\n\nIt is also possible to use a file containing multiple principal directions per\nvoxel, given that they are sorted by decreasing amplitude. In that case, the\nfirst direction (with the highest amplitude) will be chosen for validation.\nOnly 4D data is supported, so the directions must be stored in a single\ndimension. For example, peaks.nii.gz from scil_fodf_metrics.py could be used.\n\nFormerly: scil_validate_and_correct_bvecs.py\n\npositional arguments:\n in_bvec Path to bvec file.\n in_peaks Path to peaks file.\n in_FA Path to the fractional anisotropy file.\n out_bvec Path to corrected bvec file (FSL format).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to an optional mask. If set, FA and Peaks will only be used inside the mask.\n --fa_threshold FA_THRESHOLD\n FA threshold. Only voxels with FA higher than fa_threshold will be considered. [0.2]\n --column_wise Specify if input peaks are column-wise (..., 3, N) instead of row-wise (..., N, 3).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Schilling KG, Yeh FC, Nath V, Hansen C, Williams O, Resnick S, Anderson AW,\nLandman BA. A fiber coherence index for quality control of B-table orientation\nin diffusion MRI scans. Magn Reson Imaging. 
2019 May;58:82-89.\ndoi: 10.1016/j.mri.2019.01.018.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_gradients_validate_correct_eddy", + "docstring": "Validate and correct gradients from eddy outputs.\nWith full AP-PA eddy outputs, a full bvec/bval pair is produced (2x the nb of\ndirs and bvals), which does not fit the output dwi (1x the nb of dirs).\n\nFormerly: scil_validate_and_correct_eddy_gradients.py", + "help": "usage: scil_gradients_validate_correct_eddy.py [-h]\n                                               [-v [{DEBUG,INFO,WARNING}]]\n                                               [-f]\n                                               in_bvec in_bval nb_dirs\n                                               out_bvec out_bval\n\nValidate and correct gradients from eddy outputs.\nWith full AP-PA eddy outputs, a full bvec/bval pair is produced (2x the nb of\ndirs and bvals), which does not fit the output dwi (1x the nb of dirs).\n\nFormerly: scil_validate_and_correct_eddy_gradients.py\n\npositional arguments:\n  in_bvec               In bvec file.\n  in_bval               In bval file.\n  nb_dirs               Number of directions per DWI.\n  out_bvec              Out bvec file.\n  out_bval              Out bval file.\n\noptions:\n  -h, --help            show this help message and exit\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_header_print_info", + "docstring": "Print the raw header from the provided file or only the specified keys.\nSupports trk, nii and mgz files.\n\nFormerly: scil_print_header.py", + "help": "usage: scil_header_print_info.py [-h] [--keys KEYS [KEYS ...]]\n                                 [--print_affine] [-v [{DEBUG,INFO,WARNING}]]\n                                 in_file\n\nPrint the raw header from the provided file or only the specified keys.\nSupports trk, nii and mgz files.\n\nFormerly: scil_print_header.py\n\npositional arguments:\n  in_file               Input file (trk, nii and mgz).\n\noptions:\n  -h, --help            show this help message and exit\n  --keys KEYS [KEYS ...]\n                        Print only the specified keys.\n  --print_affine        Print nibabel's affine.\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_header_validate_compatibility", + "docstring": "Will compare all input files against the first one for the compatibility\nof their spatial attributes.\n\nSpatial attributes are: affine, dimensions, voxel sizes and voxel order.\n\nFormerly: scil_verify_space_attributes_compatibility.py", + "help": "usage: scil_header_validate_compatibility.py [-h] [--reference REFERENCE]\n                                             [-v [{DEBUG,INFO,WARNING}]]\n                                             in_files [in_files ...]\n\nWill compare all input files against the first one for the compatibility\nof their spatial attributes.\n\nSpatial attributes are: affine, dimensions, voxel sizes and voxel order.\n\nFormerly: scil_verify_space_attributes_compatibility.py\n\npositional arguments:\n  in_files              List of files to compare (trk, tck and nii/nii.gz).\n\noptions:\n  -h, --help            show this help message and exit\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. 
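A rough sketch of the candidate space that a sign-flip/axis-swap search such as scil_gradients_validate_correct (above) explores: 6 axis orders times 8 sign patterns gives 48 candidate bvec tables. The scoring step (the fiber coherence index of Schilling et al.) is deliberately omitted, and the bvecs here are synthetic rather than loaded from an FSL file.

    import itertools
    import numpy as np

    # Synthetic FSL-style bvec table, shape (3, N); in practice this would
    # come from np.loadtxt on a bvec file.
    rng = np.random.default_rng(1)
    bvecs = rng.normal(size=(3, 10))
    bvecs /= np.linalg.norm(bvecs, axis=0, keepdims=True)

    candidates = []
    for perm in itertools.permutations(range(3)):            # 6 axis orders
        for signs in itertools.product([1, -1], repeat=3):   # 8 sign patterns
            transfo = np.diag(signs)[:, list(perm)]          # signed permutation
            candidates.append(transfo @ bvecs)
    # 48 candidate tables; the script keeps the one maximizing fiber coherence.
    print(len(candidates))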
\n                        Default level is warning, default when using -v is info.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_json_convert_entries_to_xlsx", + "docstring": "Convert a final aggregated json file to an Excel spreadsheet.\nTypically used during the tractometry pipeline.\n\nFormerly: scil_convert_json_to_xlsx.py", + "help": "usage: scil_json_convert_entries_to_xlsx.py [-h] [--no_sort_subs]\n                                            [--no_sort_bundles]\n                                            [--ignore_bundles FILE]\n                                            [--stats_over_population]\n                                            [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                            in_json out_xlsx\n\nConvert a final aggregated json file to an Excel spreadsheet.\nTypically used during the tractometry pipeline.\n\nFormerly: scil_convert_json_to_xlsx.py\n\npositional arguments:\n  in_json               File containing the json stats (.json).\n  out_xlsx              Output Excel file for the stats (.xlsx).\n\noptions:\n  -h, --help            show this help message and exit\n  --no_sort_subs        If set, subjects won't be sorted alphabetically.\n  --no_sort_bundles     If set, bundles won't be sorted alphabetically.\n  --ignore_bundles FILE\n                        Path to a text file containing a list of bundles to ignore (.txt).\n                        One bundle, corresponding to keys in the json, per line.\n  --stats_over_population\n                        If set, consider the input stats to be over an entire population and not subject-based.\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_json_harmonize_entries", + "docstring": "This script will harmonize a json file by adding missing keys and values\nthat differ between the different layers of the dictionary.\n\nThis is used only (for now) in the Aggregate_All_* portion of tractometry-flow,\nto counter the problem of missing bundles/metrics/lesions between subjects.\n\nThe most common use case is when specific subjects have missing bundles\nwhich will cause a pandas array to be incomplete, and thus crash. Finding out\nthe union of all bundles/metrics/lesions will allow the creation of a complete json\n(but with NaN for missing values).\n\nFormerly: scil_harmonize_json.py", + "help": "usage: scil_json_harmonize_entries.py [-h] [--indent INDENT] [--sort_keys]\n                                      [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                      in_file out_file\n\n This script will harmonize a json file by adding missing keys and values\nthat differ between the different layers of the dictionary.\n\nThis is used only (for now) in the Aggregate_All_* portion of tractometry-flow,\nto counter the problem of missing bundles/metrics/lesions between subjects.\n\nThe most common use case is when specific subjects have missing bundles\nwhich will cause a pandas array to be incomplete, and thus crash. Finding out\nthe union of all bundles/metrics/lesions will allow the creation of a complete json\n(but with NaN for missing values).\n\nFormerly: scil_harmonize_json.py\n\npositional arguments:\n  in_file               Input file (json).\n  out_file              Output file (json).\n\noptions:\n  -h, --help            show this help message and exit\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_json_merge_entries", + "docstring": "Merge multiple json files into a single one.\nTypically used during the tractometry pipeline.\n\nWithout option it will simply merge all entries at the top level, the top\nlevel must not have any conflicting keys.\n\n--keep_separate option will add a parent for each file, its basename will\nbecome the key.\n\n--no_list option will merge all entries at the top level, if there is a\nconflict the lowest level will be extended with the new values (if list) or\nadded (if value)\n\n--add_parent_key option will add a parent key before merging all entries.\n\n--remove_parent_key option will remove the parent key before merging all\nentries.\n\n--recursive option will merge all entries (scalar) at the lowest layers as a\nlist.\n\n--average_last_layer option will average all entries (scalar) at the lowest\nlayers, but instead of creating a list it creates a mean/std level.\n\nFormerly: scil_merge_json.py", + "help": "usage: scil_json_merge_entries.py [-h] [--keep_separate] [--no_list]\n [--add_parent_key ADD_PARENT_KEY]\n [--remove_parent_key] [--recursive]\n [--average_last_layer] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_json [in_json ...] out_json\n\n Merge multiple json files into a single one.\nTypically used during the tractometry pipeline.\n\nWithout option it will simply merge all entries at the top level, the top\nlevel must not have any conflicting keys.\n\n--keep_separate option will add a parent for each file, its basename will\nbecome the key.\n\n--no_list option will merge all entries at the top level, if there is a\nconflict the lowest level will be extended with the new values (if list) or\nadded (if value)\n\n--add_parent_key option will add a parent key before merging all entries.\n\n--remove_parent_key option will remove the parent key before merging all\nentries.\n\n--recursive option will merge all entries (scalar) at the lowest layers as a\nlist.\n\n--average_last_layer option will average all entries (scalar) at the lowest\nlayers, but instead of creating a list it creates a mean/std level.\n\nFormerly: scil_merge_json.py\n\npositional arguments:\n in_json List of json files to merge (.json).\n out_json Output json file (.json).\n\noptions:\n -h, --help show this help message and exit\n --keep_separate Merge entries as separate keys based on filename.\n --no_list Merge entries knowing there is no conflict.\n --add_parent_key ADD_PARENT_KEY\n Merge all entries under a single parent.\n --remove_parent_key Merge ignoring parent key (e.g for population).\n --recursive Merge all entries at the lowest layers.\n --average_last_layer Average all entries at the lowest layers.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_labels_combine", + "docstring": "Script to combine labels from multiple volumes. 
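The harmonization idea described for scil_json_harmonize_entries (take the union of keys, fill missing ones with NaN) in a one-layer sketch; the real script works recursively through nested layers, and the subject entries below are toy data.

    import json

    def harmonize(entries, fill=float("nan")):
        # Union of keys across one layer of dicts; missing values become NaN,
        # mirroring the "complete json" idea described above.
        all_keys = set().union(*(e.keys() for e in entries))
        return [{k: e.get(k, fill) for k in sorted(all_keys)} for e in entries]

    subjects = [{"AF_L": 1.2, "CST_R": 0.9}, {"AF_L": 1.1}]
    print(json.dumps(harmonize(subjects), indent=2))  # second subject gets NaN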
If there is overlap, it will\noverwrite them based on the input order.\n\n >>> scil_labels_combine.py out_labels.nii.gz\n --volume_ids animal_labels.nii 20\n --volume_ids DKT_labels.nii.gz 44 53\n --out_labels_indices 20 44 53\n >>> scil_labels_combine.py slf_labels.nii.gz\n --volume_ids a2009s_aseg.nii.gz all\n --volume_ids clean/s1__DKT.nii.gz 1028 2028\n\nFormerly: scil_combine_labels.py.", + "help": "usage: scil_labels_combine.py [-h] --volume_ids VOLUME_IDS [VOLUME_IDS ...]\n [--out_labels_ids OUT_LABELS_IDS [OUT_LABELS_IDS ...]\n | --unique | --group_in_m]\n [--background BACKGROUND] [--merge_groups]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n output\n\nScript to combine labels from multiple volumes. If there is overlap, it will\noverwrite them based on the input order.\n\n >>> scil_labels_combine.py out_labels.nii.gz\n --volume_ids animal_labels.nii 20\n --volume_ids DKT_labels.nii.gz 44 53\n --out_labels_indices 20 44 53\n >>> scil_labels_combine.py slf_labels.nii.gz\n --volume_ids a2009s_aseg.nii.gz all\n --volume_ids clean/s1__DKT.nii.gz 1028 2028\n\nFormerly: scil_combine_labels.py.\n\npositional arguments:\n output Combined labels volume output.\n\noptions:\n -h, --help show this help message and exit\n --volume_ids VOLUME_IDS [VOLUME_IDS ...]\n List of volumes directly followed by their labels:\n --volume_ids atlasA id1a id2a \n --volume_ids atlasB id1b id2b ... \n \"all\" can be used instead of id numbers.\n --out_labels_ids OUT_LABELS_IDS [OUT_LABELS_IDS ...]\n List of labels indices for output images.\n --unique If set, output id with unique labels, excluding first background value.\n --group_in_m Add (x * 10 000) to each volume labels, where x is the input volume order number.\n --background BACKGROUND\n Background id, excluded from output [0],\n the value is used as output background value.\n --merge_groups Each group from the --volume_ids option will be merged as a single labels.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. 
OHBM 2019.\n Surface integration for connectome analysis in age prediction.\n \n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_labels_dilate", + "docstring": "Dilate regions (with or without masking) from a labeled volume:\n- \"label_to_dilate\" are regions that will dilate over\n \"label_to_fill\" if close enough to it (\"distance\").\n- \"label_to_dilate\", by default (None) will be all\n non-\"label_to_fill\" and non-\"label_not_to_dilate\".\n- \"label_not_to_dilate\" will not be changed, but will not dilate.\n- \"mask\" is where the dilation is allowed (constrained)\n in addition to \"background_label\" (logical AND)\n\n>>> scil_labels_dilate.py wmparc_t1.nii.gz wmparc_dil.nii.gz \\\n --label_to_fill 0 5001 5002 \\\n --label_not_to_dilate 4 43 10 11 12 49 50 51\n\nFormerly: scil_dilate_labels.py", + "help": "usage: scil_labels_dilate.py [-h] [--distance DISTANCE]\n [--labels_to_dilate LABELS_TO_DILATE [LABELS_TO_DILATE ...]]\n [--labels_to_fill LABELS_TO_FILL [LABELS_TO_FILL ...]]\n [--labels_not_to_dilate LABELS_NOT_TO_DILATE [LABELS_NOT_TO_DILATE ...]]\n [--mask MASK] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file out_file\n\nDilate regions (with or without masking) from a labeled volume:\n- \"label_to_dilate\" are regions that will dilate over\n \"label_to_fill\" if close enough to it (\"distance\").\n- \"label_to_dilate\", by default (None) will be all\n non-\"label_to_fill\" and non-\"label_not_to_dilate\".\n- \"label_not_to_dilate\" will not be changed, but will not dilate.\n- \"mask\" is where the dilation is allowed (constrained)\n in addition to \"background_label\" (logical AND)\n\n>>> scil_labels_dilate.py wmparc_t1.nii.gz wmparc_dil.nii.gz \\\n --label_to_fill 0 5001 5002 \\\n --label_not_to_dilate 4 43 10 11 12 49 50 51\n\nFormerly: scil_dilate_labels.py\n\npositional arguments:\n in_file Path of the volume (nii or nii.gz).\n out_file Output filename of the dilated labels.\n\noptions:\n -h, --help show this help message and exit\n --distance DISTANCE Maximal distance to dilate (in mm) [2.0].\n --labels_to_dilate LABELS_TO_DILATE [LABELS_TO_DILATE ...]\n Label list to dilate. By default it dilates all \n labels not in labels_to_fill nor in labels_not_to_dilate.\n --labels_to_fill LABELS_TO_FILL [LABELS_TO_FILL ...]\n Background id / labels to be filled [[0]],\n the first one is given as output background value.\n --labels_not_to_dilate LABELS_NOT_TO_DILATE [LABELS_NOT_TO_DILATE ...]\n Label list not to dilate.\n --mask MASK Only dilate values inside the mask.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. 
OHBM 2019.\n        Surface integration for connectome analysis in age prediction.\n        \n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_labels_remove", + "docstring": "Script to remove specific labels from an atlas volume.\n\n    >>> scil_labels_remove.py DKT_labels.nii out_labels.nii.gz -i 5001 5002\n\nFormerly: scil_remove_labels.py", + "help": "usage: scil_labels_remove.py [-h] -i INDICES [INDICES ...]\n                             [--background BACKGROUND]\n                             [-v [{DEBUG,INFO,WARNING}]] [-f]\n                             in_labels out_labels\n\nScript to remove specific labels from an atlas volume.\n\n    >>> scil_labels_remove.py DKT_labels.nii out_labels.nii.gz -i 5001 5002\n\nFormerly: scil_remove_labels.py\n\npositional arguments:\n  in_labels             Input labels volume.\n  out_labels            Output labels volume.\n\noptions:\n  -h, --help            show this help message and exit\n  -i INDICES [INDICES ...], --indices INDICES [INDICES ...]\n                        List of labels indices to remove.\n  --background BACKGROUND\n                        Integer used for removed labels [0].\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\n    References:\n        [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n            Evans A.C. and Descoteaux M. OHBM 2019.\n            Surface integration for connectome analysis in age prediction.\n    \n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_labels_split_volume_by_ids", + "docstring": "Split a label image into multiple images where the name of the output images\nis the id of the label (ex. 35.nii.gz, 36.nii.gz, ...). If the --range option\nis not provided, all labels of the image are extracted. The label 0 is\nconsidered as the background and is ignored.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_ids.py", + "help": "usage: scil_labels_split_volume_by_ids.py [-h] [--out_dir OUT_DIR]\n                                          [--out_prefix OUT_PREFIX]\n                                          [-r min max min max]\n                                          [--background BACKGROUND]\n                                          [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                          in_labels\n\nSplit a label image into multiple images where the name of the output images\nis the id of the label (ex. 35.nii.gz, 36.nii.gz, ...). If the --range option\nis not provided, all labels of the image are extracted. The label 0 is\nconsidered as the background and is ignored.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_ids.py\n\npositional arguments:\n  in_labels             Path of the input label file, in a format supported by Nibabel.\n\noptions:\n  -h, --help            show this help message and exit\n  --out_dir OUT_DIR     Put all output images in a specific directory.\n  --out_prefix OUT_PREFIX\n                        Prefix to be used for each output image.\n  -r min max min max, --range min max min max\n                        Specifies a subset of labels to split, formatted as min max. Ex: -r 3 5 will give files _3, _4, _5.\n  --background BACKGROUND\n                        Background value. Will not be saved as a separate label. Default: 0.\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_labels_split_volume_from_lut", + "docstring": "Split a label image into multiple images where the name of the output images\nis taken from a lookup table (ex: left-lateral-occipital.nii.gz,\nright-thalamus.nii.gz, ...). 
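The dilation behaviour described for scil_labels_dilate above (each background voxel takes the nearest label, up to a maximum distance) can be sketched with SciPy's Euclidean distance transform. This illustrates the principle on a toy volume, in voxel units rather than mm, and is not the script's exact implementation.

    import numpy as np
    from scipy import ndimage

    labels = np.zeros((32, 32, 32), dtype=np.int32)
    labels[8:12, 8:12, 8:12] = 4
    labels[20:24, 20:24, 20:24] = 43

    # Distance from every background voxel to the nearest labeled voxel,
    # plus the coordinates of that nearest voxel.
    dist, idx = ndimage.distance_transform_edt(labels == 0, return_indices=True)
    nearest = labels[tuple(idx)]

    max_dist = 2.0
    dilated = np.where((labels == 0) & (dist <= max_dist), nearest, labels)
    print(np.unique(dilated))  # [ 0  4 43]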
Only the labels included in the lookup table\nare extracted.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_labels.py", + "help": "usage: scil_labels_split_volume_from_lut.py [-h] [--out_dir OUT_DIR]\n                                            [--out_prefix OUT_PREFIX]\n                                            (--scilpy_lut {freesurfer_subcortical,dk_aggregate_structures,freesurfer_desikan_killiany} | --custom_lut CUSTOM_LUT)\n                                            [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                            in_label\n\nSplit a label image into multiple images where the name of the output images\nis taken from a lookup table (ex: left-lateral-occipital.nii.gz,\nright-thalamus.nii.gz, ...). Only the labels included in the lookup table\nare extracted.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_labels.py\n\npositional arguments:\n  in_label              Path of the input label file, in a format supported by Nibabel.\n\noptions:\n  -h, --help            show this help message and exit\n  --out_dir OUT_DIR     Put all output images in a specific directory.\n  --out_prefix OUT_PREFIX\n                        Prefix to be used for each output image.\n  --scilpy_lut {freesurfer_subcortical,dk_aggregate_structures,freesurfer_desikan_killiany}\n                        Lookup table, in the file scilpy/data/LUT, used to name the output files.\n  --custom_lut CUSTOM_LUT\n                        Path of the lookup table file, used to name the output files.\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_lesions_info", + "docstring": "This script will output information about lesion load in bundle(s).\nThe input can either be streamlines, a binary bundle map, or a bundle voxel\nlabel map.\n\nTo be considered a valid lesion, the lesion volume must be at least\nmin_lesion_vol mm3. This avoids the detection of thousands of single-voxel\nlesions if an automatic lesion segmentation tool is used.\n\nFormerly: scil_analyse_lesions_load.py", + "help": "usage: scil_lesions_info.py [-h]\n                            [--bundle BUNDLE | --bundle_mask BUNDLE_MASK | --bundle_labels_map BUNDLE_LABELS_MAP]\n                            [--min_lesion_vol MIN_LESION_VOL]\n                            [--out_lesion_atlas FILE]\n                            [--out_lesion_stats FILE]\n                            [--out_streamlines_stats FILE] [--indent INDENT]\n                            [--sort_keys] [--reference REFERENCE]\n                            [-v [{DEBUG,INFO,WARNING}]] [-f]\n                            in_lesion out_json\n\nThis script will output information about lesion load in bundle(s).\nThe input can either be streamlines, a binary bundle map, or a bundle voxel\nlabel map.\n\nTo be considered a valid lesion, the lesion volume must be at least\nmin_lesion_vol mm3. 
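The min_lesion_vol filtering just described, sketched with SciPy's connected-component labeling on a toy mask; the voxel volume is assumed to be 1 mm isotropic for simplicity (in practice it comes from the image header).

    import numpy as np
    from scipy import ndimage

    lesion_mask = np.zeros((20, 20, 20), dtype=bool)
    lesion_mask[5:9, 5:9, 5:9] = True   # a plausible lesion (64 voxels)
    lesion_mask[15, 15, 15] = True      # single-voxel noise

    voxel_vol_mm3 = 1.0                 # product of voxel sizes, assumed 1 mm iso
    min_lesion_vol = 7.0

    labeled, n = ndimage.label(lesion_mask)
    for i in range(1, n + 1):
        if np.sum(labeled == i) * voxel_vol_mm3 < min_lesion_vol:
            labeled[labeled == i] = 0   # discard components below the threshold
    print(np.unique(labeled))           # the single-voxel component is gone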
This avoids the detection of thousands of single-voxel\nlesions if an automatic lesion segmentation tool is used.\n\nFormerly: scil_analyse_lesions_load.py\n\npositional arguments:\n  in_lesion             Binary mask of the lesion(s) (.nii.gz).\n  out_json              Output file for lesion information (.json).\n\noptions:\n  -h, --help            show this help message and exit\n  --bundle BUNDLE       Path of the bundle file (.trk).\n  --bundle_mask BUNDLE_MASK\n                        Path of the bundle binary mask (.nii.gz).\n  --bundle_labels_map BUNDLE_LABELS_MAP\n                        Path of the bundle labels map (.nii.gz).\n  --min_lesion_vol MIN_LESION_VOL\n                        Minimum lesion volume in mm3 [7].\n  --out_lesion_atlas FILE\n                        Save the labelized lesion(s) map (.nii.gz).\n  --out_lesion_stats FILE\n                        Save the lesion-wise volume measure (.json).\n  --out_streamlines_stats FILE\n                        Save the lesion-wise streamline count (.json).\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nJson options:\n  --indent INDENT       Indent for json pretty print.\n  --sort_keys           Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_mti_adjust_B1_header", + "docstring": "Correct B1 map header problem, by applying the scaling (slope) and setting\nthe slope to 1.", + "help": "usage: scil_mti_adjust_B1_header.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                    in_B1_map out_B1_map in_B1_json\n\nCorrect B1 map header problem, by applying the scaling (slope) and setting\nthe slope to 1.\n\npositional arguments:\n  in_B1_map             Path to input B1 map file.\n  out_B1_map            Path to output B1 map file.\n  in_B1_json            Json file of the B1 map.\n\noptions:\n  -h, --help            show this help message and exit\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_mti_maps_MT", + "docstring": "This script computes two myelin indices maps from the Magnetization Transfer\n(MT) images.\nMagnetization Transfer is a contrast mechanism in tissue resulting from the\nproton exchange between non-aqueous protons (from macromolecules and their\nclosely associated water molecules, the \"bound\" pool) and protons in the free\nwater pool called aqueous protons. This exchange attenuates the MRI signal,\nintroducing microstructure-dependent contrast. MT's effect reflects the\nrelative density of macromolecules such as proteins and lipids, it has been\nassociated with myelin content in white matter of the brain.\n\nDifferent contrasts can be obtained with an off-resonance pulse saturating the\nprotons of non-aqueous molecules at a given irradiation frequency. The MT maps are\nobtained using three or four contrasts: a single positive frequency image\nand/or a single negative frequency image, and two unsaturated contrasts as\nreference. 
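For reference while reading the MT entries that follow, the standard MTR definition, on toy values; the script's full processing (echo handling, optional filtering, B1 correction) is not shown.

    import numpy as np

    mtoff = np.full((4, 4, 4), 1000.0)  # unsaturated (PD-weighted) reference
    mton = np.full((4, 4, 4), 700.0)    # positive or negative frequency image

    # Magnetization Transfer Ratio, in percent: the signal drop caused by the
    # saturation pulse, relative to the unsaturated reference.
    with np.errstate(divide="ignore", invalid="ignore"):
        mtr = 100.0 * (mtoff - mton) / mtoff
    print(mtr[0, 0, 0])  # 30.0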
These two references should be acquired with predominant PD\n(proton density) and T1 weighting at different excitation flip angles\n(a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all input must have a matching json file with the same filename\n - all contrasts must have a same number of echoes and coregistered\n between them before running the script.\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\n\nThe output consists of a MT_native_maps folder containing the 2 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n The MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n The MT saturation is a pseudo-quantitative maps representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\n\nThe final maps from MT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 1 or 2 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n\n>>> scil_mti_maps_MT.py path/to/output/directory\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_positive path/to/echo*pos.nii.gz --in_negative path/to/echo*neg.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.", + "help": "usage: scil_mti_maps_MT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n [--in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]]\n [--extended] [--filtering]\n [--in_jsons PD_json T1_json | --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time]\n [--in_B1_map IN_B1_MAP]\n [--B1_correction_method {empiric,model_based}]\n [--B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]]\n [--B1_nominal B1_NOMINAL]\n [--B1_smooth_dims B1_SMOOTH_DIMS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dir\n\nThis script computes two myelin 
indices maps from the Magnetization Transfer\n(MT) images.\nMagnetization Transfer is a contrast mechanism in tissue resulting from the\nproton exchange between non-aqueous protons (from macromolecules and their\nclosely associated water molecules, the \"bound\" pool) and protons in the free\nwater pool called aqueous protons. This exchange attenuates the MRI signal,\nintroducing microstructure-dependent contrast. MT's effect reflects the\nrelative density of macromolecules such as proteins and lipids, it has been\nassociated with myelin content in white matter of the brain.\n\nDifferent contrasts can be obtained with an off-resonance pulse saturating the\nprotons of non-aqueous molecules at a given irradiation frequency. The MT maps are\nobtained using three or four contrasts: a single positive frequency image\nand/or a single negative frequency image, and two unsaturated contrasts as\nreference. These two references should be acquired with predominant PD\n(proton density) and T1 weighting at different excitation flip angles\n(a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\nInput Data recommendation:\n  - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n    https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n  - dcm2niix conversion will create all echo files for each contrast and\n    corresponding json files\n  - all input must have a matching json file with the same filename\n  - all contrasts must have a same number of echoes and coregistered\n    between them before running the script.\n  - Mask must be coregistered to the echo images\n  - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\nThe output consists of a MT_native_maps folder containing the 2 myelin maps:\n  - MTR.nii.gz : Magnetization Transfer Ratio map\n  The MT ratio is a measure reflecting the amount of bound protons.\n  - MTsat.nii.gz : Magnetization Transfer saturation map\n  The MT saturation is a pseudo-quantitative maps representing\n  the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n  - positive.nii.gz : single positive frequency image\n  - negative.nii.gz : single negative frequency image\n  - mtoff_PD.nii.gz : unsaturated proton density weighted image\n  - mtoff_T1.nii.gz : unsaturated T1 weighted image\n  - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n  - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n  - R1app.nii.gz : Apparent R1 map computed for MTsat.\n  - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\nThe final maps from MT_native_maps can be corrected for B1+ field\n  inhomogeneity, using either an empiric method with\n    --in_B1_map option, suffix *B1_corrected is added for each map.\n    --B1_correction_method empiric\n  or a model-based method with\n    --in_B1_map option, suffix *B1_corrected is added for each map.\n    --B1_correction_method model_based\n    --B1_fitValues 1 or 2 .mat files, obtained externally from\n    https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n    and given in this order: positive frequency saturation, negative frequency\n    saturation.\nFor both methods, the nominal value of the B1 map can be set with\n  --B1_nominal value\n\n>>> scil_mti_maps_MT.py path/to/output/directory\n    --in_mtoff_pd path/to/echo*mtoff.nii.gz\n    --in_positive path/to/echo*pos.nii.gz --in_negative path/to/echo*neg.nii.gz\n    --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n    --in_jsons 
path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.\n\npositional arguments:\n out_dir Path to output folder.\n\noptions:\n -h, --help show this help message and exit\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n --mask MASK Path to the binary brain mask.\n --extended If set, outputs the folder Complementary_maps.\n --filtering Gaussian filtering to remove Gibbs ringing. Not recommended.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nContrast maps:\n Path to echoes corresponding to contrast images. All constrasts must have \n the same number of echoes and coregistered between them. Use * to include all echoes. \n The in_mtoff_pd input and at least one of in_positive or in_negative are required.\n\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n Path to all echoes corresponding to the positive frequency \n saturation pulse.\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n Path to all echoes corresponding to the negative frequency \n saturation pulse.\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n Path to all echoes corresponding to the predominant PD \n (proton density) weighting images with no saturation pulse.\n --in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]\n Path to all echoes corresponding to the predominant T1 \n weighting images with no saturation pulse. This one is optional, \n since it is only needed for the calculation of MTsat. \n Acquisition parameters should also be set with this image.\n\nAcquisition parameters:\n Acquisition parameters required for MTsat and ihMTsat calculation. \n These are the excitation flip angles (a_PD, a_T1), in DEGREES, and \n repetition times (TR_PD, TR_T1) of the PD and T1 images, in SECONDS. \n Can be given through json files (--in_jsons) or directly (--in_acq_parameters).\n\n --in_jsons PD_json T1_json\n Path to MToff PD json file and MToff T1 json file, in that order. \n The acquisition parameters will be extracted from these files. \n Must come from a Philips acquisition, otherwise, use in_acq_parameters.\n --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time\n Acquisition parameters in that order: flip angle of mtoff_PD, \n flip angle of mtoff_T1, repetition time of mtoff_PD, \n repetition time of mtoff_T1\n\nB1 correction:\n --in_B1_map IN_B1_MAP\n Path to B1 coregister map to MT contrasts.\n --B1_correction_method {empiric,model_based}\n Choice of B1 correction method. Choose between empiric and model-based. \n Note that the model-based method requires a B1 fitvalues file. \n Both method will only correct the saturation measures. [empiric]\n --B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]\n Path to B1 fitvalues files obtained externally. Should be one .mat \n file per input MT-on image, given in this specific order: \n positive frequency saturation, negative frequency saturation.\n --B1_nominal B1_NOMINAL\n Nominal value for the B1 map. For Philips, should be 100. [100]\n --B1_smooth_dims B1_SMOOTH_DIMS\n Dimension of the squared window used for B1 smoothing, in number of voxels. 
[5]\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_mti_maps_ihMT", + "docstring": "This script computes four myelin indices maps from the Magnetization Transfer\n(MT) and inhomogeneous Magnetization Transfer (ihMT) images. Magnetization\nTransfer is a contrast mechanism in tissue resulting from the proton exchange\nbetween non-aqueous protons (from macromolecules and their closely associated\nwater molecules, the \"bound\" pool) and protons in the free water pool called\naqueous protons. This exchange attenuates the MRI signal, introducing\nmicrostructure-dependent contrast. MT's effect reflects the relative density\nof macromolecules such as proteins and lipids, it has been associated with\nmyelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse prior to image\nacquisition (a prepulse), saturating the protons on non-aqueous molecules,\nby applying different frequency irradiation. The two MT maps and two ihMT maps\nare obtained using six contrasts: single positive frequency image, single\nnegative frequency image, dual alternating positive/negative frequency image,\ndual alternating negative/positive frequency image (saturated images);\nand two unsaturated contrasts as reference. These two references should be\nacquired with predominant PD (proton density) and T1 weighting at different\nexcitation flip angles (a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all contrasts must have a same number of echoes and coregistered\n between them before running the script\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\n\nThe output consists of a ihMT_native_maps folder containing the 4 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n - ihMTR.nii.gz : inhomogeneous Magnetization Transfer Ratio map\n The (ih)MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n - ihMTsat.nii.gz : inhomogeneous Magnetization Transfer saturation map\n The (ih)MT saturation is a pseudo-quantitative maps representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - altnp.nii.gz : dual alternating negative and positive frequency image\n - altpn.nii.gz : dual alternating positive and negative frequency image\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_d.nii.gz : MTsat computed from the mean dual frequency images\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\n\nThe final maps from ihMT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n 
--in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 3 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation, dual frequency saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n\n>>> scil_mti_maps_ihMT.py path/to/output/directory\n --in_altnp path/to/echo*altnp.nii.gz --in_altpn path/to/echo*altpn.nii.gz\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_negative path/to/echo*neg.nii.gz --in_positive path/to/echo*pos.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.", + "help": "usage: scil_mti_maps_ihMT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]\n --in_altnp IN_ALTNP [IN_ALTNP ...] --in_altpn\n IN_ALTPN [IN_ALTPN ...] --in_negative IN_NEGATIVE\n [IN_NEGATIVE ...] --in_positive IN_POSITIVE\n [IN_POSITIVE ...] --in_mtoff_pd IN_MTOFF_PD\n [IN_MTOFF_PD ...]\n [--in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]]\n [--extended] [--filtering]\n [--in_jsons PD_json T1_json | --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time]\n [--in_B1_map IN_B1_MAP]\n [--B1_correction_method {empiric,model_based}]\n [--B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]]\n [--B1_nominal B1_NOMINAL]\n [--B1_smooth_dims B1_SMOOTH_DIMS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dir\n\nThis script computes four myelin indices maps from the Magnetization Transfer\n(MT) and inhomogeneous Magnetization Transfer (ihMT) images. Magnetization\nTransfer is a contrast mechanism in tissue resulting from the proton exchange\nbetween non-aqueous protons (from macromolecules and their closely associated\nwater molecules, the \"bound\" pool) and protons in the free water pool called\naqueous protons. This exchange attenuates the MRI signal, introducing\nmicrostructure-dependent contrast. MT's effect reflects the relative density\nof macromolecules such as proteins and lipids, it has been associated with\nmyelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse prior to image\nacquisition (a prepulse), saturating the protons on non-aqueous molecules,\nby applying different frequency irradiation. The two MT maps and two ihMT maps\nare obtained using six contrasts: single positive frequency image, single\nnegative frequency image, dual alternating positive/negative frequency image,\ndual alternating negative/positive frequency image (saturated images);\nand two unsaturated contrasts as reference. 
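The MTsat calculation that both MT scripts perform from the PD/T1 reference pair can be sketched with the Helms et al. (2008) rational approximations. The numbers below are synthetic signals generated from that same model (A = 1000, R1 = 1 s^-1, MTsat = 2 %), so the code recovers the known values; flip angles are in radians, and this is an illustration of the published formulas, not the script's exact code path.

    import numpy as np

    # Toy signals simulated with the Helms et al. (2008) FLASH approximation
    # S = A * alpha * R1 * TR / (alpha**2 / 2 + delta + R1 * TR), delta = MTsat.
    a_pd, a_t1, a_mt = np.deg2rad([6.0, 20.0, 6.0])
    tr_pd = tr_t1 = tr_mt = 0.03  # seconds
    s_pd, s_t1, s_mt = 88.53, 115.18, 56.62

    # Apparent amplitude and R1 from the two unsaturated references.
    r1 = 0.5 * (s_t1 * a_t1 / tr_t1 - s_pd * a_pd / tr_pd) / (
        s_pd / a_pd - s_t1 / a_t1)
    a_app = s_pd * s_t1 * (tr_pd * a_t1 / a_pd - tr_t1 * a_pd / a_t1) / (
        s_t1 * tr_pd * a_t1 - s_pd * tr_t1 * a_pd)

    # MT saturation of the saturated image, in percent.
    mtsat = ((a_app * a_mt / s_mt - 1.0) * r1 * tr_mt - a_mt ** 2 / 2.0) * 100.0
    print(round(r1, 2), round(a_app), round(mtsat, 1))  # ~1.0 ~1000 ~2.0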
These two references should be\nacquired with predominant PD (proton density) and T1 weighting at different\nexcitation flip angles (a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all contrasts must have a same number of echoes and coregistered\n between them before running the script\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\nThe output consists of a ihMT_native_maps folder containing the 4 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n - ihMTR.nii.gz : inhomogeneous Magnetization Transfer Ratio map\n The (ih)MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n - ihMTsat.nii.gz : inhomogeneous Magnetization Transfer saturation map\n The (ih)MT saturation is a pseudo-quantitative maps representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - altnp.nii.gz : dual alternating negative and positive frequency image\n - altpn.nii.gz : dual alternating positive and negative frequency image\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_d.nii.gz : MTsat computed from the mean dual frequency images\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\nThe final maps from ihMT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 3 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation, dual frequency saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n>>> scil_mti_maps_ihMT.py path/to/output/directory\n --in_altnp path/to/echo*altnp.nii.gz --in_altpn path/to/echo*altpn.nii.gz\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_negative path/to/echo*neg.nii.gz --in_positive path/to/echo*pos.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.\n\npositional arguments:\n out_dir Path to output folder.\n\noptions:\n -h, --help show this help message and exit\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n --mask MASK Path to the binary brain mask.\n --extended If set, outputs the folder Complementary_maps.\n 
--filtering Gaussian filtering to remove Gibbs ringing. Not recommended.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nContrast maps:\n Path to echoes corresponding to contrast images. All constrasts must have \n the same number of echoes and coregistered between them. Use * to include all echoes.\n\n --in_altnp IN_ALTNP [IN_ALTNP ...]\n Path to all echoes corresponding to the alternation of \n negative and positive frequency saturation pulse.\n --in_altpn IN_ALTPN [IN_ALTPN ...]\n Path to all echoes corresponding to the alternation of \n positive and negative frequency saturation pulse.\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n Path to all echoes corresponding to the negative frequency \n saturation pulse.\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n Path to all echoes corresponding to the positive frequency \n saturation pulse.\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n Path to all echoes corresponding to the predominant PD \n (proton density) weighting images with no saturation pulse.\n --in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]\n Path to all echoes corresponding to the predominant T1 \n weighting images with no saturation pulse. This one is optional, \n since it is only needed for the calculation of MTsat and ihMTsat. \n Acquisition parameters should also be set with this image.\n\nAcquisition parameters:\n Acquisition parameters required for MTsat and ihMTsat calculation. \n These are the excitation flip angles (a_PD, a_T1), in DEGREES, and \n repetition times (TR_PD, TR_T1) of the PD and T1 images, in SECONDS. \n Can be given through json files (--in_jsons) or directly (--in_acq_parameters).\n\n --in_jsons PD_json T1_json\n Path to MToff PD json file and MToff T1 json file, in that order. \n The acquisition parameters will be extracted from these files. \n Must come from a Philips acquisition, otherwise, use in_acq_parameters.\n --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time\n Acquisition parameters in that order: flip angle of mtoff_PD, \n flip angle of mtoff_T1, repetition time of mtoff_PD, \n repetition time of mtoff_T1\n\nB1 correction:\n --in_B1_map IN_B1_MAP\n Path to B1 coregister map to MT contrasts.\n --B1_correction_method {empiric,model_based}\n Choice of B1 correction method. Choose between empiric and model-based. \n Note that the model-based method requires a B1 fitvalues file. \n Both method will only correct the saturation measures. [empiric]\n --B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]\n Path to B1 fitvalues files obtained externally. Should be one .mat \n file per input MT-on image, given in this specific order: \n positive frequency saturation, negative frequency saturation.\n --B1_nominal B1_NOMINAL\n Nominal value for the B1 map. For Philips, should be 100. [100]\n --B1_smooth_dims B1_SMOOTH_DIMS\n Dimension of the squared window used for B1 smoothing, in number of voxels. [5]\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_plot_stats_per_point", + "docstring": "Plot all mean/std per point for a subject or population json file from\ntractometry-flow.\nWARNING: For population, the displayed STDs is only showing the variation\nof the means. 
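\n\nAs an illustration, a single-subject plot could be generated with a call like this (file names are hypothetical):\n\n>>> scil_plot_stats_per_point.py mean_std_per_point.json out_plots/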
\n\npositional arguments:\n in_json JSON file containing the mean/std per point. For example, can be created using scil_bundle_mean_std.py.\n out_dir Output directory.\n\noptions:\n -h, --help show this help message and exit\n --stats_over_population\n If set, consider the input stats to be over an entire population and not subject-based.\n --nb_pts NB_PTS Force the number of divisions for the bundles.\n This avoids unequal plots across datasets and replaces missing data with zeros.\n --display_means Display the subjects' means as semi-transparent lines.\n Poor results when the number of subjects is high.\n --fill_color FILL_COLOR\n Hexadecimal RGB color filling the region between mean +/- std. The hexadecimal RGB color should be formatted as 0xRRGGBB.\n --dict_colors DICT_COLORS\n Dictionary mapping basename to color. Same convention as --color.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_qball_metrics", + "docstring": "Script to compute the Constant Solid Angle (CSA) or Analytical Q-ball model,\nthe generalized fractional anisotropy (GFA) and the peaks of the model.\n\nBy default, will output all possible files, using default names. Specific names\ncan be set using the file flags listed in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Descoteaux et al MRM 2007, Aganj et al MRM 2009] for details and\n[Cote et al MEDIA 2013] for quantitative comparisons.\n\nFormerly: scil_compute_qball_metrics.py", + "help": "usage: scil_qball_metrics.py [-h] [-f] [--sh_order SH_ORDER] [--mask MASK]\n [--use_qball] [--not_all] [--gfa GFA]\n [--peaks PEAKS] [--peak_indices PEAK_INDICES]\n [--sh SH] [--nufo NUFO] [--a_power A_POWER]\n [--b0_threshold thr] [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nScript to compute the Constant Solid Angle (CSA) or Analytical Q-ball model,\nthe generalized fractional anisotropy (GFA) and the peaks of the model.\n\nBy default, will output all possible files, using default names. Specific names\ncan be set using the file flags listed in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Descoteaux et al MRM 2007, Aganj et al MRM 2009] for details and\n[Cote et al MEDIA 2013] for quantitative comparisons.\n\nFormerly: scil_compute_qball_metrics.py
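\n\nFor example, a standard run over a brain mask might look like this (file names are hypothetical):\n\n>>> scil_qball_metrics.py dwi.nii.gz dwi.bval dwi.bvec --mask brain_mask.nii.gz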
\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --sh_order SH_ORDER Spherical harmonics order. Must be a positive even number [4].\n --mask MASK Path to a binary mask. Only data inside the mask will be used for computations and reconstruction [None].\n --use_qball If set, qball will be used as the odf reconstruction model instead of CSA.\n --not_all If set, will only save the files specified using the following flags.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nFile flags:\n --gfa GFA Output filename for the generalized fractional anisotropy [gfa.nii.gz].\n --peaks PEAKS Output filename for the extracted peaks [peaks.nii.gz].\n --peak_indices PEAK_INDICES\n Output filename for the generated peaks indices on the sphere [peaks_indices.nii.gz].\n --sh SH Output filename for the spherical harmonics coefficients [sh.nii.gz].\n --nufo NUFO Output filename for the NUFO map [nufo.nii.gz].\n --a_power A_POWER Output filename for the anisotropic power map [anisotropic_power.nii.gz].\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_rgb_convert", + "docstring": "Converts an RGB image encoded as a 4D image to an RGB image encoded as\na 3D image, or vice versa.\n\nTypically, most software tools used in the SCIL (including MI-Brain) use\nthe former, while Trackvis uses the latter.\n\nInput\n-Case 1: 4D image where the 4th dimension contains 3 values.\n-Case 2: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value.\n\nOutput\n-Case 1: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value (uint8).\n-Case 2: 4D image where the 4th dimension contains 3 values (uint8).\n\nFormerly: scil_convert_rgb.py", + "help": "usage: scil_rgb_convert.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image\n\nConverts an RGB image encoded as a 4D image to an RGB image encoded as\na 3D image, or vice versa.\n\nTypically, most software tools used in the SCIL (including MI-Brain) use\nthe former, while Trackvis uses the latter.\n\nInput\n-Case 1: 4D image where the 4th dimension contains 3 values.\n-Case 2: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value.\n\nOutput\n-Case 1: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value (uint8).\n-Case 2: 4D image where the 4th dimension contains 3 values (uint8).\n\nFormerly: scil_convert_rgb.py
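\n\nFor instance, converting a 4D RGB volume to the 3D Trackvis encoding (file names are hypothetical):\n\n>>> scil_rgb_convert.py rgb_4d.nii.gz rgb_3d.nii.gz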
\n\npositional arguments:\n in_image name of input RGB image.\n Either 4D or 3D image.\n out_image name of output RGB image.\n Either 3D or 4D image.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_sh_convert", + "docstring": "Convert an SH file between two of the following basis choices:\n'descoteaux07', 'descoteaux07_legacy', 'tournier07' or 'tournier07_legacy'.\nUsing the sh_basis argument, both the input and the output SH bases must be\ngiven, in that order. For more information about the bases, see\nhttps://docs.dipy.org/stable/theory/sh_basis.html.\n\nFormerly: scil_convert_sh_basis.py", + "help": "usage: scil_sh_convert.py [-h] [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_sh out_sh\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n\nConvert an SH file between two of the following basis choices:\n'descoteaux07', 'descoteaux07_legacy', 'tournier07' or 'tournier07_legacy'.\nUsing the sh_basis argument, both the input and the output SH bases must be\ngiven, in that order. For more information about the bases, see\nhttps://docs.dipy.org/stable/theory/sh_basis.html.\n\nFormerly: scil_convert_sh_basis.py
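\n\nFor example, converting from the legacy Dipy basis to the MRtrix basis (file names are hypothetical):\n\n>>> scil_sh_convert.py sh.nii.gz sh_tournier.nii.gz descoteaux07_legacy tournier07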
\n\npositional arguments:\n in_sh Input SH filename. (nii or nii.gz)\n out_sh Output SH filename. (nii or nii.gz)\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Both the input and output bases are required, in that order.\n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy', 'tournier07']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n\noptions:\n -h, --help show this help message and exit\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_sh_fusion", + "docstring": "Merge a list of Spherical Harmonics files.\n\nThis merges the coefficients of multiple Spherical Harmonics files by taking,\nfor each coefficient, the one with the largest magnitude.\n\nCan be used to merge fODFs computed from different shells into one, while\npreserving the most relevant information.\n\nBased on [1] and [2].\n\nFormerly: scil_merge_sh.py", + "help": "usage: scil_sh_fusion.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_shs [in_shs ...] out_sh\n\nMerge a list of Spherical Harmonics files.\n\nThis merges the coefficients of multiple Spherical Harmonics files by taking,\nfor each coefficient, the one with the largest magnitude.\n\nCan be used to merge fODFs computed from different shells into one, while\npreserving the most relevant information.\n\nBased on [1] and [2].\n\nFormerly: scil_merge_sh.py
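\n\nFor instance, merging two shell-specific fODFs (file names are hypothetical):\n\n>>> scil_sh_fusion.py fodf_b1000.nii.gz fodf_b2000.nii.gz fodf_fused.nii.gz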
\n\npositional arguments:\n in_shs List of SH files.\n out_sh Output SH file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReference:\n[1] Garyfallidis, E., Zucchelli, M., Houde, J-C., Descoteaux, M.\n How to perform best ODF reconstruction from the Human Connectome\n Project sampling scheme?\n ISMRM 2014.\n\n[2] Khachaturian, M. H., Wisco, J. J., & Tuch, D. S. (2007). Boosting the\n sampling efficiency of q\u2010ball imaging using multiple wavevector fusion.\n Magnetic Resonance in Medicine: An Official Journal of the International\n Society for Magnetic Resonance in Medicine, 57(2), 289-296.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_sh_to_aodf", + "docstring": "Script to estimate asymmetric ODFs (aODFs) from a spherical harmonics image.\n\nTwo methods are available:\n * Unified filtering [1] combines four asymmetric filtering methods into\n a single equation and relies on a combination of four Gaussian filters.\n * Cosine filtering [2] is a simpler implementation using cosine distance\n for assigning weights to neighbours.\n\nUnified filtering can be accelerated using OpenCL with the option --use_opencl.\nMake sure you have pyopencl installed before using this option. By default, the\nOpenCL program will run on the CPU. To use a GPU instead, also specify the\noption --device gpu.", + "help": "usage: scil_sh_to_aodf.py [-h] [--out_sym OUT_SYM]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--method {unified,cosine}]\n [--sigma_spatial SIGMA_SPATIAL]\n [--sigma_align SIGMA_ALIGN]\n [--sigma_range SIGMA_RANGE]\n [--sigma_angle SIGMA_ANGLE] [--disable_spatial]\n [--disable_align] [--disable_range]\n [--include_center] [--win_hwidth WIN_HWIDTH]\n [--sharpness SHARPNESS] [--device {cpu,gpu}]\n [--use_opencl] [--patch_size PATCH_SIZE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_sh\n\nScript to estimate asymmetric ODFs (aODFs) from a spherical harmonics image.\n\nTwo methods are available:\n * Unified filtering [1] combines four asymmetric filtering methods into\n a single equation and relies on a combination of four Gaussian filters.\n * Cosine filtering [2] is a simpler implementation using cosine distance\n for assigning weights to neighbours.\n\nUnified filtering can be accelerated using OpenCL with the option --use_opencl.\nMake sure you have pyopencl installed before using this option. By default, the\nOpenCL program will run on the CPU. To use a GPU instead, also specify the\noption --device gpu.
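\n\nAs an illustration, unified filtering accelerated with OpenCL might be run as (file names are hypothetical):\n\n>>> scil_sh_to_aodf.py fodf.nii.gz afodf.nii.gz --method unified --use_opencl --device gpu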
\n\npositional arguments:\n in_sh Path to the input file.\n out_sh File name for the averaged signal.\n\noptions:\n -h, --help show this help message and exit\n --out_sym OUT_SYM Name of optional symmetric output. [None]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere used for the SH to SF projection. [repulsion200]\n --method {unified,cosine}\n Method for estimating asymmetric ODFs [unified].\n One of:\n 'unified': Unified filtering [1].\n 'cosine' : Cosine-based filtering [2].\n --device {cpu,gpu} Device to use for execution. [cpu]\n --use_opencl Accelerate code using OpenCL (requires pyopencl\n and a working OpenCL implementation).\n --patch_size PATCH_SIZE\n OpenCL patch size. [40]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nShared filter arguments:\n --sigma_spatial SIGMA_SPATIAL\n Standard deviation for spatial distance. [1.0]\n\nUnified filter arguments:\n --sigma_align SIGMA_ALIGN\n Standard deviation for alignment filter. [0.8]\n --sigma_range SIGMA_RANGE\n Standard deviation for range filter\n *relative to SF range of image*. [0.2]\n --sigma_angle SIGMA_ANGLE\n Standard deviation for angular filter\n (disabled by default).\n --disable_spatial Disable spatial filtering.\n --disable_align Disable alignment filtering.\n --disable_range Disable range filtering.\n --include_center Include center voxel in neighbourhood.\n --win_hwidth WIN_HWIDTH\n Filtering window half-width. Defaults to 3*sigma_spatial.\n\nCosine filter arguments:\n --sharpness SHARPNESS\n Specify sharpness factor to use for\n weighted average. [1.0]\n\n[1] Poirier and Descoteaux, 2024, \"A Unified Filtering Method for Estimating\n Asymmetric Orientation Distribution Functions\", Neuroimage, vol. 287,\n https://doi.org/10.1016/j.neuroimage.2024.120516\n\n[2] Poirier et al, 2021, \"Investigating the Occurrence of Asymmetric Patterns\n in White Matter Fiber Orientation Distribution Functions\", ISMRM 2021\n (abstract 0865)\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_sh_to_rish", + "docstring": "Compute the RISH (Rotationally Invariant Spherical Harmonics) features of an SH\nsignal [1].\n\nEach RISH feature map is the total energy of its associated order.\nMathematically, it is the sum of the squared SH coefficients of the SH order.\n\nThis script supports both symmetrical and asymmetrical SH images as input, of\nany SH order.\n\nEach RISH feature will be saved as a separate file.\n\n[1] Mirzaalian, Hengameh, et al. \"Harmonizing diffusion MRI data across\nmultiple sites and scanners.\" MICCAI 2015.\nhttps://scholar.harvard.edu/files/hengameh/files/miccai2015.pdf\n\nFormerly: scil_compute_rish_from_sh.py", + "help": "usage: scil_sh_to_rish.py [-h] [--full_basis] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_prefix\n\nCompute the RISH (Rotationally Invariant Spherical Harmonics) features of an SH\nsignal [1].\n\nEach RISH feature map is the total energy of its associated order.\nMathematically, it is the sum of the squared SH coefficients of the SH order.\n\nThis script supports both symmetrical and asymmetrical SH images as input, of\nany SH order.\n\nEach RISH feature will be saved as a separate file.\n\n[1] Mirzaalian, Hengameh, et al. \"Harmonizing diffusion MRI data across\nmultiple sites and scanners.\" MICCAI 2015.\nhttps://scholar.harvard.edu/files/hengameh/files/miccai2015.pdf\n\nFormerly: scil_compute_rish_from_sh.py
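\n\nIn other words, for order l the feature is RISH_l = sum over m of (c_lm)^2, taken over the 2l+1 coefficients of that order. An illustrative call (file names are hypothetical):\n\n>>> scil_sh_to_rish.py sh.nii.gz sub01_rish --mask brain_mask.nii.gz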
\n\npositional arguments:\n in_sh Path of the sh image. It can be formatted in any sh basis, but we \n expect it to be a symmetrical one. Else, provide --full_basis.\n out_prefix Prefix of the output RISH files to save. Suffixes will be \n based on the sh orders.\n\noptions:\n -h, --help show this help message and exit\n --full_basis Input SH image uses a full SH basis (asymmetrical).\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computation.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_sh_to_sf", + "docstring": "Script to sample SF values from a Spherical Harmonics signal. Outputs a Nifti\nfile with the SF values and an associated .bvec file with the chosen\ndirections.\n\nIf converting from SH to a DWI-like SF volume, --in_bval and --in_b0 need\nto be provided to concatenate the b0 image to the SF, and to generate the new\nbvals file. Otherwise, no .bval file will be created.\n\nFormerly: scil_compute_sf_from_sh.py", + "help": "usage: scil_sh_to_sf.py [-h]\n (--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724} | --in_bvec IN_BVEC)\n [--dtype {float32,float64}] [--in_bval IN_BVAL]\n [--in_b0 IN_B0] [--out_bval OUT_BVAL]\n [--out_bvec OUT_BVEC] [--b0_scaling]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--full_basis] [--b0_threshold thr] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_sf\n\nScript to sample SF values from a Spherical Harmonics signal. Outputs a Nifti\nfile with the SF values and an associated .bvec file with the chosen\ndirections.\n\nIf converting from SH to a DWI-like SF volume, --in_bval and --in_b0 need\nto be provided to concatenate the b0 image to the SF, and to generate the new\nbvals file. Otherwise, no .bval file will be created.\n\nFormerly: scil_compute_sf_from_sh.py
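\n\nFor example, sampling the SF on a sphere (file names are hypothetical):\n\n>>> scil_sh_to_sf.py sh.nii.gz sf.nii.gz --sphere repulsion724 --out_bvec sf.bvec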
\n\npositional arguments:\n in_sh Path of the SH volume.\n out_sf Name of the output SF file to save (bvals/bvecs will be automatically named when necessary).\n\noptions:\n -h, --help show this help message and exit\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere used for the SH to SF projection. \n --in_bvec IN_BVEC Directions used for the SH to SF projection. \n If given, --in_bval must also be provided.\n --dtype {float32,float64}\n Datatype to use for SF computation and output array. [float32]\n --in_bval IN_BVAL b-value file, in FSL format, used to assign a b-value to the \n output SF and generate a `.bval` file.\n - If used, --out_bval is required.\n - The output bval will contain one b-value per point in the SF \n output (i.e. one per point on the --sphere or one per --in_bvec.)\n - The values of the output bval will all be set to the same b-value:\n the average of your in_bval. (Any b0 found in this file, i.e. \n b-values under --b0_threshold, will be removed beforehand.)\n - To add b0s to both the SF volume and the --out_bval file, use --in_b0.\n --in_b0 IN_B0 b0 volume to concatenate to the final SF volume.\n --out_bval OUT_BVAL Optional output bval file.\n --out_bvec OUT_BVEC Optional output bvec file.\n --b0_scaling Scale resulting SF by the b0 image (--in_b0 must be given).\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --full_basis If true, use a full basis for the input SH coefficients.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n Default if not set is 20.\n This value is used with option --in_bval only: any b0 found in the in_bval will be removed.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_stats_group_comparison", + "docstring": "Run group comparison statistics on metrics from tractometry:\n1) Separate the sample given a particular variable (group_by) into groups\n\n2) Does Shapiro-Wilk test of normality for every sample\nhttps://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test\n\n3) Does Levene or Bartlett (depending on normality) test of variance\nhomogeneity.\nLevene:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm\nBartlett:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm\n\n4) Test the group difference for every measure with the correct test depending\n on the sample (Student, Welch, Mann-Whitney U, ANOVA, Kruskal-Wallis)\nStudent :\nhttps://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test\nWelch :\nhttps://en.wikipedia.org/wiki/Welch%27s_t-test\nMann-Whitney U :\nhttps://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test\nANOVA :\nhttp://www.biostathandbook.com/onewayanova.html\nKruskal-Wallis :\nhttps://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variance\n\n5) If the group difference test is positive and the number of groups is greater than\n 2, test the group difference two by two.\n\n6) Generate the results for all metrics and bundles\n\nFormerly: scil_group_comparison.py", + "help": "usage: scil_stats_group_comparison.py [-h] [--out_dir OUT_DIR]\n [--out_json OUT_JSON]\n [--bundles BUNDLES [BUNDLES ...]]\n [--metrics METRICS [METRICS ...]]\n [--values VALUES [VALUES ...]]\n [--alpha_error ALPHA_ERROR]\n [--generate_graph] [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n IN_JSON IN_PARTICIPANTS GROUP_BY\n\nRun group comparison statistics on metrics from tractometry:\n1) Separate the sample given a particular variable (group_by) into groups\n\n2) Does Shapiro-Wilk test of normality for every sample\nhttps://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test\n\n3) Does Levene or Bartlett (depending on normality) test of variance\nhomogeneity.\nLevene:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm\nBartlett:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm\n\n4) Test the group difference for every measure with the correct test depending\n on the sample (Student, Welch, Mann-Whitney U, ANOVA, Kruskal-Wallis)\nStudent :\nhttps://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test\nWelch :\nhttps://en.wikipedia.org/wiki/Welch%27s_t-test\nMann-Whitney U :\nhttps://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test\nANOVA :\nhttp://www.biostathandbook.com/onewayanova.html\nKruskal-Wallis :\nhttps://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variance\n\n5) If the group difference test is positive and the number of groups is greater than\n 2, test the group difference two by two.\n\n6) Generate the results for all metrics and bundles\n\nFormerly: scil_group_comparison.py
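\n\nA typical call could look like this (file names are hypothetical, and 'Group' stands for a column of the participants file):\n\n>>> scil_stats_group_comparison.py tractometry.json participants.tsv Group --out_json group_stats.json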
\n\npositional arguments:\n IN_JSON Input JSON file from tractometry nextflow pipeline or equivalent.\n IN_PARTICIPANTS Input tsv participants file. See doc in https://scilpy.readthedocs.io/en/latest/documentation/construct_participants_tsv_file.html.\n GROUP_BY Variable that will be used to compare groups together.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Name of the output folder path. [stats]\n --out_json OUT_JSON The name of the result json output file; otherwise it will be printed.\n --bundles BUNDLES [BUNDLES ...], -b BUNDLES [BUNDLES ...]\n Bundle(s) in which you want to do stats. [all]\n --metrics METRICS [METRICS ...], -m METRICS [METRICS ...]\n Metric(s) on which you want to do stats. [all]\n --values VALUES [VALUES ...], --va VALUES [VALUES ...]\n Value(s) on which you want to do stats (mean, std). [all]\n --alpha_error ALPHA_ERROR, -a ALPHA_ERROR\n Type 1 error for all the tests. [0.05]\n --generate_graph, --gg\n Generate a simple plot of every metric across groups.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_surface_apply_transform", + "docstring": "Script to apply a transform to a surface (FreeSurfer or VTK supported),\nusing output from ANTs registration tools (e.g. affine.txt, warp.nii.gz).\n\nExample usage from T1 to b0 using ANTs transforms:\n> ConvertTransformFile 3 output0GenericAffine.mat vtk_transfo.txt --hm\n> scil_surface_apply_transform.py lh_white_lps.vtk affine.txt lh_white_b0.vtk\\\n --ants_warp warp.nii.gz\n\nImportant: The input surface needs to be in *T1 world LPS* coordinates\n(aligned over the T1 in MI-Brain).\n\nThe script will use the linear affine first and then the warp image.\nThe resulting surface will be in *b0 world LPS* coordinates\n(aligned over the b0 in MI-Brain).\n\nFormerly: scil_apply_transform_to_surface.py.", + "help": "usage: scil_surface_apply_transform.py [-h] [--ants_warp ANTS_WARP]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface ants_affine out_surface\n\nScript to apply a transform to a surface (FreeSurfer or VTK supported),\nusing output from ANTs registration tools (e.g. affine.txt, warp.nii.gz).\n\nExample usage from T1 to b0 using ANTs transforms:\n> ConvertTransformFile 3 output0GenericAffine.mat vtk_transfo.txt --hm\n> scil_surface_apply_transform.py lh_white_lps.vtk affine.txt lh_white_b0.vtk\\\n --ants_warp warp.nii.gz\n\nImportant: The input surface needs to be in *T1 world LPS* coordinates\n(aligned over the T1 in MI-Brain).\n\nThe script will use the linear affine first and then the warp image.\nThe resulting surface will be in *b0 world LPS* coordinates\n(aligned over the b0 in MI-Brain).\n\nFormerly: scil_apply_transform_to_surface.py.\n\npositional arguments:\n in_surface Input surface (.vtk).\n ants_affine Affine transform from ANTs (.txt or .mat).\n out_surface Output surface (.vtk).\n\noptions:\n -h, --help show this help message and exit\n --ants_warp ANTS_WARP\n Warp image from ANTs (Nifti image).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). NeuroImage.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_surface_convert", + "docstring": "Script to convert surface formats\n\nSupported formats:\n \".vtk\", \".vtp\", \".ply\", \".stl\", \".xml\", \".obj\"\n and FreeSurfer surfaces\n\n> scil_surface_convert.py surf.vtk converted_surf.ply\n\nFormerly: scil_convert_surface.py", + "help": "usage: scil_surface_convert.py [-h] [--xform XFORM] [--to_lps]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface\n\nScript to convert surface formats\n\nSupported formats:\n \".vtk\", \".vtp\", \".ply\", \".stl\", \".xml\", \".obj\"\n and FreeSurfer surfaces\n\n> scil_surface_convert.py surf.vtk converted_surf.ply\n\nFormerly: scil_convert_surface.py\n\npositional arguments:\n in_surface Input surface (FreeSurfer or supported by VTK).\n out_surface Output surface (formats supported by VTK).\n\noptions:\n -h, --help show this help message and exit\n --xform XFORM Path of the copy-pasted output from mri_info. \n Using: mri_info $input >> log.txt, \n the file log.txt would be this parameter.\n --to_lps Flip for Surface/MI-Brain LPS\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). NeuroImage.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_surface_flip", + "docstring": "Script to flip a given surface (FreeSurfer or VTK supported).\n\nCan flip surface coordinates around a single or multiple axes.\nCan also be used to reverse the orientation of the surface normals.\n\nFormerly: scil_flip_surface.py", + "help": "usage: scil_surface_flip.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface {x,y,z,n} [{x,y,z,n} ...]\n\nScript to flip a given surface (FreeSurfer or VTK supported).\n\nCan flip surface coordinates around a single or multiple axes.\nCan also be used to reverse the orientation of the surface normals.\n\nFormerly: scil_flip_surface.py
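\n\nFor instance, flipping the x axis of a surface (file names are hypothetical):\n\n> scil_surface_flip.py lh_white.vtk lh_white_flipped.vtk x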
\n\npositional arguments:\n in_surface Input surface (.vtk).\n out_surface Output flipped surface (.vtk).\n {x,y,z,n} The axes you want to flip. E.g. to flip the x and y axes use: x y. \n To reverse the surface normals use: n\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). NeuroImage.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_surface_smooth", + "docstring": "Script to smooth a surface with a Laplacian blur.\n\nFor a standard FreeSurfer white matter mesh, a step_size from 0.1 to 10\nis recommended.\n\nSmoothing time = step_size * nb_steps\n small amount of smoothing [step_size 1, nb_steps 10]\n moderate amount of smoothing [step_size 10, nb_steps 100]\n large amount of smoothing [step_size 100, nb_steps 1000]\n\nFormerly: scil_smooth_surface.py", + "help": "usage: scil_surface_smooth.py [-h] [-m VTS_MASK] [-n NB_STEPS] [-s STEP_SIZE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface\n\nScript to smooth a surface with a Laplacian blur.\n\nFor a standard FreeSurfer white matter mesh, a step_size from 0.1 to 10\nis recommended.\n\nSmoothing time = step_size * nb_steps\n small amount of smoothing [step_size 1, nb_steps 10]\n moderate amount of smoothing [step_size 10, nb_steps 100]\n large amount of smoothing [step_size 100, nb_steps 1000]\n\nFormerly: scil_smooth_surface.py
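\n\nFor a moderate amount of smoothing, for instance (file names are hypothetical):\n\n> scil_surface_smooth.py lh_white.vtk lh_white_smooth.vtk -s 10 -n 100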
\n\npositional arguments:\n in_surface Input surface (.vtk).\n out_surface Output smoothed surface (.vtk).\n\noptions:\n -h, --help show this help message and exit\n -m VTS_MASK, --vts_mask VTS_MASK\n Vertex mask; no smoothing where mask equals 0 (.npy).\n -n NB_STEPS, --nb_steps NB_STEPS\n Number of steps for the Laplacian smooth [2].\n -s STEP_SIZE, --step_size STEP_SIZE\n Laplacian smooth step size [5.0].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). NeuroImage.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tracking_local", + "docstring": "Local streamline HARDI tractography.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\n\nWARNING: This script DOES NOT support asymmetric FODF input (aFODF).\n\nAlgo 'eudx': select the peak from the spherical function (SF) most closely\naligned to the previous direction, and follow an average of it and the previous\ndirection [1].\n\nAlgo 'det': select the orientation corresponding to the maximum of the\nspherical function.\n\nAlgo 'prob': select a direction drawn from the empirical distribution function\ndefined from the SF.\n\nAlgo 'ptt': select the propagation direction using the Parallel-Transport\nTractography (PTT) framework; see [2] for more details.\n\nNOTE: eudx can be used with pre-computed peaks from fodf as well as\nevecs_v1.nii.gz from scil_dti_metrics.py (experimental).\n\nNOTE: If tracking with PTT, the step-size should be smaller than usual,\ni.e. 0.1-0.2 mm or lower. The maximum angle between segments (theta) should\nbe between 10 and 20 degrees.\n\nThe local tracking algorithm can also run on the GPU using the --use_gpu\noption (experimental). By default, GPU tracking behaves the same as\nDIPY. Below is a list of known divergences between the CPU and GPU\nimplementations:\n * Backend: The CPU implementation uses DIPY's LocalTracking and the\n GPU implementation uses an in-house OpenCL implementation.\n * Algo: For the GPU implementation, the only available algorithm is\n Algo 'prob'.\n * SH interpolation: For GPU tracking, SH interpolation can be set to either\n nearest neighbour or trilinear (default). With DIPY, the only available\n method is trilinear.\n * Forward tracking: For GPU tracking, the `--forward_only` flag can be used\n to disable backward tracking. This option isn't available for CPU\n tracking.\n\nAll the input nifti files must be in isotropic resolution.\n\nReferences\n----------\n[1]: Garyfallidis, E. (2012). Towards an accurate brain tractography\n[PhD thesis]. University of Cambridge. United Kingdom.\n\n[2]: Aydogan, D. B., & Shi, Y. (2020). Parallel transport tractography.\nIEEE transactions on medical imaging, 40(2), 635-647.\n\nFormerly: scil_compute_local_tracking.py", + "help": "usage: scil_tracking_local.py [-h] [--step STEP_SIZE] [--min_length m]\n [--max_length M] [--theta THETA]\n [--sfthres sf_th]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--sh_to_pmf]\n [--algo {det,prob,ptt,eudx}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--sub_sphere SUB_SPHERE]\n [--probe_length PROBE_LENGTH]\n [--probe_radius PROBE_RADIUS]\n [--probe_quality PROBE_QUALITY]\n [--probe_count PROBE_COUNT]\n [--support_exponent SUPPORT_EXPONENT]\n [--use_gpu] [--sh_interp {trilinear,nearest}]\n [--forward_only] [--batch_size BATCH_SIZE]\n [--compress [COMPRESS_TH]] [-f] [--save_seeds]\n [--seed SEED] [-v [{DEBUG,INFO,WARNING}]]\n in_odf in_seed in_mask out_tractogram\n\nLocal streamline HARDI tractography.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\n\nWARNING: This script DOES NOT support asymmetric FODF input (aFODF).\n\nAlgo 'eudx': select the peak from the spherical function (SF) most closely\naligned to the previous direction, and follow an average of it and the previous\ndirection [1].\n\nAlgo 'det': select the orientation corresponding to the maximum of the\nspherical function.\n\nAlgo 'prob': select a direction drawn from the empirical distribution function\ndefined from the SF.\n\nAlgo 'ptt': select the propagation direction using the Parallel-Transport\nTractography (PTT) framework; see [2] for more details.\n\nNOTE: eudx can be used with pre-computed peaks from fodf as well as\nevecs_v1.nii.gz from scil_dti_metrics.py (experimental).\n\nNOTE: If tracking with PTT, the step-size should be smaller than usual,\ni.e. 0.1-0.2 mm or lower. The maximum angle between segments (theta) should\nbe between 10 and 20 degrees.\n\nThe local tracking algorithm can also run on the GPU using the --use_gpu\noption (experimental). By default, GPU tracking behaves the same as\nDIPY. Below is a list of known divergences between the CPU and GPU\nimplementations:\n * Backend: The CPU implementation uses DIPY's LocalTracking and the\n GPU implementation uses an in-house OpenCL implementation.\n * Algo: For the GPU implementation, the only available algorithm is\n Algo 'prob'.\n * SH interpolation: For GPU tracking, SH interpolation can be set to either\n nearest neighbour or trilinear (default). With DIPY, the only available\n method is trilinear.\n * Forward tracking: For GPU tracking, the `--forward_only` flag can be used\n to disable backward tracking. This option isn't available for CPU\n tracking.\n\nAll the input nifti files must be in isotropic resolution.\n\nReferences\n----------\n[1]: Garyfallidis, E. (2012). Towards an accurate brain tractography\n[PhD thesis]. University of Cambridge. United Kingdom.\n\n[2]: Aydogan, D. B., & Shi, Y. (2020). Parallel transport tractography.\nIEEE transactions on medical imaging, 40(2), 635-647.\n\nFormerly: scil_compute_local_tracking.py
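\n\nAn illustrative probabilistic run (file names are hypothetical):\n\n>>> scil_tracking_local.py fodf.nii.gz seed_mask.nii.gz wm_mask.nii.gz tractogram.trk --algo prob --npv 10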
\n\npositional arguments:\n in_odf File containing the orientation diffusion function \n as spherical harmonics file (.nii.gz). Ex: ODF or fODF.\n in_seed Seeding mask (.nii.gz).\n in_mask Tracking mask (.nii.gz).\n Tracking will stop outside this mask. The last point of each \n streamline (triggering the stopping criteria) IS added to the streamline.\n out_tractogram Tractogram output file (must be .trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n\nTracking options:\n --step STEP_SIZE Step size in mm. [0.5]\n --min_length m Minimum length of a streamline in mm. [10.0]\n --max_length M Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. If the angle is too big, streamline is \n stopped and the following point is NOT included.\n [\"eudx\"=60, \"det\"=45, \"prob\"=20, \"ptt\"=20]\n --sfthres sf_th Spherical function relative threshold. [0.1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sh_to_pmf If set, map spherical harmonics to spherical function (pmf) before \n tracking (faster, requires more memory).\n --algo {det,prob,ptt,eudx}\n Algorithm to use. [prob]\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Dipy sphere; set of possible directions.\n Default: [repulsion724]\n --sub_sphere SUB_SPHERE\n Subdivides each face of the sphere into 4^s new faces. [0]\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nPTT options:\n --probe_length PROBE_LENGTH\n The length of the probes. Smaller value\n yields more dispersed fibers. [1.0]\n --probe_radius PROBE_RADIUS\n The radius of the probe. A large probe_radius\n helps mitigate noise in the pmf but it might\n make it harder to sample thin and intricate\n connections; also, the boundary of fiber\n bundles might be eroded. [0]\n --probe_quality PROBE_QUALITY\n The quality of the probe. This parameter sets\n the number of segments to split the cylinder\n along the length of the probe (minimum=2). [3]\n --probe_count PROBE_COUNT\n The number of probes. This parameter sets the\n number of parallel lines used to model the\n cylinder (minimum=1). [1]\n --support_exponent SUPPORT_EXPONENT\n Data support exponent, used for rejection\n sampling. [3]
\n\nGPU options:\n --use_gpu Enable GPU tracking (experimental).\n --sh_interp {trilinear,nearest}\n SH image interpolation method. [trilinear]\n --forward_only Perform forward tracking only.\n --batch_size BATCH_SIZE\n Approximate size of GPU batches (number\n of streamlines to track in parallel). [10000]\n\nOutput options:\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm.\n A rule of thumb is to set it to 0.1mm for deterministic \n streamlines and to 0.2mm for probabilistic streamlines. [0.1]\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n Hint: you can then use scil_tractogram_seed_density_map.\n --seed SEED Random number generator seed.\n\nLogging options:\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tracking_local_dev", + "docstring": "Local streamline HARDI tractography using scilpy-only methods -- no dipy (i.e.\nno cython). The goal of this is to have a python-only version that can be\nmodified more easily by our team when testing new algorithms and parameters,\nand that can be used as a parent class in sub-projects of our lab such as in\ndwi_ml.\n\nWARNING. MUCH SLOWER THAN scil_tracking_local.py. We recommend using\nmultiprocessing with the option --processes.\n\nSimilar to scil_tracking_local:\n The tracking direction is chosen in the aperture cone defined by the\n previous tracking direction and the angular constraint.\n - Algo 'det': the maxima of the spherical function (SF) the most closely\n aligned to the previous direction.\n - Algo 'prob': a direction drawn from the empirical distribution function\n defined from the SF.\n\nContrary to scil_tracking_local:\n - Algo 'eudx' is not yet available!\n - Input nifti files do not necessarily need to be in isotropic resolution.\n - The script works with asymmetric input ODF.\n - The interpolation for the tracking mask and spherical function can be\n one of 'nearest' or 'trilinear'.\n - Runge-Kutta integration is supported for the step function.\n\nA few notes on Runge-Kutta integration.\n 1. Runge-Kutta integration is used to approximate the next tracking\n direction by estimating directions from future tracking steps. This\n works well for deterministic tracking. However, in the context of\n probabilistic tracking, the next tracking directions cannot be estimated\n in advance, because they are picked randomly from a distribution. It is\n therefore recommended to keep the rk_order at 1 for probabilistic\n tracking.\n 2. As a rule of thumb, doubling the rk_order will double the computation\n time in the worst case.\n\nReferences: [1] Girard, G., Whittingstall K., Deriche, R., and\n Descoteaux, M. (2014). Towards quantitative connectivity analysis:\n reducing tractography biases. Neuroimage, 98, 266-278.\n\nFormerly: scil_compute_local_tracking_dev.py", + "help": "usage: scil_tracking_local_dev.py [-h] [--step STEP_SIZE] [--min_length m]\n [--max_length M] [--theta THETA]\n [--sfthres sf_th]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--algo {det,prob}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--sub_sphere SUB_SPHERE]\n [--sfthres_init sf_th] [--rk_order K]\n [--max_invalid_nb_points MAX]\n [--forward_only]\n [--sh_interp {nearest,trilinear}]\n [--mask_interp {nearest,trilinear}]\n [--keep_last_out_point]\n [--n_repeats_per_seed N_REPEATS_PER_SEED]\n [--rng_seed RNG_SEED] [--skip SKIP]\n [--processes NBR] [--compress [COMPRESS_TH]]\n [-f] [--save_seeds]\n [-v [{DEBUG,INFO,WARNING}]]\n in_odf in_seed in_mask out_tractogram\n\nLocal streamline HARDI tractography using scilpy-only methods -- no dipy (i.e.\nno cython). The goal of this is to have a python-only version that can be\nmodified more easily by our team when testing new algorithms and parameters,\nand that can be used as a parent class in sub-projects of our lab such as in\ndwi_ml.\n\nWARNING. MUCH SLOWER THAN scil_tracking_local.py. We recommend using\nmultiprocessing with the option --processes.\n\nSimilar to scil_tracking_local:\n The tracking direction is chosen in the aperture cone defined by the\n previous tracking direction and the angular constraint.\n - Algo 'det': the maxima of the spherical function (SF) the most closely\n aligned to the previous direction.\n - Algo 'prob': a direction drawn from the empirical distribution function\n defined from the SF.\n\nContrary to scil_tracking_local:\n - Algo 'eudx' is not yet available!\n - Input nifti files do not necessarily need to be in isotropic resolution.\n - The script works with asymmetric input ODF.\n - The interpolation for the tracking mask and spherical function can be\n one of 'nearest' or 'trilinear'.\n - Runge-Kutta integration is supported for the step function.\n\nA few notes on Runge-Kutta integration.\n 1. Runge-Kutta integration is used to approximate the next tracking\n direction by estimating directions from future tracking steps. This\n works well for deterministic tracking. However, in the context of\n probabilistic tracking, the next tracking directions cannot be estimated\n in advance, because they are picked randomly from a distribution. It is\n therefore recommended to keep the rk_order at 1 for probabilistic\n tracking.\n 2. As a rule of thumb, doubling the rk_order will double the computation\n time in the worst case.\n\nReferences: [1] Girard, G., Whittingstall K., Deriche, R., and\n Descoteaux, M. (2014). Towards quantitative connectivity analysis:\n reducing tractography biases. Neuroimage, 98, 266-278.\n\nFormerly: scil_compute_local_tracking_dev.py
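\n\nFor example, deterministic tracking with order-2 Runge-Kutta integration and 4 processes (file names are hypothetical):\n\n>>> scil_tracking_local_dev.py fodf.nii.gz seed_mask.nii.gz wm_mask.nii.gz tractogram.trk --algo det --rk_order 2 --processes 4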
\n\npositional arguments:\n in_odf File containing the orientation diffusion function \n as spherical harmonics file (.nii.gz). Ex: ODF or fODF.\n in_seed Seeding mask (.nii.gz).\n in_mask Tracking mask (.nii.gz).\n Tracking will stop outside this mask. The last point of each \n streamline (triggering the stopping criteria) IS added to the streamline.\n out_tractogram Tractogram output file (must be .trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nTracking options:\n --step STEP_SIZE Step size in mm. [0.5]\n --min_length m Minimum length of a streamline in mm. [10.0]\n --max_length M Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. If the angle is too big, streamline is \n stopped and the following point is NOT included.\n [\"eudx\"=60, \"det\"=45, \"prob\"=20, \"ptt\"=20]\n --sfthres sf_th Spherical function relative threshold. [0.1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --algo {det,prob} Algorithm to use. [prob]\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Dipy sphere; set of possible directions.\n Default: [repulsion724]\n --sub_sphere SUB_SPHERE\n Subdivides each face of the sphere into 4^s new faces. [0]\n --sfthres_init sf_th Spherical function relative threshold value for the \n initial direction. [0.5]\n --rk_order K The order of the Runge-Kutta integration used for the step function.\n For more information, refer to the note in the script description. [1]\n --max_invalid_nb_points MAX\n Maximum number of steps without valid direction, \n e.g. if the threshold on the ODF or the max angle is reached.\n Default: 0, i.e. do not add points following an invalid direction.\n --forward_only If set, tracks in one direction only (forward) given the \n initial seed. The direction is randomly drawn from the ODF.\n --sh_interp {nearest,trilinear}\n Spherical harmonic interpolation: nearest-neighbor \n or trilinear. [trilinear]\n --mask_interp {nearest,trilinear}\n Mask interpolation: nearest-neighbor or trilinear. [nearest]\n --keep_last_out_point\n If set, keep the last point (once out of the tracking mask) of \n the streamline. Default: discard them. This is the default in \n Dipy too. Note that points obtained after an invalid direction \n (e.g. when the angle is too sharp or sh_threshold is not reached) are \n never added.\n --n_repeats_per_seed N_REPEATS_PER_SEED\n By default, each seed position is used only once. This option\n allows for tracking from the exact same seed n_repeats_per_seed\n times. [1]\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nRandom seeding options:\n --rng_seed RNG_SEED Initial value for the random number generator. [0]\n --skip SKIP Skip the first N random numbers. \n Useful if you want to create new streamlines to add to \n a previously created tractogram with a fixed --rng_seed.\n Ex: If tractogram_1 was created with -nt 1,000,000, \n you can create tractogram_2 with \n --skip 1,000,000.\n\nMemory options:\n --processes NBR Number of sub-processes to start. \n Default: [1]\n\nOutput options:\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm.\n A rule of thumb is to set it to 0.1mm for deterministic \n streamlines and to 0.2mm for probabilistic streamlines. [0.1]
\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n Hint: you can then use scil_tractogram_seed_density_map.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tracking_pft", + "docstring": "Local streamline HARDI tractography including Particle Filtering tracking.\n\nWARNING: This script DOES NOT support asymmetric FODF input (aFODF).\n\nThe tracking is done inside partial volume estimation maps and uses the\nparticle filtering tractography (PFT) algorithm. See\nscil_tracking_pft_maps.py to generate the maps required by PFT.\n\nStreamlines longer than min_length and shorter than max_length are kept.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\nDefault parameters as suggested in [1].\n\nAlgo 'det': the maxima of the spherical function (SF) the most closely aligned\nto the previous direction.\nAlgo 'prob': a direction drawn from the empirical distribution function defined\nfrom the SF.\n\nFor streamline compression, a rule of thumb is to set it to 0.1mm for the\ndeterministic algorithm and 0.2mm for the probabilistic algorithm.\n\nAll the input nifti files must be in isotropic resolution.\n\nFormerly: scil_compute_pft.py", + "help": "usage: scil_tracking_pft.py [-h] [--algo {det,prob}] [--step STEP_SIZE]\n [--min_length MIN_LENGTH]\n [--max_length MAX_LENGTH] [--theta THETA] [--act]\n [--sfthres SF_THRESHOLD]\n [--sfthres_init SF_THRESHOLD_INIT]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--particles PARTICLES]\n [--back BACK_TRACKING]\n [--forward FORWARD_TRACKING] [--all] [--seed SEED]\n [-f] [--save_seeds] [--compress [COMPRESS_TH]]\n [-v [{DEBUG,INFO,WARNING}]]\n in_sh in_seed in_map_include map_exclude_file\n out_tractogram\n\nLocal streamline HARDI tractography including Particle Filtering tracking.\n\nWARNING: This script DOES NOT support asymmetric FODF input (aFODF).\n\nThe tracking is done inside partial volume estimation maps and uses the\nparticle filtering tractography (PFT) algorithm. See\nscil_tracking_pft_maps.py to generate the maps required by PFT.\n\nStreamlines longer than min_length and shorter than max_length are kept.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\nDefault parameters as suggested in [1].\n\nAlgo 'det': the maxima of the spherical function (SF) the most closely aligned\nto the previous direction.\nAlgo 'prob': a direction drawn from the empirical distribution function defined\nfrom the SF.\n\nFor streamline compression, a rule of thumb is to set it to 0.1mm for the\ndeterministic algorithm and 0.2mm for the probabilistic algorithm.\n\nAll the input nifti files must be in isotropic resolution.\n\nFormerly: scil_compute_pft.py
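\n\nFor example, PFT with CMC maps (file names are hypothetical):\n\n>>> scil_tracking_pft.py fodf.nii.gz interface.nii.gz map_include.nii.gz map_exclude.nii.gz tractogram.trk --algo prob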
See\nscil_tracking_pft_maps.py to generate the required PFT maps.\n\nStreamlines longer than min_length and shorter than max_length are kept.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\nDefault parameters as suggested in [1].\n\nAlgo 'det': the maximum of the spherical function (SF) most closely aligned\nto the previous direction.\nAlgo 'prob': a direction drawn from the empirical distribution function defined\nfrom the SF.\n\nFor streamline compression, a rule of thumb is to set it to 0.1mm for the\ndeterministic algorithm and 0.2mm for the probabilistic algorithm.\n\nAll the input nifti files must be in isotropic resolution.\n\nFormerly: scil_compute_pft.py\n\npositional arguments:\n in_sh Spherical harmonic file (.nii.gz).\n in_seed Seeding mask (.nii.gz).\n in_map_include The probability map (.nii.gz) of ending the\n streamline and including it in the output (CMC, PFT [1])\n map_exclude_file The probability map (.nii.gz) of ending the\n streamline and excluding it in the output (CMC, PFT [1]).\n out_tractogram Tractogram output file (must be .trk or .tck).\n\nGeneric options:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nTracking options:\n --algo {det,prob} Algorithm to use (must be \"det\" or \"prob\"). [prob]\n --step STEP_SIZE Step size in mm. [0.2]\n --min_length MIN_LENGTH\n Minimum length of a streamline in mm. [10.0]\n --max_length MAX_LENGTH\n Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. [\"det\"=45, \"prob\"=20]\n --act If set, uses anatomically-constrained tractography (ACT) \n instead of continuous map criterion (CMC).\n --sfthres SF_THRESHOLD\n Spherical function relative threshold. [0.1]\n --sfthres_init SF_THRESHOLD_INIT\n Spherical function relative threshold value for the \n initial direction. [0.5]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nPFT options:\n --particles PARTICLES\n Number of particles to use for PFT. [15]\n --back BACK_TRACKING Length of PFT back tracking (mm). [2.0]\n --forward FORWARD_TRACKING\n Length of PFT forward tracking (mm). [1.0]\n\nOutput options:\n --all If set, keeps \"excluded\" streamlines.\n NOT RECOMMENDED, except for debugging.\n --seed SEED Random number generator seed.\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm.[0.1]\n\nReferences: [1] Girard, G., Whittingstall, K., Deriche, R., and Descoteaux, M. (2014).
Towards quantitative connectivity analysis: reducing tractography biases. Neuroimage, 98, 266-278.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tracking_pft_maps", + "docstring": "Compute include and exclude maps, and the seeding interface mask from partial\nvolume estimation (PVE) maps. Maps should have values in [0,1], gm+wm+csf=1 in\nall voxels of the brain, gm+wm+csf=0 elsewhere.\n\nReferences: Girard, G., Whittingstall, K., Deriche, R., and Descoteaux, M.\n(2014). Towards quantitative connectivity analysis: reducing tractography\nbiases. Neuroimage.\n\nFormerly: scil_compute_maps_for_particle_filter_tracking.py", + "help": "usage: scil_tracking_pft_maps.py [-h] [--include filename]\n [--exclude filename] [--interface filename]\n [-t THRESHOLD] [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n in_wm in_gm in_csf\n\nCompute include and exclude maps, and the seeding interface mask from partial\nvolume estimation (PVE) maps. Maps should have values in [0,1], gm+wm+csf=1 in\nall voxels of the brain, gm+wm+csf=0 elsewhere.\n\nReferences: Girard, G., Whittingstall, K., Deriche, R., and Descoteaux, M.\n(2014). Towards quantitative connectivity analysis: reducing tractography\nbiases. Neuroimage.\n\nFormerly: scil_compute_maps_for_particle_filter_tracking.py\n\npositional arguments:\n in_wm White matter PVE map (nifti). From normal FAST output, has a PVE_2 name suffix.\n in_gm Grey matter PVE map (nifti). From normal FAST output, has a PVE_1 name suffix.\n in_csf Cerebrospinal fluid PVE map (nifti). From normal FAST output, has a PVE_0 name suffix.\n\noptions:\n -h, --help show this help message and exit\n --include filename Output include map (nifti). [map_include.nii.gz]\n --exclude filename Output exclude map (nifti). [map_exclude.nii.gz]\n --interface filename Output interface seeding mask (nifti). [interface.nii.gz]\n -t THRESHOLD Minimum gm and wm PVE values in a voxel to be included in the interface. [0.1]\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tracking_pft_maps_edit", + "docstring": "Modify PFT maps to allow PFT tracking in a given mask (e.g. edema).\n\nFormerly: scil_add_tracking_mask_to_pft_maps.py.", + "help": "usage: scil_tracking_pft_maps_edit.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n map_include map_exclude additional_mask\n map_include_corr map_exclude_corr\n\nModify PFT maps to allow PFT tracking in a given mask (e.g. edema).\n\nFormerly: scil_add_tracking_mask_to_pft_maps.py.\n\npositional arguments:\n map_include PFT map include.\n map_exclude PFT map exclude.\n additional_mask Allow PFT tracking in this mask.\n map_include_corr Corrected PFT map include output file name.\n map_exclude_corr Corrected PFT map exclude output file name.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_apply_transform", + "docstring": "Transform a tractogram using an affine/rigid transformation and nonlinear\ndeformation (optional).\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nApplying a transformation to a tractogram can lead to invalid streamlines (out of\nthe bounding box), and thus four strategies are available:\n1) Do nothing, may crash at saving if invalid streamlines are present.\n [This is the default]\n2) --keep_invalid, save invalid streamlines. Leave it to the user to run\n scil_tractogram_remove_invalid.py if needed.\n3) --remove_invalid, automatically remove invalid streamlines before saving.\n Should not remove more than a few streamlines. Typically, the streamlines\n that are rejected are the ones reaching the limits of the brain, e.g. near\n the pons.\n4) --cut_invalid, automatically cut invalid streamlines before saving, i.e. the\n streamlines are kept but the points out of the bounding box are cut.\n\nExample:\nTo apply a transformation from ANTs to a tractogram, if the ANTs command was\nMOVING->REFERENCE...\n1) To apply the original transformation:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --inverse\n --in_deformation 1InverseWarp.nii.gz\n\n2) To apply the inverse transformation, i.e. REFERENCE->MOVING:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --in_deformation 1Warp.nii.gz\n --reverse_operation\n\nFormerly: scil_apply_transform_to_tractogram.py", + "help": "usage: scil_tractogram_apply_transform.py [-h] [--inverse]\n [--in_deformation file]\n [--reverse_operation]\n [--cut_invalid | --remove_invalid | --keep_invalid]\n [--no_empty] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_moving_tractogram in_target_file\n in_transfo out_tractogram\n\nTransform a tractogram using an affine/rigid transformation and nonlinear\ndeformation (optional).\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nApplying a transformation to a tractogram can lead to invalid streamlines (out of\nthe bounding box), and thus four strategies are available:\n1) Do nothing, may crash at saving if invalid streamlines are present.\n [This is the default]\n2) --keep_invalid, save invalid streamlines. Leave it to the user to run\n scil_tractogram_remove_invalid.py if needed.\n3) --remove_invalid, automatically remove invalid streamlines before saving.\n Should not remove more than a few streamlines. Typically, the streamlines\n that are rejected are the ones reaching the limits of the brain, e.g. near\n the pons.\n4) --cut_invalid, automatically cut invalid streamlines before saving, i.e. the\n streamlines are kept but the points out of the bounding box are cut.\n\nExample:\nTo apply a transformation from ANTs to a tractogram, if the ANTs command was\nMOVING->REFERENCE...\n1) To apply the original transformation:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --inverse\n --in_deformation 1InverseWarp.nii.gz\n\n2) To apply the inverse transformation, i.e.
REFERENCE->MOVING:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --in_deformation 1Warp.nii.gz\n --reverse_operation\n\nFormerly: scil_apply_transform_to_tractogram.py\n\npositional arguments:\n in_moving_tractogram Path of the tractogram to be transformed.\n Bounding box validity will not be checked (could \n contain invalid streamlines).\n in_target_file Path of the reference target file (trk or nii).\n in_transfo Path of the file containing the 4x4 \n transformation, matrix (.txt, .npy or .mat).\n out_tractogram Output tractogram filename (transformed data).\n\noptions:\n -h, --help show this help message and exit\n --no_empty Do not write file if there is no streamline.\n You may save an empty file if you use remove_invalid.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nTransformation options:\n --inverse Apply the inverse linear transformation.\n --in_deformation file\n Path to the file containing a deformation field.\n --reverse_operation Apply the transformation in reverse (see doc), warp\n first, then linear.\n\nManagement of invalid streamlines:\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --keep_invalid Keep the streamlines landing out of the bounding box.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_apply_transform_to_hdf5", + "docstring": "Transform tractogram(s) contained in the hdf5 output from a connectivity\nscript, using an affine/rigid transformation and nonlinear deformation\n(optional).\n\nSee scil_tractogram_apply_transform.py to apply directly to a tractogram.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nOr use >> scil_tractogram_apply_transform.py --help\n\nFormerly: scil_apply_transform_to_hdf5.py", + "help": "usage: scil_tractogram_apply_transform_to_hdf5.py [-h] [--inverse]\n [--in_deformation file]\n [--reverse_operation]\n [--cut_invalid | --remove_invalid | --keep_invalid]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_hdf5 in_target_file\n in_transfo out_hdf5\n\nTransform tractogram(s) contained in the hdf5 output from a connectivity\nscript, using an affine/rigid transformation and nonlinear deformation\n(optional).\n\nSee scil_tractogram_apply_transform.py to apply directly to a tractogram.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nOr use >> scil_tractogram_apply_transform.py --help\n\nFormerly: scil_apply_transform_to_hdf5.py\n\npositional arguments:\n in_hdf5 Path of the hdf5 containing the moving tractogram, to be transformed. 
(.h5 extension).\n in_target_file Path of the reference target file (.trk or .nii).\n in_transfo Path of the file containing the 4x4 \n transformation, matrix (.txt, .npy or .mat).\n out_hdf5 Output tractogram filename (transformed data).\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nTransformation options:\n --inverse Apply the inverse linear transformation.\n --in_deformation file\n Path to the file containing a deformation field.\n --reverse_operation Apply the transformation in reverse (see doc), warp\n first, then linear.\n\nManagement of invalid streamlines:\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --keep_invalid Keep the streamlines landing out of the bounding box.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_assign_custom_color", + "docstring": "The script uses scalars from an anatomy, data_per_point or data_per_streamline\n(e.g. commit_weights) to visualize them on the streamlines.\nSaves the RGB values in the data_per_point 'color' with 3 values per point:\n(color_x, color_y, color_z).\n\nIf called with .tck, the output will always be .trk, because data_per_point has\nno equivalent in tck file.\n\nIf used with a visualization software like MI-Brain\n(https://github.com/imeka/mi-brain), the 'color' dps is applied by default at\nloading time.\n\nCOLORING METHOD\nThis script maps the raw values from these sources to RGB using a colormap.\n --use_dpp: The data from each point is converted to a color.\n --use_dps: The same color is applied to all points of the streamline.\n --from_anatomy: The voxel's color is used for the points of the streamlines\n crossing it. See also scil_tractogram_project_map_to_streamlines.py. You\n can have more options to project maps to dpp, and then use --use_dpp here.\n --along_profile: The data used here is each point's position in the\n streamline. To have nice results, you should first uniformize head/tail.\n See scil_tractogram_uniformize_endpoints.py.\n --local_angle.\n\nCOLORING OPTIONS\nA minimum and a maximum range can be provided to clip values. If the range of\nvalues is too large for intuitive visualization, a log transform can be\napplied.\n\nIf the data provided from --use_dps, --use_dpp and --from_anatomy are integer\nlabels, they can be mapped using a LookUp Table (--LUT).\nThe file provided as a LUT should be either .txt or .npy and if the size is\nN=20, then the data provided should be between 1-20.\n\nA custom colormap can be provided using --colormap. 
It should be a string\ncontaining a colormap name OR multiple Matplotlib named colors separated by -.\nThe colormap used for mapping values to colors can be saved to a png/jpg image\nusing the --out_colorbar option.\n\nSee also: scil_tractogram_assign_uniform_color.py, for simplified options.\n\nFormerly: scil_assign_custom_color_to_tractogram.py", + "help": "", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_assign_uniform_color", + "docstring": "Assign a hexadecimal RGB color to one or more Trackvis (.trk) tractograms.\n(If called with .tck, the output will always be .trk, because data_per_point\nhas no equivalent in tck files.)\n\nSaves the RGB values in the data_per_point 'color' with values\n(color_x, color_y, color_z).\n\nThe hexadecimal RGB color should be formatted as 0xRRGGBB or \"#RRGGBB\".\n\nSee also: scil_tractogram_assign_custom_color.py\n\nFormerly: scil_assign_uniform_color_to_tractograms.py", + "help": "usage: scil_tractogram_assign_uniform_color.py [-h]\n (--fill_color str | --dict_colors file.json)\n (--out_suffix [suffix] | --out_tractogram file.trk)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractograms\n [in_tractograms ...]\n\nAssign a hexadecimal RGB color to one or more Trackvis (.trk) tractograms.\n(If called with .tck, the output will always be .trk, because data_per_point\nhas no equivalent in tck files.)\n\nSaves the RGB values in the data_per_point 'color' with values\n(color_x, color_y, color_z).\n\nThe hexadecimal RGB color should be formatted as 0xRRGGBB or \"#RRGGBB\".\n\nSee also: scil_tractogram_assign_custom_color.py\n\nFormerly: scil_assign_uniform_color_to_tractograms.py\n\npositional arguments:\n in_tractograms Input tractograms (.trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nColoring Methods:\n --fill_color str Can be hexadecimal (i.e. either \"#RRGGBB\" or 0xRRGGBB).\n --dict_colors file.json\n Json file: dictionary mapping each tractogram's basename to a color.\n Do not put your file's extension in your dict.\n Same convention as --fill_color.\n\nOutput options:\n --out_suffix [suffix]\n Specify suffix to append to input basename.\n Mandatory choice if you run this script on multiple tractograms.\n Mandatory choice with --dict_colors.\n [None]\n --out_tractogram file.trk\n Output filename of colored tractogram (.trk).\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_commit", + "docstring": "Convex Optimization Modeling for Microstructure Informed Tractography (COMMIT)\nestimates, globally, how a given tractogram explains the DWI in terms of signal\nfit, assuming a certain forward microstructure model. It assigns a weight to\neach streamline, which represents how well it explains the DWI signal globally.\nThe default forward microstructure model is stick-zeppelin-ball, which requires\nmulti-shell data and a peak file (principal fiber directions in each voxel,\ntypically from a field of fODFs).\n\nIt is possible to use the ball-and-stick model for single-shell and multi-shell\ndata. In this case, the peak file is not mandatory.
Multi-shell should follow a\n\"NODDI protocol\" (low and high b-values); multiple shells with similar b-values\nshould not be used with COMMIT.\n\nThe output from COMMIT is:\n- fit_NRMSE.nii.gz\n fitting error (Normalized Root Mean Square Error)\n- fit_RMSE.nii.gz\n fitting error (Root Mean Square Error)\n- results.pickle\n Dictionary containing the experiment parameters and final weights\n- compartment_EC.nii.gz\n (est. Extra-Cellular signal fraction)\n- compartment_IC.nii.gz\n (est. Intra-Cellular signal fraction)\n- compartment_ISO.nii.gz\n (est. isotropic signal fraction (freewater compartment)):\n Each of COMMIT compartments\n- streamline_weights.txt\n Text file containing the commit weights for each streamline of the\n input tractogram.\n- streamlines_length.txt\n Text file containing the length (mm) of each streamline.\n- streamline_weights_by_length.txt\n Text file containing the commit weights for each streamline of the\n input tractogram, ordered by their length.\n- tot_streamline_weights\n Text file containing the total commit weights of each streamline.\n Equal to commit_weights * streamlines_length (W_i * L_i)\n- essential.trk / non_essential.trk\n Tractograms containing the streamlines above (essential) and\n below or equal (non_essential) to a threshold_weights of 0.\n- decompose_commit.h5\n In the case where the input is an hdf5 file only, we will save an output\n hdf5 with the following information separated into each bundle's dps:\n - streamlines_weights\n - streamline_weights_by_length\n For each bundle, only the essential streamlines are kept.\n\nThis script can divide the input tractogram in two using a threshold to apply\non the streamlines' weight. The threshold used is 0.0, keeping only streamlines\nthat have non-zero weight and that contribute to explaining the DWI signal.\nStreamlines with 0 weight are essentially not necessary according to COMMIT.\n\nCOMMIT2 is available only for HDF5 data from\nscil_tractogram_segment_bundles_for_connectivity.py and\nwith the --ball_stick option. Use the --commit2 option to activate it (slightly\nlonger computation time). This wrapper offers a simplified way to call COMMIT,\nbut does not allow using (or fine-tuning) every parameter.
If you want to use\nCOMMIT with full access to all parameters,\nvisit: https://github.com/daducci/COMMIT\n\nWhen tuning parameters, such as --iso_diff, --para_diff, --perp_diff or\n--lambda_commit_2, you should evaluate the quality of results by:\n - Looking at the 'density' (GTM) of the connectome (essential tractogram)\n - Confirming the quality of WM bundle reconstruction (essential tractogram)\n - Inspecting the (N)RMSE map and looking for peaks or anomalies\n - Comparing the density map before and after (essential tractogram)\n\nFormerly: scil_run_commit.py", + "help": "usage: scil_tractogram_commit.py [-h] [--nbr_dir NBR_DIR]\n [--nbr_iter NBR_ITER] [--in_peaks IN_PEAKS]\n [--in_tracking_mask IN_TRACKING_MASK]\n [--commit2]\n [--lambda_commit_2 LAMBDA_COMMIT_2]\n [--ball_stick] [--para_diff PARA_DIFF]\n [--perp_diff PERP_DIFF [PERP_DIFF ...]]\n [--iso_diff ISO_DIFF [ISO_DIFF ...]]\n [--keep_whole_tractogram]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--tolerance tol]\n [--skip_b0_check] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram in_dwi in_bval in_bvec out_dir\n\nConvex Optimization Modeling for Microstructure Informed Tractography (COMMIT)\nestimates, globally, how a given tractogram explains the DWI in terms of signal\nfit, assuming a certain forward microstructure model. It assigns a weight to\neach streamline, which represents how well it explains the DWI signal globally.\nThe default forward microstructure model is stick-zeppelin-ball, which requires\nmulti-shell data and a peak file (principal fiber directions in each voxel,\ntypically from a field of fODFs).\n\nIt is possible to use the ball-and-stick model for single-shell and multi-shell\ndata. In this case, the peak file is not mandatory. Multi-shell should follow a\n\"NODDI protocol\" (low and high b-values); multiple shells with similar b-values\nshould not be used with COMMIT.\n\nThe output from COMMIT is:\n- fit_NRMSE.nii.gz\n fitting error (Normalized Root Mean Square Error)\n- fit_RMSE.nii.gz\n fitting error (Root Mean Square Error)\n- results.pickle\n Dictionary containing the experiment parameters and final weights\n- compartment_EC.nii.gz\n (est. Extra-Cellular signal fraction)\n- compartment_IC.nii.gz\n (est. Intra-Cellular signal fraction)\n- compartment_ISO.nii.gz\n (est. isotropic signal fraction (freewater compartment)):\n Each of COMMIT compartments\n- streamline_weights.txt\n Text file containing the commit weights for each streamline of the\n input tractogram.\n- streamlines_length.txt\n Text file containing the length (mm) of each streamline.\n- streamline_weights_by_length.txt\n Text file containing the commit weights for each streamline of the\n input tractogram, ordered by their length.\n- tot_streamline_weights\n Text file containing the total commit weights of each streamline.\n Equal to commit_weights * streamlines_length (W_i * L_i)\n- essential.trk / non_essential.trk\n Tractograms containing the streamlines above (essential) and\n below or equal (non_essential) to a threshold_weights of 0.\n- decompose_commit.h5\n In the case where the input is an hdf5 file only, we will save an output\n hdf5 with the following information separated into each bundle's dps:\n - streamlines_weights\n - streamline_weights_by_length\n For each bundle, only the essential streamlines are kept.\n\nThis script can divide the input tractogram in two using a threshold to apply\non the streamlines' weight.
The threshold used is 0.0, keeping only streamlines\nthat have non-zero weight and that contribute to explaining the DWI signal.\nStreamlines with 0 weight are essentially not necessary according to COMMIT.\n\nCOMMIT2 is available only for HDF5 data from\nscil_tractogram_segment_bundles_for_connectivity.py and\nwith the --ball_stick option. Use the --commit2 option to activate it (slightly\nlonger computation time). This wrapper offers a simplified way to call COMMIT,\nbut does not allow using (or fine-tuning) every parameter. If you want to use\nCOMMIT with full access to all parameters,\nvisit: https://github.com/daducci/COMMIT\n\nWhen tuning parameters, such as --iso_diff, --para_diff, --perp_diff or\n--lambda_commit_2, you should evaluate the quality of results by:\n - Looking at the 'density' (GTM) of the connectome (essential tractogram)\n - Confirming the quality of WM bundle reconstruction (essential tractogram)\n - Inspecting the (N)RMSE map and looking for peaks or anomalies\n - Comparing the density map before and after (essential tractogram)\n\nFormerly: scil_run_commit.py\n\npositional arguments:\n in_tractogram Input tractogram (.trk or .tck or .h5).\n in_dwi Diffusion-weighted image used by COMMIT (.nii.gz).\n in_bval b-values in the FSL format (.bval).\n in_bvec b-vectors in the FSL format (.bvec).\n out_dir Output directory for the COMMIT maps.\n\noptions:\n -h, --help show this help message and exit\n --nbr_dir NBR_DIR Number of directions, on the half of the sphere,\n representing the possible orientations of the response functions [500].\n --nbr_iter NBR_ITER Maximum number of iterations [1000].\n --in_peaks IN_PEAKS Peaks file representing principal direction(s) locally,\n typically coming from fODFs. This file is mandatory for the default \n stick-zeppelin-ball model.\n --in_tracking_mask IN_TRACKING_MASK\n Binary mask where tractography was allowed.\n If not set, uses a binary mask computed from the streamlines.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nCOMMIT2 options:\n --commit2 Run commit2, requires .h5 as input and will force the\n ball & stick model.\n --lambda_commit_2 LAMBDA_COMMIT_2\n Specify the clustering prior strength [0.001].\n\nModel options:\n --ball_stick Use the ball & stick model, disabling the zeppelin compartment.\n The only model suitable for single-shell data.\n --para_diff PARA_DIFF\n Parallel diffusivity in mm^2/s.\n Default for both ball_stick and stick_zeppelin_ball: 1.7E-3.\n --perp_diff PERP_DIFF [PERP_DIFF ...]\n Perpendicular diffusivity in mm^2/s.\n Default for ball_stick: None\n Default for stick_zeppelin_ball: [0.51E-3]\n --iso_diff ISO_DIFF [ISO_DIFF ...]\n Isotropic diffusivity in mm^2/s.\n Default for ball_stick: [2.0E-3]\n Default for stick_zeppelin_ball: [1.7E-3, 3.0E-3]\n\nTractogram options:\n --keep_whole_tractogram\n Save a tractogram copy with streamline weights in the data_per_streamline\n [False].\n --compute_only Compute kernels only, --save_kernels must be used.\n\nKernels options:\n --save_kernels DIRECTORY\n Output directory for the COMMIT kernels.\n --load_kernels DIRECTORY\n Input directory where the COMMIT kernels are located.\n\nReferences:\n[1] Daducci, Alessandro, et al. \"COMMIT: convex optimization modeling for\n microstructure informed tractography.\" IEEE transactions on medical\n imaging 34.1 (2014): 246-257.\n[2] Schiavi, Simona, et al. \"A new method for accurate in vivo mapping of\n human brain connections using microstructural and anatomical information.\"\n Science advances 6.31 (2020): eaba8245.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_compress", + "docstring": "Compress a tractogram by removing collinear (or nearly collinear) points.\n\nThe compression threshold represents the maximum distance (in mm) to the\noriginal position of the point.\n\nFormerly: scil_compress_streamlines.py", + "help": "usage: scil_tractogram_compress.py [-h] [-e ERROR_RATE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nCompress a tractogram by removing collinear (or nearly collinear) points.\n\nThe compression threshold represents the maximum distance (in mm) to the\noriginal position of the point.\n\nFormerly: scil_compress_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file (trk or tck).\n out_tractogram Path of the output tractogram file (trk or tck).\n\noptions:\n -h, --help show this help message and exit\n -e ERROR_RATE Maximum compression distance in mm [0.1].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_compute_TODI", + "docstring": "Compute a Track Orientation Density Image (TODI).\n\nEach segment of the streamlines is weighted by its length (to support\ncompressed streamlines).\n\nThis script can afterwards output a Track Density Image (TDI) or a TODI with SF\nor SH representation, based on streamlines' segments.\n\nFormerly: scil_compute_todi.py", + "help": "usage: scil_tractogram_compute_TODI.py [-h] [--sphere SPHERE] [--mask MASK]\n [--sh_order SH_ORDER]\n [--normalize_per_voxel]\n [--smooth_todi | --asymmetric]\n [--n_steps N_STEPS]\n [--out_mask OUT_MASK]\n [--out_tdi OUT_TDI]\n [--out_todi_sf OUT_TODI_SF]\n [--out_todi_sh OUT_TODI_SH]\n [--reference REFERENCE]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram\n\nCompute a Track Orientation Density Image (TODI).\n\nEach segment of the streamlines is weighted by its length (to support\ncompressed streamlines).\n\nThis script can afterwards output a Track Density Image (TDI) or a TODI with SF\nor SH representation, based on streamlines' segments.\n\nFormerly: scil_compute_todi.py\n\npositional arguments:\n in_tractogram Input streamlines file.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nComputing options:\n --sphere SPHERE Sphere used for the angular discretization. [repulsion724]\n --mask MASK If set, use the given mask.\n --sh_order SH_ORDER Order of the original SH. [8]\n --normalize_per_voxel\n If set, normalize each SF/SH at each voxel.\n --smooth_todi If set, smooth TODI (angular and spatial).\n --asymmetric If set, compute asymmetric TODI.\n Cannot be used with --smooth_todi.\n --n_steps N_STEPS Number of steps for streamline segments subdivision prior to binning [1].\n\nOutput files. Saves only when filename is set:\n --out_mask OUT_MASK Mask showing where TDI > 0.\n --out_tdi OUT_TDI Output Track Density Image (TDI).\n --out_todi_sf OUT_TODI_SF\n Output TODI, with SF (each directions\n on the sphere, requires a lot of memory)\n --out_todi_sh OUT_TODI_SH\n Output TODI, with SH coefficients.\n\nReferences:\n [1] Dhollander T, Emsell L, Van Hecke W, Maes F, Sunaert S, Suetens P.\n Track orientation density imaging (TODI) and\n track orientation distribution (TOD) based tractography.\n NeuroImage. 
2014 Jul 1;94:312-36.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_compute_density_map", + "docstring": "Compute a density map from a streamlines file. Can be binary.\n\nThis script correctly handles compressed streamlines.\n\nFormerly: scil_compute_streamlines_density_map.py", + "help": "usage: scil_tractogram_compute_density_map.py [-h] [--binary [FIXED_VALUE]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_img\n\nCompute a density map from a streamlines file. Can be binary.\n\nThis script correctly handles compressed streamlines.\n\nFormerly: scil_compute_streamlines_density_map.py\n\npositional arguments:\n in_bundle Tractogram filename.\n out_img path of the output image file.\n\noptions:\n -h, --help show this help message and exit\n --binary [FIXED_VALUE]\n If set, will store the same value for all intersected voxels, \n creating a binary map.When set without a value, 1 is used (and dtype \n uint8). If a value is given, will be used as the stored value.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_convert", + "docstring": "Conversion of '.tck', '.trk', '.fib', '.vtk' and 'dpy' files using updated file\nformat standard. TRK file always needs a reference file, a NIFTI, for\nconversion. The FIB file format is in fact a VTK, MITK Diffusion supports it.\n\nFormerly: scil_convert_tractogram.py", + "help": "usage: scil_tractogram_convert.py [-h] [--no_bbox_check]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram output_name\n\nConversion of '.tck', '.trk', '.fib', '.vtk' and 'dpy' files using updated file\nformat standard. TRK file always needs a reference file, a NIFTI, for\nconversion. The FIB file format is in fact a VTK, MITK Diffusion supports it.\n\nFormerly: scil_convert_tractogram.py\n\npositional arguments:\n in_tractogram Tractogram filename. Format must be one of \n trk, tck, vtk, fib, dpy\n output_name Output filename. Format must be one of \n trk, tck, vtk, fib, dpy\n\noptions:\n -h, --help show this help message and exit\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_convert_hdf5_to_trk", + "docstring": "Save connections of a hdf5 created with\n>> scil_tractogram_segment_bundles_for_connectivity.py.\n\nUseful for quality control and visual inspections.\n\nIt can either save all connections (default), individual connections specified\nwith --edge_keys or connections from specific nodes specified with --node_keys.\n\nWith the option --save_empty, a label_lists, as a txt file, must be provided.\nThis option saves existing connections and empty connections.\n\nThe output is a directory containing the thousands of connections:\nout_dir/\n |-- LABEL1_LABEL1.trk\n |-- LABEL1_LABEL2.trk\n |-- [...]\n |-- LABEL90_LABEL90.trk\n\nFormerly: scil_save_connections_from_hdf5.py", + "help": "usage: scil_tractogram_convert_hdf5_to_trk.py [-h] [--include_dps]\n [--edge_keys LABEL1_LABEL2 [LABEL1_LABEL2 ...]\n | --node_keys NODE [NODE ...]]\n [--save_empty labels_list]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_hdf5 out_dir\n\nSave connections of a hdf5 created with\n>> scil_tractogram_segment_bundles_for_connectivity.py.\n\nUseful for quality control and visual inspections.\n\nIt can either save all connections (default), individual connections specified\nwith --edge_keys or connections from specific nodes specified with --node_keys.\n\nWith the option --save_empty, a label_lists, as a txt file, must be provided.\nThis option saves existing connections and empty connections.\n\nThe output is a directory containing the thousands of connections:\nout_dir/\n |-- LABEL1_LABEL1.trk\n |-- LABEL1_LABEL2.trk\n |-- [...]\n |-- LABEL90_LABEL90.trk\n\nFormerly: scil_save_connections_from_hdf5.py\n\npositional arguments:\n in_hdf5 HDF5 filename (.h5) containing decomposed connections.\n out_dir Path of the output directory.\n\noptions:\n -h, --help show this help message and exit\n --include_dps Include the data_per_streamline the metadata.\n --edge_keys LABEL1_LABEL2 [LABEL1_LABEL2 ...]\n Keys to identify the edges (connections) of interest.\n --node_keys NODE [NODE ...]\n Node keys to identify the sub-networks of interest.\n Equivalent to adding any --edge_keys node_LABEL2 or LABEL2_node.\n --save_empty labels_list\n Save empty connections. Then, the list of possible connections is \n not found from the hdf5 but inferred from labels_list, a txt file \n containing a list of nodes saved by the decomposition script.\n *If used together with edge_keys or node_keys, the provided nodes must \n exist in labels_list.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n CAREFUL. The whole output directory will be deleted if it exists.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_count_streamlines", + "docstring": "Return the number of streamlines in a tractogram. Only support trk and tck in\norder to support the lazy loading from nibabel.\n\nFormerly: scil_count_streamlines.py", + "help": "usage: scil_tractogram_count_streamlines.py [-h] [--print_count_alone]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n in_tractogram\n\nReturn the number of streamlines in a tractogram. 
Only support trk and tck in\norder to support the lazy loading from nibabel.\n\nFormerly: scil_count_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --print_count_alone If true, prints the result only. \n Else, prints the bundle name and count formatted as a json dict.(default)\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_cut_streamlines", + "docstring": "Filters streamlines and only keeps the parts of streamlines within or\nbetween the ROIs. Two options are available.\n\nInput mask:\n\nThe mask has either 1 entity/blob or\n2 entities/blobs (does not support disconnected voxels).\nThe option --biggest_blob can help if you have such a scenario.\n\nThe 1 entity scenario will 'trim' the streamlines so their longest segment is\nwithin the bounding box or a binary mask.\n\nThe 2 entities scenario will cut streamlines so their segment are within the\nbounding box or going from binary mask #1 to binary mask #2.\n\nInput label:\n\nThe label MUST contain 2 labels different from zero.\nLabel values could be anything.\nThe script will cut streamlines going from label 1 to label 2.\n\nBoth inputs and scenarios will erase data_per_point and data_per_streamline.\n\nFormerly: scil_cut_streamlines.py", + "help": "usage: scil_tractogram_cut_streamlines.py [-h] (--mask MASK | --label LABEL)\n [--label_ids LABEL_IDS LABEL_IDS]\n [--resample STEP_SIZE]\n [--biggest_blob]\n [--compress [COMPRESS_TH]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nFilters streamlines and only keeps the parts of streamlines within or\nbetween the ROIs. Two options are available.\n\nInput mask:\n\nThe mask has either 1 entity/blob or\n2 entities/blobs (does not support disconnected voxels).\nThe option --biggest_blob can help if you have such a scenario.\n\nThe 1 entity scenario will 'trim' the streamlines so their longest segment is\nwithin the bounding box or a binary mask.\n\nThe 2 entities scenario will cut streamlines so their segment are within the\nbounding box or going from binary mask #1 to binary mask #2.\n\nInput label:\n\nThe label MUST contain 2 labels different from zero.\nLabel values could be anything.\nThe script will cut streamlines going from label 1 to label 2.\n\nBoth inputs and scenarios will erase data_per_point and data_per_streamline.\n\nFormerly: scil_cut_streamlines.py\n\npositional arguments:\n in_tractogram Input tractogram file.\n out_tractogram Output tractogram file. Note: data_per_point will be discarded, if any!\n\noptions:\n -h, --help show this help message and exit\n --label_ids LABEL_IDS LABEL_IDS\n List of labels indices to use to cut streamlines (2 values).\n --resample STEP_SIZE Resample streamlines to a specific step-size in mm [None].\n --biggest_blob Use the biggest entity and force the 1 ROI scenario.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm.[0.1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMandatory mask options:\n Choose between mask or label input.\n\n --mask MASK Binary mask containing either 1 or 2 blobs.\n --label LABEL Label containing 2 blobs.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_detect_loops", + "docstring": "This script can be used to remove loops in two types of streamline datasets:\n\n - Whole brain: For this type, the script removes streamlines if they\n make a loop with an angle of more than 360 degrees. It's possible to change\n this angle with the --angle option. Warning: Don't use --qb option for a\n whole brain tractography.\n\n - Bundle dataset: For this type, it is possible to remove loops and\n streamlines outside the bundle. For the sharp angle turn, use --qb option.\n\nSee also:\n scil_tractogram_filter_by_anatomy.py\n\nFormerly: scil_detect_streamlines_loops.py", + "help": "usage: scil_tractogram_detect_loops.py [-h]\n [--looping_tractogram out_filename]\n [--qb [threshold]] [--angle ANGLE]\n [--display_counts] [--no_empty]\n [--indent INDENT] [--sort_keys]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script can be used to remove loops in two types of streamline datasets:\n\n - Whole brain: For this type, the script removes streamlines if they\n make a loop with an angle of more than 360 degrees. It's possible to change\n this angle with the --angle option. Warning: Don't use --qb option for a\n whole brain tractography.\n\n - Bundle dataset: For this type, it is possible to remove loops and\n streamlines outside the bundle. For the sharp angle turn, use --qb option.\n\nSee also:\n scil_tractogram_filter_by_anatomy.py\n\nFormerly: scil_detect_streamlines_loops.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_tractogram Output tractogram without loops.\n\noptions:\n -h, --help show this help message and exit\n --looping_tractogram out_filename\n If set, saves detected looping streamlines.\n --qb [threshold] If set, uses QuickBundles to detect outliers (loops, sharp angle \n turns). Given threshold is the maximal streamline to bundle \n distance for a streamline to be considered as a tracking error.\n Default if set: [8.0]\n --angle ANGLE Maximum looping (or turning) angle of\n a streamline in degrees. [360]\n --display_counts Print streamline count before and after filtering\n --no_empty If set, will not save outputs if they are empty.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\nReferences:\n QuickBundles, based on [Garyfallidis12] Frontiers in Neuroscience, 2012.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_dpp_math", + "docstring": "Performs an operation on data per point (dpp) from input streamlines.\n\nAlthough the input data always comes from the dpp, the output can be either\na dpp or a data_per_streamline (dps), depending on the chosen options.\nTwo modes of operation are supported: dpp and dps.\n - In dps mode, the operation is performed on dpp across the dimension of\n the streamlines resulting in a single value (or array in the 4D case)\n per streamline, stored as dps.\n - In dpp mode, the operation is performed on each point separately,\n resulting in a new dpp.\n\nIf endpoints_only is set in dpp mode, the operation will only be calculated at\nthe streamline endpoints; the rest of the values along the streamline will be\nNaN.\n\nIf endpoints_only is set in dps mode, the operation will be calculated across the\ndata at the endpoints and stored as a single value (or array in the 4D case)\nper streamline.\n\nEndpoint only operation:\ncorrelation: correlation calculated between arrays extracted from streamline\nendpoints (data must be multivalued per point); dps mode must be set.", + "help": "usage: scil_tractogram_dpp_math.py [-h] --mode {dpp,dps} --in_dpp_name key\n [key ...] --out_keys key [key ...]\n [--endpoints_only] [--keep_all_dpp_dps]\n [--overwrite_dpp_dps]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--no_bbox_check]\n OPERATION INPUT_FILE OUTPUT_FILE\n\nPerforms an operation on data per point (dpp) from input streamlines.\n\nAlthough the input data always comes from the dpp, the output can be either\na dpp or a data_per_streamline (dps), depending on the chosen options.\nTwo modes of operation are supported: dpp and dps.\n - In dps mode, the operation is performed on dpp across the dimension of\n the streamlines resulting in a single value (or array in the 4D case)\n per streamline, stored as dps.\n - In dpp mode, the operation is performed on each point separately,\n resulting in a new dpp.\n\nIf endpoints_only is set in dpp mode, the operation will only be calculated at\nthe streamline endpoints; the rest of the values along the streamline will be\nNaN.\n\nIf endpoints_only is set in dps mode, the operation will be calculated across the\ndata at the endpoints and stored as a single value (or array in the 4D case)\nper streamline.\n\nEndpoint only operation:\ncorrelation: correlation calculated between arrays extracted from streamline\nendpoints (data must be multivalued per point); dps mode must be set.\n\npositional arguments:\n OPERATION The type of operation to be performed on the \n streamlines. Must be one of the following: \n [mean, sum, min, max, correlation.]\n INPUT_FILE Input tractogram containing streamlines and metadata.\n OUTPUT_FILE The file where the remaining streamlines \n are saved.\n\noptions:\n -h, --help show this help message and exit\n --mode {dpp,dps} Set to dps if the operation is to be performed \n across all dimensions resulting in a single value per \n streamline.
Set to dpp if the operation is to be \n performed on each point separately resulting in a \n single value per point.\n --in_dpp_name key [key ...]\n Name or list of names of the data_per_point for \n operation to be performed on. If more than one dpp \n is selected, the same operation will be applied \n separately to each one.\n --out_keys key [key ...]\n Name of the resulting data_per_point or \n data_per_streamline to be saved in the output \n tractogram. If more than one --in_dpp_name was used, \n enter the same number of --out_keys values.\n --endpoints_only If set, will only perform operation on endpoints \n If not set, will perform operation on all streamline \n points.\n --keep_all_dpp_dps If set, previous data_per_point will be preserved \n in the output tractogram. Else, only --out_dpp_name \n keys will be saved.\n --overwrite_dpp_dps If set, if --keep_all_dpp_dps is set and some \n --out_keys keys already existed in your \n data_per_point or data_per_streamline, allow \n overwriting old data_per_point.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_extract_ushape", + "docstring": "This script extracts streamlines depending on their U-shapeness.\nThis script is a replica of Trackvis method.\n\nWhen ufactor is close to:\n* 0 it defines straight streamlines\n* 1 it defines U-fibers\n* -1 it defines S-fibers\n\nFormerly: scil_extract_ushape.py", + "help": "usage: scil_tractogram_extract_ushape.py [-h] [--minU MINU] [--maxU MAXU]\n [--remaining_tractogram filename]\n [--no_empty] [--display_counts]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script extracts streamlines depending on their U-shapeness.\nThis script is a replica of Trackvis method.\n\nWhen ufactor is close to:\n* 0 it defines straight streamlines\n* 1 it defines U-fibers\n* -1 it defines S-fibers\n\nFormerly: scil_extract_ushape.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_tractogram Output tractogram file name.\n\noptions:\n -h, --help show this help message and exit\n --minU MINU Min ufactor value. [0.5]\n --maxU MAXU Max ufactor value. [1.0]\n --remaining_tractogram filename\n If set, saves remaining streamlines.\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_anatomy", + "docstring": "This script filters streamlines in a tractogram according to their geometrical\nproperties (i.e. limiting their length and looping angle) and their anatomical\nending properties (i.e. 
the anatomical tissue or region their endpoints lie\nin).\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nThe filtering is performed sequentially in four steps, each step processing the\ndata on the output of the previous step:\n\n Step 1 - Remove streamlines below the minimum length and above the\n maximum length. These thresholds must be set with the ``--minL``\n and ``--maxL`` options.\n Step 2 - Ensure that no streamlines end in the cerebrospinal fluid\n according to the provided parcellation. A binary mask can be used\n alternatively through the ``--csf_bin`` option.\n Step 3 - Ensure that no streamlines end in white matter by ensuring that\n they reach the cortical regions according to the provided\n parcellation. The cortical regions of the parcellation can be\n dilated using the ``--dilate_ctx`` option.\n Step 4 - Remove streamlines if they make a loop with an angle above a\n certain threshold. It's possible to change this angle with the\n ``-a`` option.\n\nLength and loop-based filtering (steps 1 and 4) will not have practical effects\nif no specific thresholds are provided (but will still be executed), since\ndefault values are 0 for the minimum allowed length and infinite for the\nmaximum allowed length and angle.\n\nThe anatomical region endings filtering requires a parcellation or label image\nfile including the cerebrospinal fluid and gray matter (cortical) regions\naccording to the Desikan-Killiany atlas. Intermediate tractograms (results of\neach step and outliers) and volumes can be saved throughout the process.\n\nExample usages:\n\n# Filter length, looping angle and anatomical ending region\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --minL 20 --maxL 200 -a 300\n# Filter only anatomical ending region, with WM dilation and provided csf mask\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --csf_bin csf_bin.nii.gz --dilate_ctx 2\n\nFormerly: scil_filter_streamlines_anatomically.py\n\nNOTE: As of version 2.0.0, the dilation of the cortical mask has changed from\nan in-house process to scipy's dilation. Results may differ from previous\nversions.", + "help": "usage: scil_tractogram_filter_by_anatomy.py [-h] [--minL MINL] [--maxL MAXL]\n [--angle ANGLE]\n [--csf_bin CSF_BIN]\n [--dilate_ctx value]\n [--save_intermediate_tractograms]\n [--save_volumes] [--save_counts]\n [--save_rejected] [--no_empty]\n [--indent INDENT] [--sort_keys]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram in_wmparc out_path\n\nThis script filters streamlines in a tractogram according to their geometrical\nproperties (i.e. limiting their length and looping angle) and their anatomical\nending properties (i.e. the anatomical tissue or region their endpoints lie\nin).\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nThe filtering is performed sequentially in four steps, each step processing the\ndata on the output of the previous step:\n\n Step 1 - Remove streamlines below the minimum length and above the\n maximum length.
These thresholds must be set with the ``--minL``\n and ``--maxL`` options.\n Step 2 - Ensure that no streamlines end in the cerebrospinal fluid\n according to the provided parcellation. A binary mask can be used\n alternatively through the ``--csf_bin`` option.\n Step 3 - Ensure that no streamlines end in white matter by ensuring that\n they reach the cortical regions according to the provided\n parcellation. The cortical regions of the parcellation can be\n dilated using the ``--dilate_ctx`` option.\n Step 4 - Remove streamlines if they make a loop with an angle above a\n certain threshold. It's possible to change this angle with the\n ``-a`` option.\n\nLength and loop-based filtering (steps 1 and 4) will not have practical effects\nif no specific thresholds are provided (but will still be executed), since\ndefault values are 0 for the minimum allowed length and infinite for the\nmaximum allowed length and angle.\n\nThe anatomical region endings filtering requires a parcellation or label image\nfile including the cerebrospinal fluid and gray matter (cortical) regions\naccording to the Desikan-Killiany atlas. Intermediate tractograms (results of\neach step and outliers) and volumes can be saved throughout the process.\n\nExample usages:\n\n# Filter length, looping angle and anatomical ending region\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --minL 20 --maxL 200 -a 300\n# Filter only anatomical ending region, with WM dilation and provided csf mask\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --csf_bin csf_bin.nii.gz --dilate_ctx 2\n\nFormerly: scil_filter_streamlines_anatomically.py\n\nNOTE: As of version 2.0.0, the dilation of the cortical mask has changed from\nan in-house process to scipy's dilation. Results may differ from previous\nversions.\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n in_wmparc Path of the white matter parcellation atlas (.nii or .nii.gz)\n out_path Path to the output files.\n\noptions:\n -h, --help show this help message and exit\n --minL MINL Minimum length of streamlines, in mm. [0.0]\n --maxL MAXL Maximum length of streamlines, in mm. [inf]\n --angle ANGLE Maximum looping (or turning) angle of a streamline, \n in degrees. [inf]\n --csf_bin CSF_BIN Allow CSF endings filtering with this binary\n mask instead of using the atlas (.nii or .nii.gz)\n --dilate_ctx value If set, dilate the cortical labels. Value is the dilation \n radius, in voxels (an integer > 0)\n --save_intermediate_tractograms\n Save accepted and discarded streamlines after each step.\n --save_volumes Save volumetric images (e.g. binarised label \n images, etc) in the filtering process.\n --save_counts Save the streamline counts to a file (.json)\n --save_rejected Save rejected streamlines to output tractogram.\n --no_empty Do not write file if there are no streamlines.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\n References:\n [1] J\u00f6rgens, D., Descoteaux, M., Moreno, R., 2021. Challenges for\n tractogram filtering.
In: \u00d6zarslan, E., Schultz, T., Zhang, E., Fuster,\n A. (Eds.), Anisotropy Across Fields and Scales. Springer. Mathematics\n and Visualization.\n [2] Legarreta, J., Petit, L., Rheault, F., Theaud, G., Lemaire, C.,\n Descoteaux, M., Jodoin, P.M. Filtering in tractography using\n autoencoders (FINTA). Medical Image Analysis. 2021\n \n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_length", + "docstring": "Script to filter streamlines based on their lengths.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Filtering by length is its step 1)\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_length.py", + "help": "usage: scil_tractogram_filter_by_length.py [-h] [--minL MINL] [--maxL MAXL]\n [--no_empty] [--display_counts]\n [--save_rejected] [--indent INDENT]\n [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nScript to filter streamlines based on their lengths.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Filtering by length is its step 1)\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_length.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --minL MINL Minimum length of streamlines, in mm. [0.0]\n --maxL MAXL Maximum length of streamlines, in mm. [inf]\n --no_empty Do not write file if there are no streamlines.\n --display_counts Print streamline count before and after filtering\n --save_rejected Save rejected streamlines to output tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_orientation", + "docstring": "Script to filter streamlines based on their distance traveled in a specific\ndimension (x, y, or z).\n\nUseful to help differentiate bundles.\n\nExamples: In a brain aligned with x coordinates in left-right axis and y\ncoordinates in anterior-posterior axis, a streamline from the ...\n - corpus callosum will likely travel a very short distance in the y axis.\n - cingulum will likely travel a very short distance in the x axis.\n\nNote: we consider that x, y, z are the coordinates of the streamlines; we\ndo not verify if they are aligned with the brain's orientation.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_orientation.py", + "help": "usage: scil_tractogram_filter_by_orientation.py [-h] [--min_x MIN_X]\n [--max_x MAX_X]\n [--min_y MIN_Y]\n [--max_y MAX_Y]\n [--min_z MIN_Z]\n [--max_z MAX_Z] [--use_abs]\n [--no_empty]\n [--display_counts]\n [--save_rejected filename]\n [--indent INDENT]\n [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractogram out_tractogram\n\nScript to filter streamlines based on their distance traveled in a specific\ndimension (x, y, or z).\n\nUseful to help differentiate bundles.\n\nExamples: In a brain aligned with x coordinates in left-right axis and y\ncoordinates in anterior-posterior axis, a streamline from the ...\n - corpus callosum will likely travel a very short distance in the y axis.\n - cingulum will likely travel a very short distance in the x axis.\n\nNote: we consider that x, y, z are the coordinates of the streamlines; we\ndo not verify if they are aligned with the brain's orientation.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_orientation.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --min_x MIN_X Minimum distance in the first dimension, in mm. [0.0]\n --max_x MAX_X Maximum distance in the first dimension, in mm. [inf]\n --min_y MIN_Y Minimum distance in the second dimension, in mm. [0.0]\n --max_y MAX_Y Maximum distance in the second dimension, in mm. [inf]\n --min_z MIN_Z Minimum distance in the third dimension, in mm. [0.0]\n --max_z MAX_Z Maximum distance in the third dimension, in mm. [inf]\n --use_abs If set, will use the total of distances in absolute value (e.g., coming back on yourself will contribute to the total distance instead of cancelling it).\n --no_empty Do not write file if there are no streamlines.\n --display_counts Print streamline count before and after filtering.\n --save_rejected filename\n Save the SFT of rejected streamlines.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_roi", + "docstring": "Filtering of a tractogram based on any combination of conditions involving a\nROI (ex: keep streamlines whose endpoints are inside the ROI, exclude\nstreamlines not entirely included in a ROI, etc.)\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Can reject streamlines with endpoints in the WM or the CSF based on\n labels)\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n\nCondition\n---------\nFor every type of ROI, two or three values are expected: MODE CRITERIA DISTANCE\n(DISTANCE is always optional)\n- MODE must be one of these values: ['any', 'all', 'either_end', 'both_ends']\n - any: any part of the streamline must be in the mask\n - all: all parts of the streamline must be in the mask.\n - either_end: at least one end of the streamline must be in the mask.\n - both_ends: both ends of the streamline must be in the mask.\n- CRITERIA must be one of these values: ['include', 'exclude']\n - Include: If condition from MODE is respected, streamline is included.\n - Exclude: If condition from MODE is respected, streamline is excluded.\n- DISTANCE must be an int and is optional.\n\n\nType of ROI\n-----------\n- Drawn ROI: Directly loaded from a binary file.\n- Atlas ROI: Selected label from an atlas.\n - ID is one or multiple integer values in the atlas. If multiple values,\n ID needs to be between quotes.\n Example: \"1:6 9 10:15\" will use values 1 to 6 and 10 to 15 (inclusive),\n as well as value 9.\n- BDO: The ROI is the interior of a bounding box.\n- Planes: The ROI is the equivalent of a one-voxel plane.\n * Using mode 'all' with x/y/z plane works but makes very little sense.\n\nNote: `--drawn_roi MASK.nii.gz all include` is equivalent to\n `--drawn_roi INVERSE_MASK.nii.gz any exclude`\n\nFor example, this allows finding all streamlines entirely in the WM in one\ncommand (without manually inverting the mask first) or removing any streamline\nstaying in the GM without getting out.\n\nSupports multiple filtering conditions\n--------------------------------------\nMultiple filtering conditions can be used, with varied ROI types if necessary.\nCombining two conditions is equivalent to a logical AND between the conditions.\nOrder of application does not matter for the final result, but may change the\nintermediate files, if any.\n\nDistance management\n-------------------\nDISTANCE is optional, and it should be used carefully with large voxel size\n(e.g. > 2.5 mm). 
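As a hedged sketch (filenames are hypothetical), keeping only streamlines with both\nendpoints within one voxel of a drawn mask could look like:\n>>> scil_tractogram_filter_by_roi.py in.trk out.trk --drawn_roi mask.nii.gz both_ends include 1\n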
The value is in voxel for ROIs and in mm for bounding boxes.\nAnisotropic data will affect each direction differently.\n When using --overwrite_distance, any filtering option with given criteria\nwill have its DISTANCE value replaced.\n\nFormerly: scil_filter_tractogram.py", + "help": "usage: scil_tractogram_filter_by_roi.py [-h]\n [--drawn_roi DRAWN_ROI [DRAWN_ROI ...]]\n [--atlas_roi ATLAS_ROI [ATLAS_ROI ...]]\n [--bdo BDO [BDO ...]]\n [--x_plane X_PLANE [X_PLANE ...]]\n [--y_plane Y_PLANE [Y_PLANE ...]]\n [--z_plane Z_PLANE [Z_PLANE ...]]\n [--filtering_list FILTERING_LIST]\n [--overwrite_distance OVERWRITE_DISTANCE [OVERWRITE_DISTANCE ...]]\n [--save_masks_atlas_roi SAVE_MASKS_ATLAS_ROI]\n [--no_empty] [--display_counts]\n [--save_rejected FILENAME]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nFiltering of a tractogram based on any combination of conditions involving a\nROI (ex: keep streamlines whose endpoints are inside the ROI, exclude\nstreamlines not entirely included in a ROI, etc.)\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Can reject streamlines with endpoints in the WM or the CSF based on\n labels)\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n\nCondition\n---------\nFor every type of ROI, two or three values are expected: MODE CRITERIA DISTANCE\n(DISTANCE is always optional)\n- MODE must be one of these values: ['any', 'all', 'either_end', 'both_ends']\n - any: any part of the streamline must be in the mask\n - all: all parts of the streamline must be in the mask.\n - either_end: at least one end of the streamline must be in the mask.\n - both_ends: both ends of the streamline must be in the mask.\n- CRITERIA must be one of these values: ['include', 'exclude']\n - Include: If condition from MODE is respected, streamline is included.\n - Exclude: If condition from MODE is respected, streamline is excluded.\n- DISTANCE must be an int and is optional.\n\nType of ROI\n-----------\n- Drawn ROI: Directly loaded from a binary file.\n- Atlas ROI: Selected label from an atlas.\n - ID is one or multiple integer values in the atlas. If multiple values,\n ID needs to be between quotes.\n Example: \"1:6 9 10:15\" will use values 1 to 6 and 10 to 15 (inclusive),\n as well as value 9.\n- BDO: The ROI is the interior of a bounding box.\n- Planes: The ROI is the equivalent of a one-voxel plane.\n * Using mode 'all' with x/y/z plane works but makes very little sense.\n\nNote: `--drawn_roi MASK.nii.gz all include` is equivalent to\n `--drawn_roi INVERSE_MASK.nii.gz any exclude`\n\nFor example, this allows finding all streamlines entirely in the WM in one\ncommand (without manually inverting the mask first) or removing any streamline\nstaying in the GM without getting out.\n\nSupports multiple filtering conditions\n--------------------------------------\nMultiple filtering conditions can be used, with varied ROI types if necessary.\nCombining two conditions is equivalent to a logical AND between the conditions.\nOrder of application does not matter for the final result, but may change the\nintermediate files, if any.\n\nDistance management\n-------------------\nDISTANCE is optional, and it should be used carefully with large voxel size\n(e.g. > 2.5 mm). 
The value is in voxel for ROIs and in mm for bounding boxes.\nAnisotropic data will affect each direction differently.\n When using --overwrite_distance, any filtering option with given criteria\nwill have its DISTANCE value replaced.\n\nFormerly: scil_filter_tractogram.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n out_tractogram Path of the output tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --drawn_roi DRAWN_ROI [DRAWN_ROI ...]\n ROI_NAME MODE CRITERIA DISTANCE (distance in voxel is optional)\n Filename of a hand drawn ROI (.nii or .nii.gz).\n --atlas_roi ATLAS_ROI [ATLAS_ROI ...]\n ATLAS_NAME ID MODE CRITERIA DISTANCE (distance in voxel is optional)\n Filename of an atlas (.nii or .nii.gz).\n --bdo BDO [BDO ...] BDO_NAME MODE CRITERIA DISTANCE (distance in mm is optional)\n Filename of a bounding box (bdo) file from MI-Brain.\n --x_plane X_PLANE [X_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in X, in voxel space.\n --y_plane Y_PLANE [Y_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in Y, in voxel space.\n --z_plane Z_PLANE [Z_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in Z, in voxel space.\n --filtering_list FILTERING_LIST\n Text file containing one rule per line\n (e.g. drawn_roi mask.nii.gz both_ends include 1).\n --overwrite_distance OVERWRITE_DISTANCE [OVERWRITE_DISTANCE ...]\n MODE CRITERIA DISTANCE (distance in voxel for ROIs and in mm for bounding box).\n If set, it will overwrite the distance associated to a specific mode/criteria.\n --save_masks_atlas_roi SAVE_MASKS_ATLAS_ROI\n If set, will save the atlas roi masks. The value to provide is the \n prefix, ex: my_path/atlas_roi_. Whole filename will be \n my_path/atlas_roi_{id}.nii.gz\n --no_empty Do not write file if there are no streamlines.\n --display_counts Print streamline count before and after filtering\n --save_rejected FILENAME\n Save rejected streamlines to output tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_fix_trk", + "docstring": "This script is made to fix DSI-Studio or Startrack TRK file\n(unknown space/convention) to make it compatible with TrackVis,\nMI-Brain, Dipy Horizon (Stateful Tractogram).\n\nDSI-Studio\n==========\n\nThe script either makes it match an anatomy from DSI-Studio (AC-PC aligned,\nsometimes flipped) or, if --in_native_fa is provided, moves it back to native\nDWI space (this involves registration).\n\nSince DSI-Studio sometimes leaves some skull around the brain, the --auto_crop\noption aims to stabilize registration. If this option fails, manually BET both FA.\nRegistration is more robust at resolutions above 2 mm (iso); be careful.\n\nIf you are fixing bundles, use this script once with --save_transfo and verify\nresults. 
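Such a first call could look like this (a hedged sketch; all filenames are hypothetical):\n>>> scil_tractogram_fix_trk.py bundle.trk bundle_fixed.trk --software dsi_studio\n    --in_dsi_fa dsi_fa.nii.gz --in_native_fa native_fa.nii.gz --save_transfo transfo.txt\n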
Once satisfied, call the scripts on bundles using a bash for loop with\n--load_transfo to save computation.\n\nWe recommend using --cut_invalid to remove invalid points of streamlines rather\nthan removing entire streamlines.\n\nThis script was tested on various datasets and worked on all of them. However,\nalways verify the results; if a specific case does not work, open an issue\non the Scilpy GitHub repository.\n\nStartrack\n==========\n\nThe script will create a new stateful tractogram using the reference in\norder to fix the missing information in the header of the trk.\n\n\nWARNING: This script is still experimental, DSI-Studio and Startrack\nevolve quickly and results may vary depending on the data itself\nas well as DSI-studio/Startrack version.\n\nFormerly: scil_fix_dsi_studio_trk.py", + "help": "usage: scil_tractogram_fix_trk.py [-h] [--software string]\n [--cut_invalid | --remove_invalid]\n [--in_dsi_fa IN_DSI_FA]\n [--in_native_fa IN_NATIVE_FA] [--auto_crop]\n [--save_transfo FILE | --load_transfo FILE]\n [--reference REFERENCE] [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script is made to fix DSI-Studio or Startrack TRK file\n(unknown space/convention) to make it compatible with TrackVis,\nMI-Brain, Dipy Horizon (Stateful Tractogram).\n\nDSI-Studio\n==========\n\nThe script either makes it match an anatomy from DSI-Studio (AC-PC aligned,\nsometimes flipped) or, if --in_native_fa is provided, moves it back to native\nDWI space (this involves registration).\n\nSince DSI-Studio sometimes leaves some skull around the brain, the --auto_crop\noption aims to stabilize registration. If this option fails, manually BET both FA.\nRegistration is more robust at resolutions above 2 mm (iso); be careful.\n\nIf you are fixing bundles, use this script once with --save_transfo and verify\nresults. Once satisfied, call the scripts on bundles using a bash for loop with\n--load_transfo to save computation.\n\nWe recommend using --cut_invalid to remove invalid points of streamlines rather\nthan removing entire streamlines.\n\nThis script was tested on various datasets and worked on all of them. However,\nalways verify the results; if a specific case does not work, open an issue\non the Scilpy GitHub repository.\n\nStartrack\n==========\n\nThe script will create a new stateful tractogram using the reference in\norder to fix the missing information in the header of the trk.\n\nWARNING: This script is still experimental, DSI-Studio and Startrack\nevolve quickly and results may vary depending on the data itself\nas well as DSI-studio/Startrack version.\n\nFormerly: scil_fix_dsi_studio_trk.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file from DSI studio (.trk).\n out_tractogram Path of the output tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --software string Software used to create in_tractogram.\n Choices: ['dsi_studio', 'startrack']\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nDSI options:\n --in_dsi_fa IN_DSI_FA\n Path of the input FA from DSI Studio (.nii.gz).\n --in_native_fa IN_NATIVE_FA\n Path of the input FA from Dipy/MRtrix (.nii.gz).\n Move the tractogram back to a \"proper\" space, include registration.\n --auto_crop If both FA are not already BET, perform registration \n using a centered-cube crop to ignore the skull.\n A good BET for both is more robust.\n --save_transfo FILE Save estimated transformation to avoid recomputing (.txt).\n --load_transfo FILE Load estimated transformation to apply to other files (.txt).\n\nStarTrack options:\n --reference REFERENCE\n Reference anatomy (.nii or .nii.gz).\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_flip", + "docstring": "Flip streamlines locally around specific axes.\n\nIMPORTANT: this script should only be used in case of absolute necessity.\nIt's better to fix the real tools than to force flipping streamlines to\nhave them fit in the tools.\n\nFormerly: scil_flip_streamlines.py", + "help": "usage: scil_tractogram_flip.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram {x,y,z}\n [{x,y,z} ...]\n\nFlip streamlines locally around specific axes.\n\nIMPORTANT: this script should only be used in case of absolute necessity.\nIt's better to fix the real tools than to force flipping streamlines to\nhave them fit in the tools.\n\nFormerly: scil_flip_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n out_tractogram Path of the output tractogram file.\n {x,y,z} The axes you want to flip. e.g. to flip the x and y axes, use: x y.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_math", + "docstring": "Performs an operation on a list of streamline files. The supported\noperations are:\n\ndifference: Keep the streamlines from the first file that are not in\n any of the following files.\n\nintersection: Keep the streamlines that are present in all files.\n\nunion: Keep all streamlines while removing duplicates.\n\nconcatenate: Keep all streamlines with duplicates.\n\nlazy_concatenate: Keep all streamlines with duplicates, never load the whole\n tractograms in memory. Only works with trk/tck files;\n metadata will be lost and invalid streamlines are kept.\n\nIf a file 'duplicate.trk' has identical streamlines, calling the script using\nthe difference/intersection/union with a single input will remove these\nduplicated streamlines.\n\nTo allow a soft match, use the --precision option to increase the allowed\nthreshold for similarity. A precision of 1 represents 10**(-1), so a\nmaximum distance of 0.1mm is allowed. If the streamlines are identical, the\ndefault value of 3 (or 0.001mm distance) should work.\n\nIf there is a 0.5mm shift, use a precision of 0 (or 1mm distance) and the\n--robust option. Should make it work, but slightly slower. Will merge all\nstreamlines similar when rounded to that precision level.\n\nThe metadata (data per point, data per streamline) of the streamlines that\nare kept in the output will be preserved. 
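As a sketch (hypothetical filenames), a union keeping shared metadata could be:\n>>> scil_tractogram_math.py union bundle_a.trk bundle_b.trk out_union.trk\n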
This requires that all input files\nshare the same type of metadata. If this is not the case, use the option\n--no_metadata to strip the metadata from the output, or --fake_metadata to\ninitialize dummy metadata in the files missing them.\n\nFormerly: scil_streamlines_math.py", + "help": "usage: scil_tractogram_math.py [-h] [--precision NBR_OF_DECIMALS] [--robust]\n [--no_metadata] [--fake_metadata]\n [--save_indices OUT_INDEX_FILE] [--save_empty]\n [--no_bbox_check] [--indent INDENT]\n [--sort_keys] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n OPERATION INPUT_FILES [INPUT_FILES ...]\n OUTPUT_FILE\n\nPerforms an operation on a list of streamline files. The supported\noperations are:\n\ndifference: Keep the streamlines from the first file that are not in\n any of the following files.\n\nintersection: Keep the streamlines that are present in all files.\n\nunion: Keep all streamlines while removing duplicates.\n\nconcatenate: Keep all streamlines with duplicates.\n\nlazy_concatenate: Keep all streamlines with duplicates, never load the whole\n tractograms in memory. Only works with trk/tck files;\n metadata will be lost and invalid streamlines are kept.\n\nIf a file 'duplicate.trk' has identical streamlines, calling the script using\nthe difference/intersection/union with a single input will remove these\nduplicated streamlines.\n\nTo allow a soft match, use the --precision option to increase the allowed\nthreshold for similarity. A precision of 1 represents 10**(-1), so a\nmaximum distance of 0.1mm is allowed. If the streamlines are identical, the\ndefault value of 3 (or 0.001mm distance) should work.\n\nIf there is a 0.5mm shift, use a precision of 0 (or 1mm distance) and the\n--robust option. Should make it work, but slightly slower. Will merge all\nstreamlines similar when rounded to that precision level.\n\nThe metadata (data per point, data per streamline) of the streamlines that\nare kept in the output will be preserved. This requires that all input files\nshare the same type of metadata. If this is not the case, use the option\n--no_metadata to strip the metadata from the output, or --fake_metadata to\ninitialize dummy metadata in the files missing them.\n\nFormerly: scil_streamlines_math.py\n\npositional arguments:\n OPERATION The type of operation to be performed on the streamlines. Must\n be one of the following: difference, intersection, union, concatenate, lazy_concatenate.\n INPUT_FILES The list of files that contain the streamlines to operate on.\n OUTPUT_FILE The file where the remaining streamlines are saved.\n\noptions:\n -h, --help show this help message and exit\n --precision NBR_OF_DECIMALS, -p NBR_OF_DECIMALS\n Precision used to compare streamlines [4].\n --robust, -r Use version robust to small translation/rotation.\n --no_metadata, -n Strip the streamline metadata from the output.\n --fake_metadata Skip the metadata verification, create fake metadata if missing, can lead to unexpected behavior.\n --save_indices OUT_INDEX_FILE, -s OUT_INDEX_FILE\n Save the streamline indices to the supplied json file.\n --save_empty If set, we will save all results, even if the tractogram is empty.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_pairwise_comparison", + "docstring": "This script is designed to compare and help visualize differences between two\ntractograms. This can be especially useful in studies where multiple\ntractograms from different algorithms or parameters need to be compared.\n\nA similar script (scil_bundle_pairwise_comparison.py) is available for bundles,\nwith metrics more adapted to bundles (and spatial agreement).\n\nThe difference is computed in terms of\n- A voxel-wise spatial distance between streamlines crossing each voxel.\n This can help to see if both tractography reconstructions at each voxel\n look similar (out_diff.nii.gz)\n- An angular correlation (ACC) between streamline orientation from TODI.\n This compares the local orientation of streamlines at each voxel\n (out_acc.nii.gz)\n- A patch-wise correlation between streamline density maps from both\n tractograms. This compares where the high/low density regions agree or not\n (out_corr.nii.gz)\n- A heatmap combining all the previous metrics using a harmonic mean of the\n normalized metrics to summarize general agreement (out_heatmap.nii.gz)", + "help": "usage: scil_tractogram_pairwise_comparison.py [-h] [--out_dir OUT_DIR]\n [--out_prefix OUT_PREFIX]\n [--in_mask IN_FILE]\n [--skip_streamlines_distance]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram_1 in_tractogram_2\n\nThis script is designed to compare and help visualize differences between two\ntractograms. This can be especially useful in studies where multiple\ntractograms from different algorithms or parameters need to be compared.\n\nA similar script (scil_bundle_pairwise_comparison.py) is available for bundles,\nwith metrics more adapted to bundles (and spatial agreement).\n\nThe difference is computed in terms of\n- A voxel-wise spatial distance between streamlines crossing each voxel.\n This can help to see if both tractography reconstructions at each voxel\n look similar (out_diff.nii.gz)\n- An angular correlation (ACC) between streamline orientation from TODI.\n This compares the local orientation of streamlines at each voxel\n (out_acc.nii.gz)\n- A patch-wise correlation between streamline density maps from both\n tractograms. This compares where the high/low density regions agree or not\n (out_corr.nii.gz)\n- A heatmap combining all the previous metrics using a harmonic mean of the\n normalized metrics to summarize general agreement (out_heatmap.nii.gz)\n\npositional arguments:\n in_tractogram_1 Input tractogram 1.\n in_tractogram_2 Input tractogram 2.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Directory where all output files will be saved.\n If not specified, outputs will be saved in the current directory.\n --out_prefix OUT_PREFIX\n Prefix for output files. Useful for distinguishing between different runs [out].\n --in_mask IN_FILE Optional input mask.\n --skip_streamlines_distance\n Skip computation of the spatial distance between streamlines. Slowest part of the computation.\n --processes NBR Number of sub-processes to start. 
\n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_print_info", + "docstring": "Prints information on a loaded tractogram: number of streamlines, and\nmean / min / max / std of\n - length in number of points\n - length in mm\n - step size.\n\nFor trk files: also prints the data_per_point and data_per_streamline keys.\n\nSee also:\n - scil_header_print_info.py to see the header, affine, volume dimension.\n - scil_bundle_shape_measures.py to see bundle-specific information.", + "help": "usage: scil_tractogram_print_info.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [--indent INDENT] [--sort_keys]\n in_tractogram\n\nPrints information on a loaded tractogram: number of streamlines, and\nmean / min / max / std of\n - length in number of points\n - length in mm\n - step size.\n\nFor trk files: also prints the data_per_point and data_per_streamline keys.\n\nSee also:\n - scil_header_print_info.py to see the header, affine, volume dimension.\n - scil_bundle_shape_measures.py to see bundle-specific information.\n\npositional arguments:\n in_tractogram Tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_project_map_to_streamlines", + "docstring": "Projects data extracted from a map onto the points of streamlines.\n\nThe default options will take data from a nifti image (3D or 4D) and project it\nonto the points of streamlines. If the image is 4D, the data is stored as a\nlist of 1D arrays per streamline. If the image is 3D, the data is stored as a\nlist of values per streamline.\n\nSee also scil_tractogram_project_streamlines_to_map.py for the reverse action.\n\n* Note that the data from your maps will be projected only on the coordinates\nof the points of your streamlines. Data underlying the whole segments between\ntwo consecutive points is not used. If your streamlines are strongly\ncompressed, or if they have a very big step size, the result will possibly\nreflect your map poorly. You may use scil_tractogram_resample.py to upsample\nyour streamlines first.\n\n* Hint: The streamlines themselves are not modified here, only their dpp. 
To\navoid multiplying data on disk, you could use the following arguments to save\nthe new dpp in your current tractogram:\n>> scil_tractogram_project_map_to_streamlines.py $in_bundle $in_bundle\n --keep_all_dpp -f", + "help": "usage: scil_tractogram_project_map_to_streamlines.py [-h] --in_maps IN_MAPS\n [IN_MAPS ...]\n --out_dpp_name\n OUT_DPP_NAME\n [OUT_DPP_NAME ...]\n [--trilinear]\n [--endpoints_only]\n [--keep_all_dpp]\n [--overwrite_dpp]\n [--reference REFERENCE]\n [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n in_tractogram\n out_tractogram\n\nProjects data extracted from a map onto the points of streamlines.\n\nThe default options will take data from a nifti image (3D or 4D) and project it\nonto the points of streamlines. If the image is 4D, the data is stored as a\nlist of 1D arrays per streamline. If the image is 3D, the data is stored as a\nlist of values per streamline.\n\nSee also scil_tractogram_project_streamlines_to_map.py for the reverse action.\n\n* Note that the data from your maps will be projected only on the coordinates\nof the points of your streamlines. Data underlying the whole segments between\ntwo consecutive points is not used. If your streamlines are strongly\ncompressed, or if they have a very big step size, the result will possibly\nreflect your map poorly. You may use scil_tractogram_resample.py to upsample\nyour streamlines first.\n\n* Hint: The streamlines themselves are not modified here, only their dpp. To\navoid multiplying data on disk, you could use the following arguments to save\nthe new dpp in your current tractogram:\n>> scil_tractogram_project_map_to_streamlines.py $in_bundle $in_bundle\n --keep_all_dpp -f\n\npositional arguments:\n in_tractogram Fiber bundle file.\n out_tractogram Output file.\n\noptions:\n -h, --help show this help message and exit\n --in_maps IN_MAPS [IN_MAPS ...]\n Nifti map to project onto streamlines.\n --out_dpp_name OUT_DPP_NAME [OUT_DPP_NAME ...]\n Name of the data_per_point to be saved in the \n output tractogram.\n --trilinear If set, will use trilinear interpolation \n else will use nearest neighbor interpolation \n by default.\n --endpoints_only If set, will only project the map onto the \n endpoints of the streamlines (all other values along \n streamlines will be NaN). If not set, will project \n the map onto all points of the streamlines.\n --keep_all_dpp If set, previous data_per_point will be preserved \n in the output tractogram. Else, only --out_dpp_name \n keys will be saved.\n --overwrite_dpp If set, if --keep_all_dpp is set and some \n --out_dpp_name keys already existed in your \n data_per_point, allow overwriting old data_per_point.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_project_streamlines_to_map", + "docstring": "Projects metrics onto the underlying voxels of streamlines. This script can\nproject data from data_per_point (dpp) or data_per_streamline (dps) to maps.\n\nYou choose to project data from all points of the streamlines, or from the\nendpoints only. 
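(A hedged sketch, assuming the tractogram carries a dpp key named 'fa' and using hypothetical filenames: >>> scil_tractogram_project_streamlines_to_map.py bundle.trk out_ --use_dpp fa --mean_streamline --to_endpoints)\n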
The idea then is to visualize the cortical areas affected by\nmetrics (assuming streamlines start/end in the cortex).\n\nSee also scil_tractogram_project_map_to_streamlines.py for the reverse action.\n\nHow the data is loaded:\n - From dps: uses the same value for each point of the streamline.\n - From dpp: one value per point.\n\nHow the data is used:\n 1. Average all points of the streamline to get a mean value, set this value\n to all points.\n 2. Average the two endpoints and get their mean value, set this value to\n all points.\n 3. Keep each point individually.\n\nHow the data is projected to a map:\n A. Using each point.\n B. Using the endpoints only.\n\nFor more complex operations than the average per streamline, see\nscil_tractogram_dpp_math.py.", + "help": "usage: scil_tractogram_project_streamlines_to_map.py [-h]\n (--use_dps key [key ...] | --use_dpp key [key ...] | --load_dps file [file ...] | --load_dpp file [file ...])\n (--mean_endpoints | --mean_streamline | --point_by_point)\n (--to_endpoints | --to_wm)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bundle out_prefix\n\nProjects metrics onto the underlying voxels of streamlines. This script can\nproject data from data_per_point (dpp) or data_per_streamline (dps) to maps.\n\nYou choose to project data from all points of the streamlines, or from the\nendpoints only. The idea then is to visualize the cortical areas affected by\nmetrics (assuming streamlines start/end in the cortex).\n\nSee also scil_tractogram_project_map_to_streamlines.py for the reverse action.\n\nHow the data is loaded:\n - From dps: uses the same value for each point of the streamline.\n - From dpp: one value per point.\n\nHow the data is used:\n 1. Average all points of the streamline to get a mean value, set this value\n to all points.\n 2. Average the two endpoints and get their mean value, set this value to\n all points.\n 3. Keep each point individually.\n\nHow the data is projected to a map:\n A. Using each point.\n B. Using the endpoints only.\n\nFor more complex operations than the average per streamline, see\nscil_tractogram_dpp_math.py.\n\npositional arguments:\n in_bundle Fiber bundle file.\n out_prefix Folder + prefix to save endpoints metric(s). We will save \n one nifti file per dpp/dps key given.\n Ex: my_path/subjX_bundleY_ with --use_dpp key1 will output \n my_path/subjX_bundleY_key1.nii.gz\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nWhere to get the statistics from. (Choose one):\n --use_dps key [key ...]\n Use the data_per_streamline from the tractogram.\n It must be a .trk\n --use_dpp key [key ...]\n Use the data_per_point from the tractogram. \n It must be a trk.\n --load_dps file [file ...]\n Load data per streamline (scalar) .txt or .npy.\n Must load an array with the right shape.\n --load_dpp file [file ...]\n Load data per point (scalar) from .txt or .npy.\n Must load an array with the right shape.\n\nProcessing choices. 
(Choose one):\n --mean_endpoints Uses one single value per streamline: the mean of the two \n endpoints.\n --mean_streamline Uses one single value per streamline: the mean of all \n points of the streamline.\n --point_by_point Directly project the streamline values onto the map.\n\nWhere to send the statistics. (Choose one):\n --to_endpoints Project metrics onto a mask of the endpoints.\n --to_wm Project metrics into streamlines coverage.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_qbx", + "docstring": "Compute clusters using QuickBundlesX and save them separately.\nWe cannot know the number of clusters in advance.\n\nQuickbundles:\nGaryfallidis, E. et al. (2012). Quickbundles, a method for tractography\nsimplification. Frontiers in neuroscience, 6, 175.\n\nQuickbundlesX:\nGaryfallidis, E. et al. (2016) QuickBundlesX: sequential clustering of millions\nof streamlines in multiple levels of detail at record execution time. 24th\nInternational Society of Magnetic Resonance in Medicine (ISMRM).\n\n\"QuickBundlesX shows a remarkable 20+X speedup over its predecessor\"\n\nFormerly: scil_compute_qbx.py", + "help": "usage: scil_tractogram_qbx.py [-h] [--nb_points NB_POINTS]\n [--out_centroids OUT_CENTROIDS]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram dist_thresh out_clusters_dir\n\nCompute clusters using QuickBundlesX and save them separately.\nWe cannot know the number of clusters in advance.\n\nQuickbundles:\nGaryfallidis, E. et al. (2012). Quickbundles, a method for tractography\nsimplification. Frontiers in neuroscience, 6, 175.\n\nQuickbundlesX:\nGaryfallidis, E. et al. (2016) QuickBundlesX: sequential clustering of millions\nof streamlines in multiple levels of detail at record execution time. 24th\nInternational Society of Magnetic Resonance in Medicine (ISMRM).\n\n\"QuickBundlesX shows a remarkable 20+X speedup over its predecessor\"\n\nFormerly: scil_compute_qbx.py\n\npositional arguments:\n in_tractogram Tractogram filename.\n Path of the input tractogram or bundle.\n dist_thresh Last QuickBundlesX threshold in mm. Typically \n the value is between 10 and 20 mm.\n out_clusters_dir Path where to save the clusters directory.\n\noptions:\n -h, --help show this help message and exit\n --nb_points NB_POINTS\n Streamlines will be resampled to have this number of points [20].\n --out_centroids OUT_CENTROIDS\n Output tractogram filename.\n Format must be readable by the Nibabel API.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_register", + "docstring": "Generate a linear transformation matrix from the registration of 2 tractograms.\nTypically, this script is run before scil_tractogram_apply_transform.py.\n\nFor more information on how to use the various registration scripts, see the\ndoc at:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_register_tractogram.py", + "help": "usage: scil_tractogram_register.py [-h] [--out_name OUT_NAME] [--only_rigid]\n [--moving_tractogram_ref MOVING_TRACTOGRAM_REF]\n [--static_tractogram_ref STATIC_TRACTOGRAM_REF]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n moving_tractogram static_tractogram\n\nGenerate a linear transformation matrix from the registration of 2 tractograms.\nTypically, this script is run before scil_tractogram_apply_transform.py.\n\nFor more information on how to use the various registration scripts, see the\ndoc at:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_register_tractogram.py\n\npositional arguments:\n moving_tractogram Path of the moving tractogram.\n static_tractogram Path of the target tractogram.\n\noptions:\n -h, --help show this help message and exit\n --out_name OUT_NAME Filename of the transformation matrix. \n The registration type will be appended as a suffix,\n [_.txt]. \n Default: [transformation.txt]\n --only_rigid If set, will only use a rigid transformation (uses affine by default).\n --moving_tractogram_ref MOVING_TRACTOGRAM_REF\n Reference anatomy for moving_tractogram (if tck/vtk/fib/dpy) file\n support (.nii or .nii.gz).\n --static_tractogram_ref STATIC_TRACTOGRAM_REF\n Reference anatomy for static_tractogram (if tck/vtk/fib/dpy) file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] E. Garyfallidis, O. Ocegueda, D. Wassermann, M. Descoteaux\nRobust and efficient linear registration of white-matter fascicles in the\nspace of streamlines, NeuroImage, Volume 117, 15 August 2015, Pages 124-140\n(http://www.sciencedirect.com/science/article/pii/S1053811915003961)\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_remove_invalid", + "docstring": "Removal of streamlines that are out of the volume bounding box. In voxel space,\nno negative coordinates and no coordinates above the volume dimensions are\npossible. Any streamline that does not respect these two conditions is removed.\n\nThe --cut_invalid option will cut streamlines so that their longest segment is\nwithin the bounding box.\n\nFormerly: scil_remove_invalid_streamlines.py", + "help": "usage: scil_tractogram_remove_invalid.py [-h] [--cut_invalid]\n [--remove_single_point]\n [--remove_overlapping_points]\n [--threshold THRESHOLD] [--no_empty]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nRemoval of streamlines that are out of the volume bounding box. 
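A minimal call could be (hypothetical filenames):\n>>> scil_tractogram_remove_invalid.py in.trk out.trk --cut_invalid\n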
In voxel space,\nno negative coordinates and no coordinates above the volume dimensions are\npossible. Any streamline that does not respect these two conditions is removed.\n\nThe --cut_invalid option will cut streamlines so that their longest segment is\nwithin the bounding box.\n\nFormerly: scil_remove_invalid_streamlines.py\n\npositional arguments:\n in_tractogram Tractogram filename. Format must be one of \n trk, tck, vtk, fib, dpy.\n out_tractogram Output filename. Format must be one of \n trk, tck, vtk, fib, dpy.\n\noptions:\n -h, --help show this help message and exit\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_single_point\n Consider single point streamlines invalid.\n --remove_overlapping_points\n Consider streamlines with overlapping points invalid.\n --threshold THRESHOLD\n Maximum distance between two points to be considered overlapping [0.001 mm].\n --no_empty Do not save empty tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_resample", + "docstring": "Script to resample a tractogram to a set number of streamlines.\nDefault behavior:\n- IF number of requested streamlines is lower than streamline count: DOWNSAMPLE\n- IF number of requested streamlines is higher than streamline count: UPSAMPLE\nTo prevent upsampling if not desired, use --never_upsample.\n\nCan be useful to build training sets for machine learning algorithms, to\nupsample under-represented bundles or downsample over-represented bundles.\n\nWorks by either selecting a subset of streamlines or by generating new\nstreamlines by adding gaussian noise to existing ones.\n\nUpsampling:\n Includes smoothing to compensate for the noisiness of new streamlines\n generated by the process.\nDownsampling:\n Includes the possibility of choosing randomly *per QuickBundles cluster* to\n ensure that all clusters are represented in the final tractogram.\n\nExample usage:\n$ scil_tractogram_resample.py input.trk 1000 output.trk --point_wise_std 0.5 --gaussian 5 --keep_invalid_streamlines\n$ scil_visualize_bundles.py output.trk --local_coloring --width=0.1", + "help": "usage: scil_tractogram_resample.py [-h] [--never_upsample]\n [--point_wise_std POINT_WISE_STD]\n [--tube_radius TUBE_RADIUS]\n [--gaussian SIGMA] [-e ERROR_RATE]\n [--keep_invalid_streamlines]\n [--downsample_per_cluster]\n [--qbx_thresholds QBX_THRESHOLDS [QBX_THRESHOLDS ...]]\n [--seed SEED] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram nb_streamlines out_tractogram\n\nScript to resample a tractogram to a set number of streamlines.\nDefault behavior:\n- IF number of requested streamlines is lower than streamline count: DOWNSAMPLE\n- IF number of requested streamlines is higher than streamline count: UPSAMPLE\nTo prevent upsampling if not desired, use --never_upsample.\n\nCan be useful to build training sets for machine learning algorithms, to\nupsample under-represented bundles or downsample over-represented bundles.\n\nWorks by either selecting a subset of streamlines or by generating new\nstreamlines by adding gaussian noise to existing ones.\n\nUpsampling:\n Includes smoothing to compensate for the noisiness of new streamlines\n generated by the 
process.\nDownsampling:\n Includes the possibility of choosing randomly *per QuickBundles cluster* to\n ensure that all clusters are represented in the final tractogram.\n\nExample usage:\n$ scil_tractogram_resample.py input.trk 1000 output.trk --point_wise_std 0.5 --gaussian 5 --keep_invalid_streamlines\n$ scil_visualize_bundles.py output.trk --local_coloring --width=0.1\n\npositional arguments:\n in_tractogram Input tractography file.\n nb_streamlines Number of streamlines to resample the tractogram to.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --never_upsample Make sure to never upsample a tractogram.\n Useful when downsampling batches of files using bash.\n --seed SEED Use a specific random seed for the resampling.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nUpsampling params:\n --point_wise_std POINT_WISE_STD\n Noise to add to existing streamlines points to generate new ones [1].\n --tube_radius TUBE_RADIUS\n Maximum distance to generate streamlines around the original ones [1].\n --gaussian SIGMA Sigma for smoothing. Use the value of surrounding X,Y,Z points on the streamline to blur the streamlines.\n A good sigma choice would be around 5.\n -e ERROR_RATE Maximum compression distance in mm [0.1].\n --keep_invalid_streamlines\n Keep invalid newly generated streamlines that may go out of the \n bounding box.\n\nDownsampling params:\n --downsample_per_cluster\n If set, downsampling will be done per cluster (computed with \n Quickbundles) to ensure that at least some streamlines are \n kept per bundle. Else, random downsampling is performed (default).\n --qbx_thresholds QBX_THRESHOLDS [QBX_THRESHOLDS ...]\n If you choose option '--downsample_per_cluster', you may set \n the QBx threshold value(s) here. Default: [40, 30, 20]\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_resample_nb_points", + "docstring": "Script to resample a set of streamlines to either a new number of points per\nstreamline or to a fixed step size. WARNING: data_per_point is not carried.\n\nFormerly: scil_resample_streamlines.py", + "help": "usage: scil_tractogram_resample_nb_points.py [-h]\n (--nb_pts_per_streamline NB_PTS_PER_STREAMLINE | --step_size STEP_SIZE)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nScript to resample a set of streamlines to either a new number of points per\nstreamline or to a fixed step size. WARNING: data_per_point is not carried.\n\nFormerly: scil_resample_streamlines.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --nb_pts_per_streamline NB_PTS_PER_STREAMLINE\n Number of points per streamline in the output.\n --step_size STEP_SIZE\n Step size in the output (in mm).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_seed_density_map", + "docstring": "Compute a density map of seeds saved in .trk file.\n\nFormerly: scil_compute_seed_density_map.py", + "help": "usage: scil_tractogram_seed_density_map.py [-h] [--binary [FIXED_VALUE]]\n [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n tractogram_filename\n seed_density_filename\n\nCompute a density map of seeds saved in .trk file.\n\nFormerly: scil_compute_seed_density_map.py\n\npositional arguments:\n tractogram_filename Tracts filename. Format must be .trk. \n File should contain a \"seeds\" value in the data_per_streamline.\n These seeds must be in space: voxel, origin: corner.\n seed_density_filename\n Output seed density filename. Format must be Nifti.\n\noptions:\n -h, --help show this help message and exit\n --binary [FIXED_VALUE]\n If set, will store the same value for all intersected voxels, creating a binary map.\n When set without a value, 1 is used (and dtype uint8).\n If a value is given, will be used as the stored value.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_segment_and_score", + "docstring": "Scores input tractogram overall and bundlewise.\n\nOutputs\n-------\n\n - results.json: Contains a full tractometry report.\n - processing_stats.json: Contains information on the segmentation of\n bundles (ex: the number of wpc per criterion).\n - Splits the input tractogram into\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (if args.compute_ic)\n segmented_WPC/*_wpc.trk (if args.save_wpc_separately)\n IS.trk OR NC.trk (if args.compute_ic)\n\nBy default, if a streamline fits in many bundles, it will be included in every\none. This means a streamline may be a VS for a bundle and an IS for\n(potentially many) others. If you want to assign each streamline to at most one\nbundle, use the `--unique` flag.\n\nConfig file\n-----------\n\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\nMandatory:\n - endpoints OR [head AND tail]: filename for the endpoints ROI.\n If 'endpoints' is used, we will automatically separate the mask into two\n ROIs, acting as head and tail. Quality check is strongly recommended.\n\nOptional:\n Concerning metrics:\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n Concerning inclusion criteria (other streamlines will be WPC):\n - all_mask: ROI serving as \"all\" criteria: to be included in the bundle,\n ALL points of a streamline must be inside the mask.*\n - any_mask: ROI serving as \"any\" criteria: streamlines\n must touch that mask in at least one point (\"any\" point) to be included\n in the bundle.\n - angle: angle criteria. Streamlines containing loops and sharp turns above\n given angle will be rejected from the bundle.\n - length: maximum and minimum lengths per bundle.\n - length_x / length_x_abs: maximum and minimum total distance in the x\n direction (i.e. 
first coordinate).**\n - length_y / length_y_abs: maximum and minimum total distance in the y\n direction (i.e. second coordinate).**\n - length_z / length_z_abs: maximum and minimum total distance in the z\n direction (i.e. third coordinate).**\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n** With absolute values: coming back on yourself will contribute to the total\ndistance instead of cancelling it.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n \"angle\": 300,\n \"length\": [140, 150],\n \"endpoints\": \"PATH/file1.nii.gz\"\n }\n}", + "help": "usage: scil_tractogram_segment_and_score.py [-h] [--json_prefix p]\n [--gt_dir DIR]\n [--use_gt_masks_as_all_masks]\n [--dilate_endpoints NB_PASS]\n [--remove_invalid]\n [--save_wpc_separately]\n [--compute_ic] [--unique]\n [--remove_wpc_belonging_to_another_bundle]\n [--no_empty] [--indent INDENT]\n [--sort_keys] [--no_bbox_check]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram gt_config out_dir\n\nScores input tractogram overall and bundlewise.\n\nOutputs\n-------\n\n - results.json: Contains a full tractometry report.\n - processing_stats.json: Contains information on the segmentation of\n bundles (ex: the number of wpc per criterion).\n - Splits the input tractogram into\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (if args.compute_ic)\n segmented_WPC/*_wpc.trk (if args.save_wpc_separately)\n IS.trk OR NC.trk (if args.compute_ic)\n\nBy default, if a streamline fits in many bundles, it will be included in every\none. This means a streamline may be a VS for a bundle and an IS for\n(potentially many) others. If you want to assign each streamline to at most one\nbundle, use the `--unique` flag.\n\nConfig file\n-----------\n\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\nMandatory:\n - endpoints OR [head AND tail]: filename for the endpoints ROI.\n If 'endpoints' is used, we will automatically separate the mask into two\n ROIs, acting as head and tail. Quality check is strongly recommended.\n\nOptional:\n Concerning metrics:\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n Concerning inclusion criteria (other streamlines will be WPC):\n - all_mask: ROI serving as \"all\" criteria: to be included in the bundle,\n ALL points of a streamline must be inside the mask.*\n - any_mask: ROI serving as \"any\" criteria: streamlines\n must touch that mask in at least one point (\"any\" point) to be included\n in the bundle.\n - angle: angle criteria. Streamlines containing loops and sharp turns above\n given angle will be rejected from the bundle.\n - length: maximum and minimum lengths per bundle.\n - length_x / length_x_abs: maximum and minimum total distance in the x\n direction (i.e. first coordinate).**\n - length_y / length_y_abs: maximum and minimum total distance in the y\n direction (i.e. second coordinate).**\n - length_z / length_z_abs: maximum and minimum total distance in the z\n direction (i.e. third coordinate).**\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. 
If it is a nifti file, it will be considered to be a mask.\n** With absolute values: coming back on yourself will contribute to the total\ndistance instead of cancelling it.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n \"angle\": 300,\n \"length\": [140, 150],\n \"endpoints\": \"PATH/file1.nii.gz\"\n }\n}\n\nTractometry\n-----------\nGlobal connectivity metrics:\n\n- Computed by default:\n - VS: valid streamlines, belonging to a bundle (i.e. respecting all the\n criteria for that bundle; endpoints, limit_mask, gt_mask).\n - IS: invalid streamlines. All other streamlines. IS = IC + NC.\n\n- Optional:\n - WPC: wrong path connections, streamlines connecting correct ROIs but not\n respecting the other criteria for that bundle. Such streamlines always\n exist but they are only saved separately if specified in the options.\n Else, they are merged back with the IS.\n By definition, WPC are only computed if \"limits masks\" are provided.\n - IC: invalid connections, streamlines joining an incorrect combination of\n ROIs. Use carefully, quality depends on the quality of your ROIs and no\n analysis is done on the shape of the streamlines.\n - NC: no connections. Invalid streamlines minus invalid connections.\n\n- Fidelity metrics:\n - OL: Overlap. Percentage of ground truth voxels containing streamline(s)\n for a given bundle.\n - OR: Overreach. Amount of voxels containing streamline(s) when they\n shouldn't, for a given bundle. We compute two versions:\n OR_pct_vs = divided by the total number of voxels covered by the bundle\n (percentage of the voxels touched by VS).\n Values range between 0 and 100%. Values are not defined when we\n recovered no streamline for a bundle, but we set the OR_pct_vs to 0\n in that case.\n OR_pct_gt = divided by the total size of the ground truth bundle mask.\n Values could be higher than 100%.\n - f1 score: which is the same as the Dice score.\n\npositional arguments:\n in_tractogram Input tractogram to score.\n gt_config .json dict configured as specified above.\n out_dir Output directory for the resulting segmented bundles.\n\noptions:\n -h, --help show this help message and exit\n --json_prefix p Prefix of the two output json files. Ex: 'study_x_'. Files will be saved inside out_dir.\n Suffixes will be 'processing_stats.json' and 'results.json'.\n --no_empty Do not write file if there is no streamline.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nAdditions to gt_config:\n --gt_dir DIR Root path of the ground truth files listed in the gt_config. \n If not set, filenames in the config file are considered \n as absolute paths.\n --use_gt_masks_as_all_masks\n If set, the gt_config's 'gt_mask' will also be used as\n 'all_mask' for each bundle. Note that this means the\n OR will necessarily be 0.\n\nPreprocessing:\n --dilate_endpoints NB_PASS\n Dilate endpoint masks n-times. 
Default: 0.\n --remove_invalid Remove invalid streamlines before scoring.\n\nTractometry choices:\n --save_wpc_separately\n If set, streamlines rejected from VC based on the config\n file criteria will be saved separately from IS (and IC)\n in one file *_wpc.tck per bundle.\n --compute_ic If set, IS are split into NC + IC, where IC are computed as one bundle per\n pair of ROI not belonging to a true connection, named\n *_*_IC.tck.\n --unique If set, streamlines are assigned to the first bundle they fit in and not to all.\n --remove_wpc_belonging_to_another_bundle\n If set, WPC actually belonging to any VB (in the \n case of overlapping ROIs) will be removed\n from the WPC classification.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_segment_bundles", + "docstring": "Compute BundleSeg, with support for multi-atlas & multi-parameters (RBx-like).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are not sure about the transformation 'direction' you can try\nscil_tractogram_segment_bundles.py (with the -v option), a warning will pop up\nif the provided transformation is not used correctly.\n\nThe number of folders inside 'models_directories' will increase the number of\nruns. Each folder is considered as an atlas and bundles inside will initiate\nmore BundleSeg executions. The more atlases you have, the more robust the\nrecognition will be.\n\n--minimal_vote_ratio is a value between 0 and 1. If you have 5 input model\ndirectories and a minimal_vote_ratio of 0.5, you will need at least 3 votes.\n\nExample data and usage available at: https://zenodo.org/record/7950602\n\nFor RAM usage, it is recommended to use this heuristic:\n (size of input tractogram (GB) * number of processes) < RAM (GB)\nThis is important because many instances of data structures are initialized\nin parallel and can lead to a RAM overflow.\n\nFormerly: scil_recognize_multi_bundles.py", + "help": "usage: scil_tractogram_segment_bundles.py [-h] [--out_dir OUT_DIR]\n [--minimal_vote_ratio MINIMAL_VOTE_RATIO]\n [--seed SEED] [--inverse]\n [--reference REFERENCE]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractograms [in_tractograms ...]\n in_config_file in_directory\n in_transfo\n\nCompute BundleSeg, with support for multi-atlas & multi-parameters (RBx-like).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are not sure about the transformation 'direction' you can try\nscil_tractogram_segment_bundles.py (with the -v option), a warning will pop up\nif the provided transformation is not used correctly.\n\nThe number of folders inside 'models_directories' will increase the number of\nruns. Each folder is considered as an atlas and bundles inside will initiate\nmore BundleSeg executions. The more atlases you have, the more robust the\nrecognition will be.\n\n--minimal_vote_ratio is a value between 0 and 1. 
If you have 5 input model\ndirectories and a minimal_vote_ratio of 0.5, you will need at least 3 votes.\n\nExample data and usage available at: https://zenodo.org/record/7950602\n\nFor RAM usage, it is recommended to use this heuristic:\n (size of input tractogram (GB) * number of processes) < RAM (GB)\nThis is important because many instances of data structures are initialized\nin parallel and can lead to a RAM overflow.\n\nFormerly: scil_recognize_multi_bundles.py\n\npositional arguments:\n in_tractograms Input tractogram filename (.trk or .tck).\n in_config_file Path of the config file (.json).\n in_directory Path of parent folder of models directories.\n Each folder inside will be considered as a different atlas.\n in_transfo Path for the transformation to model space (.txt, .npy or .mat).\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Path for the output directory [voting_results].\n --minimal_vote_ratio MINIMAL_VOTE_RATIO\n Streamlines will only be considered for saving if\n recognized often enough [0.5].\n --seed SEED Random number generator seed [0].\n --inverse Use the inverse transformation.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] St-Onge, Etienne, Kurt G. Schilling, and Francois Rheault.\n\"BundleSeg: A versatile, reliable and reproducible approach to white\nmatter bundle segmentation.\" International Workshop on Computational\nDiffusion MRI. Cham: Springer Nature Switzerland (2023)\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_segment_bundles_for_connectivity", + "docstring": "Compute a connectivity matrix from a tractogram and a parcellation.\n\nCurrent strategy is to keep the longest streamline segment connecting 2\nregions. If the streamline crosses other gray matter regions before reaching\nits final connected region, the kept connection is still the longest. This is\nrobust to compressed streamlines.\n\nThe output file is an hdf5 (.h5) where the keys are 'LABEL1_LABEL2' and each\ngroup is composed of 'data', 'offsets' and 'lengths' from the array_sequence.\nThe 'data' is stored in VOX/CORNER for simplicity and efficiency. See script\nscil_tractogram_convert_hdf5_to_trk.py to convert to a list of .trk bundles.\n\nFor the --outlier_threshold option the default is a recommended good trade-off\nfor a freesurfer parcellation. With smaller parcels (brainnetome, glasser) the\nthreshold should most likely be reduced.\n\nGood candidate connections to QC are the brainstem to precentral gyrus\nconnection and precentral left to precentral right connection, or equivalent\nin your parcellation.\n\nNOTE: this script can take a while to run. 
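The vote threshold and RAM heuristic above reduce to two one-line rules: a streamline must be recognized in at least ceil(N * ratio) of the N atlas runs, and the input size times the process count must fit in RAM. A minimal Python sketch of both rules as stated in the help text (hypothetical helper names, not part of scilpy):

import math

def min_votes(nb_atlases, minimal_vote_ratio=0.5):
    # ceil(5 * 0.5) = 3, matching the example in the help text above.
    return math.ceil(nb_atlases * minimal_vote_ratio)

def fits_in_ram(tractogram_size_gb, nb_processes, ram_gb):
    # Heuristic from the help text:
    # (size of input tractogram (GB) * number of processes) < RAM (GB)
    return tractogram_size_gb * nb_processes < ram_gb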
Please be patient.\nExample: on a tractogram with 1.8M streamlines, running on an SSD:\n- 15 minutes without post-processing, only saving final bundles.\n- 30 minutes with full post-processing, only saving final bundles.\n- 60 minutes with full post-processing, saving all possible files.\n\nFormerly: scil_decompose_connectivity.py", + "help": "usage: scil_tractogram_segment_bundles_for_connectivity.py [-h] [--no_pruning]\n [--no_remove_loops]\n [--no_remove_outliers]\n [--no_remove_curv_dev]\n [--min_length MIN_LENGTH]\n [--max_length MAX_LENGTH]\n [--outlier_threshold OUTLIER_THRESHOLD]\n [--loop_max_angle LOOP_MAX_ANGLE]\n [--curv_qb_distance CURV_QB_DISTANCE]\n [--out_dir OUT_DIR]\n [--save_raw_connections]\n [--save_intermediate]\n [--save_discarded]\n [--out_labels_list OUT_FILE]\n [--reference REFERENCE]\n [--no_bbox_check]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractograms\n [in_tractograms ...]\n in_labels out_hdf5\n\nCompute a connectivity matrix from a tractogram and a parcellation.\n\nCurrent strategy is to keep the longest streamline segment connecting 2\nregions. If the streamline crosses other gray matter regions before reaching\nits final connected region, the kept connection is still the longest. This is\nrobust to compressed streamlines.\n\nThe output file is an hdf5 (.h5) where the keys are 'LABEL1_LABEL2' and each\ngroup is composed of 'data', 'offsets' and 'lengths' from the array_sequence.\nThe 'data' is stored in VOX/CORNER for simplicity and efficiency. See script\nscil_tractogram_convert_hdf5_to_trk.py to convert to a list of .trk bundles.\n\nFor the --outlier_threshold option the default is a recommended good trade-off\nfor a freesurfer parcellation. With smaller parcels (brainnetome, glasser) the\nthreshold should most likely be reduced.\n\nGood candidate connections to QC are the brainstem to precentral gyrus\nconnection and precentral left to precentral right connection, or equivalent\nin your parcellation.\n\nNOTE: this script can take a while to run. Please be patient.\nExample: on a tractogram with 1.8M streamlines, running on an SSD:\n- 15 minutes without post-processing, only saving final bundles.\n- 30 minutes with full post-processing, only saving final bundles.\n- 60 minutes with full post-processing, saving all possible files.\n\nFormerly: scil_decompose_connectivity.py\n\npositional arguments:\n in_tractograms Tractogram filenames. Format must be one of \n trk, tck, vtk, fib, dpy.\n in_labels Labels file name (nifti). Labels must have 0 as background.\n out_hdf5 Output hdf5 file (.h5).\n\noptions:\n -h, --help show this help message and exit\n --out_labels_list OUT_FILE\n Save the labels list as text file.\n Needed for scil_connectivity_compute_matrices.py and others.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nPost-processing options:\n --no_pruning If set, will NOT prune on length.\n Length criteria in --min_length, --max_length.\n --no_remove_loops If set, will NOT remove streamlines making loops.\n Angle criteria based on --loop_max_angle.\n --no_remove_outliers If set, will NOT remove outliers using QB.\n Criteria based on --outlier_threshold.\n --no_remove_curv_dev If set, will NOT remove streamlines that deviate from the mean curvature.\n Threshold based on --curv_qb_distance.\n\nPruning options:\n --min_length MIN_LENGTH\n Pruning minimal segment length. [20.0]\n --max_length MAX_LENGTH\n Pruning maximal segment length. [200.0]\n\nOutliers and loops options:\n --outlier_threshold OUTLIER_THRESHOLD\n Outlier removal threshold when using hierarchical QB. [0.6]\n --loop_max_angle LOOP_MAX_ANGLE\n Maximal winding angle over which a streamline is considered as looping. [330.0]\n --curv_qb_distance CURV_QB_DISTANCE\n Clustering threshold for centroids curvature filtering with QB. [10.0]\n\nSaving options:\n --out_dir OUT_DIR Output directory for each connection as a separate file (.trk).\n --save_raw_connections\n If set, will save all raw cut connections in a subdirectory.\n --save_intermediate If set, will save the intermediate results of filtering.\n --save_discarded If set, will save discarded streamlines in subdirectories.\n Includes loops, outliers and qb_loops.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_segment_one_bundle", + "docstring": "Compute a simple Recobundles (single-atlas & single-parameters).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are unsure about the transformation 'direction' use the verbose\noption (-v) and try with and without the --inverse flag. If you are not using\nthe right transformation 'direction' a warning will pop up. If there is no\nwarning in both cases it means the transformation is very close to identity and\nboth 'directions' will work.\n\nFormerly: scil_recognize_single_bundles.py", + "help": "usage: scil_tractogram_segment_one_bundle.py [-h]\n [--tractogram_clustering_thr TRACTOGRAM_CLUSTERING_THR]\n [--model_clustering_thr MODEL_CLUSTERING_THR]\n [--pruning_thr PRUNING_THR]\n [--slr_threads SLR_THREADS]\n [--seed SEED] [--inverse]\n [--no_empty]\n [--in_pickle IN_PICKLE | --out_pickle OUT_PICKLE]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram in_model in_transfo\n out_tractogram\n\nCompute a simple Recobundles (single-atlas & single-parameters).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are unsure about the transformation 'direction' use the verbose\noption (-v) and try with and without the --inverse flag. If you are not using\nthe right transformation 'direction' a warning will pop up. 
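Since the help text above specifies the .h5 layout ('LABEL1_LABEL2' groups holding 'data', 'offsets' and 'lengths' from the array_sequence), a minimal h5py sketch can illustrate walking it; the filename and the offsets-as-start-indices interpretation are assumptions, not scilpy code:

import h5py
import numpy as np

with h5py.File("connectivity.h5", "r") as f:  # hypothetical filename
    for key in f:  # keys are 'LABEL1_LABEL2'
        data = np.asarray(f[key]["data"])        # stacked points (VOX/CORNER)
        offsets = np.asarray(f[key]["offsets"])  # assumed: start index per streamline
        lengths = np.asarray(f[key]["lengths"])  # number of points per streamline
        streamlines = [data[o:o + l] for o, l in zip(offsets, lengths)]
        print(key, len(streamlines))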
If there is no\nwarning in both cases it means the transformation is very close to identity and\nboth 'directions' will work.\n\nFormerly: scil_recognize_single_bundles.py\n\npositional arguments:\n in_tractogram Input tractogram filename.\n in_model Model to use for recognition.\n in_transfo Path for the transformation to model space (.txt, .npy or .mat).\n out_tractogram Output tractogram filename.\n\noptions:\n -h, --help show this help message and exit\n --tractogram_clustering_thr TRACTOGRAM_CLUSTERING_THR\n Clustering threshold used for the whole brain [8mm].\n --model_clustering_thr MODEL_CLUSTERING_THR\n Clustering threshold used for the model [4mm].\n --pruning_thr PRUNING_THR\n MDF threshold used for final streamlines selection [6mm].\n --slr_threads SLR_THREADS\n Number of threads for SLR [1].\n --seed SEED Random number generator seed [None].\n --inverse Use the inverse transformation.\n --no_empty Do not write file if there is no streamline.\n --in_pickle IN_PICKLE\n Input pickle clusters map file.\n Will override the tractogram_clustering_thr parameter.\n --out_pickle OUT_PICKLE\n Output pickle clusters map file.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nGaryfallidis, E., Cote, M. A., Rheault, F., ... &\nDescoteaux, M. (2018). Recognition of white matter\nbundles using local and global streamline-based registration and\nclustering. NeuroImage, 170, 283-295.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_shuffle", + "docstring": "Shuffle the ordering of streamlines.\n\nFormerly: scil_shuffle_streamlines.py", + "help": "usage: scil_tractogram_shuffle.py [-h] [--seed SEED] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nShuffle the ordering of streamlines.\n\nFormerly: scil_shuffle_streamlines.py\n\npositional arguments:\n in_tractogram Input tractography file.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --seed SEED Random number generator seed [None].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_smooth", + "docstring": "This script will smooth the streamlines, usually to remove the 'wiggles' in\nprobabilistic tracking.\n\nTwo choices of methods are available:\n- Gaussian will use the surrounding coordinates for smoothing. Streamlines are\nresampled to 1mm step-size and the smoothing is performed on the coordinate\narray. The sigma will be indicative of the number of points surrounding the\ncenter points to be used for blurring.\n- Spline will fit a spline curve to every streamline using a sigma and the\nnumber of control points. The sigma represents the allowed distance from the\ncontrol points. 
The final streamlines are obtained by evaluating the spline at\nconstant intervals so that it will have the same number of points as initially.\n\nThis script enforces endpoints to remain the same.\n\nWARNING:\n- too low of a sigma (e.g. 1) with a lot of control points (e.g. 15)\nwill create crazy streamlines that could end up out of the bounding box.\n- data_per_point will be lost.\n\nFormerly: scil_smooth_streamlines.py", + "help": "usage: scil_tractogram_smooth.py [-h]\n (--gaussian SIGMA | --spline SIGMA NB_CTRL_POINT)\n [--compress [COMPRESS_TH]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script will smooth the streamlines, usually to remove the 'wiggles' in\nprobabilistic tracking.\n\nTwo choices of methods are available:\n- Gaussian will use the surrounding coordinates for smoothing. Streamlines are\nresampled to 1mm step-size and the smoothing is performed on the coordinate\narray. The sigma will be indicative of the number of points surrounding the\ncenter points to be used for blurring.\n- Spline will fit a spline curve to every streamline using a sigma and the\nnumber of control points. The sigma represents the allowed distance from the\ncontrol points. The final streamlines are obtained by evaluating the spline at\nconstant intervals so that it will have the same number of points as initially.\n\nThis script enforces endpoints to remain the same.\n\nWARNING:\n- too low of a sigma (e.g. 1) with a lot of control points (e.g. 15)\nwill create crazy streamlines that could end up out of the bounding box.\n- data_per_point will be lost.\n\nFormerly: scil_smooth_streamlines.py\n\npositional arguments:\n in_tractogram Input tractography file.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --gaussian SIGMA Sigma for smoothing. Use the value of surrounding\n X,Y,Z points on the streamline to blur the streamlines.\n A good sigma choice would be around 5.\n --spline SIGMA NB_CTRL_POINT\n Sigma for smoothing. Model each streamline as a spline.\n A good sigma choice would be around 5 and control points around 10.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm. [0.1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
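As a rough illustration of the Gaussian variant described above (blurring the coordinate array while keeping the endpoints fixed), here is a minimal sketch; it is not scilpy's implementation and skips the 1mm resampling step:

import numpy as np
from scipy.ndimage import gaussian_filter1d

def smooth_streamline(points, sigma=5.0):
    # Blur each coordinate along the streamline axis, then pin the endpoints,
    # since the script enforces that endpoints remain the same.
    smoothed = gaussian_filter1d(points, sigma=sigma, axis=0)
    smoothed[0], smoothed[-1] = points[0], points[-1]
    return smoothed

streamline = np.cumsum(np.random.rand(100, 3), axis=0)  # hypothetical input
print(smooth_streamline(streamline).shape)  # (100, 3), same point count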
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_tractogram_split", + "docstring": "Split a tractogram into multiple files, two options available:\nSplit into X files, or split into files of Y streamlines.\n\nBy default, streamlines to add to each chunk will be chosen randomly.\nOptionally, you can split streamlines...\n - sequentially (the first n/nb_chunks streamlines in the first chunk and so\n on).\n - randomly, but per Quickbundles clusters.\n\nFormerly: scil_split_tractogram.py", + "help": "usage: scil_tractogram_split.py [-h] [--out_dir OUT_DIR]\n (--chunk_size CHUNK_SIZE | --nb_chunks NB_CHUNKS)\n [--split_per_cluster | --do_not_randomize]\n [--qbx_thresholds t [t ...]] [--seed SEED]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_prefix\n\nSplit a tractogram into multiple files, two options available:\nSplit into X files, or split into files of Y streamlines.\n\nBy default, streamlines to add to each chunk will be chosen randomly.\nOptionally, you can split streamlines...\n - sequentially (the first n/nb_chunks streamlines in the first chunk and so\n on).\n - randomly, but per Quickbundles clusters.\n\nFormerly: scil_split_tractogram.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_prefix Prefix for the output tractogram, index will be appended \n automatically (e.g. _0.trk), based on input type.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Put all output tractograms in a specific directory.\n --chunk_size CHUNK_SIZE\n The maximum number of streamlines per file.\n --nb_chunks NB_CHUNKS\n Divide the file into equal parts.\n --split_per_cluster If set, splitting will be done per cluster (computed with \n Quickbundles) to ensure that at least some streamlines are \n kept from each bundle in each chunk. Else, random splitting is\n performed (default).\n --do_not_randomize If set, splitting is done sequentially through the original \n sft instead of using random indices.\n --qbx_thresholds t [t ...]\n If you choose option '--split_per_cluster', you may set the \n QBx threshold value(s) here. Default: [40, 30, 20]\n --seed SEED Use a specific random seed for the subsampling.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_viz_bingham_fit", + "docstring": "Visualize 2-dimensional Bingham volume slice loaded from disk. The volume is\nassumed to be saved from scil_fodf_to_bingham.py.\n\nGiven an image of Bingham coefficients, this script displays a slice in a\ngiven orientation.", + "help": "usage: scil_viz_bingham_fit.py [-h] [--slice_index SLICE_INDEX]\n [--win_dims WIDTH HEIGHT]\n [--interactor {image,trackball}]\n [--axis_name {sagittal,coronal,axial}]\n [--silent] [--output OUTPUT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--color_per_lobe]\n in_bingham\n\nVisualize 2-dimensional Bingham volume slice loaded from disk. 
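The two splitting modes above differ only in how the number of chunks is derived; a minimal sketch of that logic (hypothetical helper, not scilpy's code):

import numpy as np

def chunk_indices(nb_streamlines, chunk_size=None, nb_chunks=None, seed=None):
    # --chunk_size caps streamlines per file; --nb_chunks fixes the file count.
    indices = np.random.default_rng(seed).permutation(nb_streamlines)  # random by default
    if chunk_size is not None:
        nb_chunks = int(np.ceil(nb_streamlines / chunk_size))
    return np.array_split(indices, nb_chunks)

print([len(c) for c in chunk_indices(10, nb_chunks=3, seed=0)])  # -> chunks of sizes [4, 3, 3]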
The volume is\nassumed to be saved from scil_fodf_to_bingham.py.\n\nGiven an image of Bingham coefficients, this script displays a slice in a\ngiven orientation.\n\npositional arguments:\n in_bingham Input SH image file.\n\noptions:\n -h, --help show this help message and exit\n --slice_index SLICE_INDEX\n Index of the slice to visualize along a given axis. Defaults to middle of volume.\n --win_dims WIDTH HEIGHT\n The dimensions for the vtk window. [(768, 768)]\n --interactor {image,trackball}\n Specify interactor mode for vtk window. [trackball]\n --axis_name {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n --silent Disable interactive visualization.\n --output OUTPUT Path to output file.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Name of the sphere used to reconstruct SF. [symmetric362]\n --color_per_lobe Color each Bingham distribution with a different color. [False]\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_viz_bundle", + "docstring": "Visualize bundles.\n\nExample usages:\n\n# Visualize streamlines as tubes, each bundle with a different color\n>>> scil_viz_bundle.py path_to_bundles/ --shape tube --random_coloring 1337\n\n# Visualize a tractogram with streamlines drawn as lines, colored with\n# their local orientation, but only load 1 in 10 streamlines\n>>> scil_viz_bundle.py tractogram.trk --shape line --subsample 10\n\n# Visualize CSTs as large tubes and color them from a list of colors in a file\n>>> scil_viz_bundle.py path_to_bundles/CST_* --width 0.5\n --color_dict colors.json", + "help": "usage: scil_viz_bundle.py [-h]\n [--random_coloring SEED | --uniform_coloring R G B | --local_coloring | --color_dict JSON | --color_from_streamlines KEY | --color_from_points KEY]\n [--shape {line,tube}] [--width WIDTH]\n [--subsample SUBSAMPLE] [--downsample DOWNSAMPLE]\n [--background R G B] [-v [{DEBUG,INFO,WARNING}]]\n in_bundles [in_bundles ...]\n\nVisualize bundles.\n\nExample usages:\n\n# Visualize streamlines as tubes, each bundle with a different color\n>>> scil_viz_bundle.py path_to_bundles/ --shape tube --random_coloring 1337\n\n# Visualize a tractogram with streamlines drawn as lines, colored with\n# their local orientation, but only load 1 in 10 streamlines\n>>> scil_viz_bundle.py tractogram.trk --shape line --subsample 10\n\n# Visualize CSTs as large tubes and color them from a list of colors in a file\n>>> scil_viz_bundle.py path_to_bundles/CST_* --width 0.5\n --color_dict colors.json\n\npositional arguments:\n in_bundles List of tractography files supported by nibabel.\n\noptions:\n -h, --help show this help message and exit\n --shape {line,tube} Display streamlines either as lines or tubes.\n [Default: tube]\n --width WIDTH Width of tubes or lines representing streamlines\n [Default: 0.25]\n --subsample SUBSAMPLE\n Only load 1 in N streamlines.\n [Default: 1]\n --downsample DOWNSAMPLE\n Downsample streamlines to N points.\n [Default: None]\n --background R G B RGB values [0, 255] of the color of the background.\n [Default: [0, 0, 0]]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n\nColouring options:\n --random_coloring SEED\n Assign a random color to bundles.\n --uniform_coloring R G B\n Assign a uniform color to streamlines.\n --local_coloring Assign coloring to streamlines depending on their local orientations.\n --color_dict JSON JSON file containing colors for each bundle.\n Bundle filenames are indicated as keys and colors as values.\n A 'default' key and value can be included.\n --color_from_streamlines KEY\n Extract a color per streamline from the data_per_streamline property of the tractogram at the specified key.\n --color_from_points KEY\n Extract a color per point from the data_per_point property of the tractogram at the specified key.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_viz_bundle_screenshot_mni", + "docstring": "Register bundle to a template for screenshots using a reference.\nThe template can be any MNI152 (any resolution, cropped or not).\nIf your in_anat has a skull, select a MNI152 template with a skull and\nvice-versa.\n\nIf the bundle is already in MNI152 space, do not use --target_template.\n\nAxial, coronal and sagittal slices are captured.\nSagittal can be captured from the left (default) or the right.\n\nFor the --roi argument: If 1 value is provided, the ROI will be white,\nif 4 values are provided, the ROI will be colored with the RGB values\nprovided, if 5 values are provided, it is RGBA (values from 0-255).", + "help": "usage: scil_viz_bundle_screenshot_mni.py [-h]\n [--target_template TARGET_TEMPLATE]\n [--local_coloring | --uniform_coloring R G B | --reference_coloring COLORBAR]\n [--roi ROI [ROI ...]] [--right]\n [--anat_opacity ANAT_OPACITY]\n [--output_suffix OUTPUT_SUFFIX]\n [--out_dir OUT_DIR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle in_anat\n\nRegister bundle to a template for screenshots using a reference.\nThe template can be any MNI152 (any resolution, cropped or not).\nIf your in_anat has a skull, select a MNI152 template with a skull and\nvice-versa.\n\nIf the bundle is already in MNI152 space, do not use --target_template.\n\nAxial, coronal and sagittal slices are captured.\nSagittal can be captured from the left (default) or the right.\n\nFor the --roi argument: If 1 value is provided, the ROI will be white,\nif 4 values are provided, the ROI will be colored with the RGB values\nprovided, if 5 values are provided, it is RGBA (values from 0-255).\n\npositional arguments:\n in_bundle Path of the input bundle.\n in_anat Path of the reference file (.nii or nii.gz).\n\noptions:\n -h, --help show this help message and exit\n --target_template TARGET_TEMPLATE\n Path to the target MNI152 template for registration. If in_anat has a skull, select a MNI152 template with a skull and vice-versa.\n --local_coloring Color streamlines using local segments orientation.\n --uniform_coloring R G B\n Color streamlines with uniform coloring.\n --reference_coloring COLORBAR\n Color streamlines with reference coloring (0-255).\n --roi ROI [ROI ...] Path to a ROI file (.nii or nii.gz).\n --right Take screenshot from the right instead of the left for the sagittal plane.\n --anat_opacity ANAT_OPACITY\n Set the opacity for the anatomy, use 0 for complete transparency, 1 for opaque. [0.3]\n --output_suffix OUTPUT_SUFFIX\n Add a suffix to the output, else the axis name is used.\n --out_dir OUT_DIR Put all images in a specific directory.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
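The --color_dict format above (bundle filenames as keys, colors as values, optional 'default' entry) is easy to generate; a minimal sketch, assuming RGB values in the 0-255 range and hypothetical bundle names:

import json

colors = {
    "CST_L.trk": [255, 0, 0],    # hypothetical bundle filenames
    "CST_R.trk": [0, 0, 255],
    "default": [128, 128, 128],  # fallback for unlisted bundles
}
with open("colors.json", "w") as f:
    json.dump(colors, f, indent=2)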
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_viz_bundle_screenshot_mosaic", + "docstring": "Visualize bundles from a list. The script will output a mosaic (image) with\nscreenshots, 6 views per bundle in the list.", + "help": "usage: scil_viz_bundle_screenshot_mosaic.py [-h] [--uniform_coloring R G B]\n [--random_coloring SEED]\n [--zoom ZOOM] [--ttf TTF]\n [--ttf_size TTF_SIZE]\n [--opacity_background OPACITY_BACKGROUND]\n [--resolution_of_thumbnails RESOLUTION_OF_THUMBNAILS]\n [--light_screenshot]\n [--no_information]\n [--no_bundle_name]\n [--no_streamline_number]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_volume in_bundles\n [in_bundles ...] out_image\n\nVisualize bundles from a list. The script will output a mosaic (image) with\nscreenshots, 6 views per bundle in the list.\n\npositional arguments:\n in_volume Volume used as background (e.g. T1, FA, b0).\n in_bundles List of tractography files supported by nibabel or binary mask files.\n out_image Name of the output image mosaic (e.g. mosaic.jpg, mosaic.png).\n\noptions:\n -h, --help show this help message and exit\n --uniform_coloring R G B\n Assign a uniform color to streamlines (or ROIs).\n --random_coloring SEED\n Assign a random color to streamlines (or ROIs).\n --zoom ZOOM Rendering zoom. A value greater than 1 is a zoom-in,\n a value less than 1 is a zoom-out [1.0].\n --ttf TTF Path of the true type font to use for legends.\n --ttf_size TTF_SIZE Font size (int) to use for the legends [35].\n --opacity_background OPACITY_BACKGROUND\n Opacity of background image, between 0 and 1.0 [0.4].\n --resolution_of_thumbnails RESOLUTION_OF_THUMBNAILS\n Resolution of thumbnails used in mosaic [300].\n --light_screenshot Keep only 3 views instead of 6 [False].\n --no_information Don't display axis and bundle information [False].\n --no_bundle_name Don't display bundle name [False].\n --no_streamline_number\n Don't display bundle streamlines number [False].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_viz_connectivity", + "docstring": "Script to display a connectivity matrix and adjust the desired visualization.\nMade to work with scil_tractogram_segment_bundles_for_connectivity.py and\nscil_connectivity_reorder_rois.py.\n\nThis script can either display the axis labels as:\n- Coordinates (0..N)\n- Labels (using --labels_list)\n- Names (using --labels_list and --lookup_table)\nExamples of labels_list.txt and lookup_table.json can be found in the\nfreesurfer_flow output (https://github.com/scilus/freesurfer_flow)\n\nIf the matrix was made from a bigger matrix using\nscil_connectivity_reorder_rois.py, provide the text file(s), using\n--labels_list and/or --reorder_txt.\n\nThe chord chart always displays parts in the order they are defined\n(clockwise), the color is attributed in that order following a colormap. The\nthickness of the line represents the 'size/intensity': the greater the value,\nthe thicker the line will be. 
In order to hide the low values, two options are\navailable:\n- Angle threshold + alpha, any connections with a small angle on the chord\n chart will be slightly transparent to increase the focus on bigger\n connections.\n- Percentile, hide any connections with a value below that percentile", + "help": "", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_viz_dti_screenshot", + "docstring": "Register DWI to a template for screenshots.\nThe templates are on http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009\n\nFor quick quality control, the MNI template can be downsampled to 2mm iso.\nAxial, coronal and sagittal slices are captured.", + "help": "usage: scil_viz_dti_screenshot.py [-h] [--shells SHELLS [SHELLS ...]]\n [--out_suffix OUT_SUFFIX]\n [--out_dir OUT_DIR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_template\n\nRegister DWI to a template for screenshots.\nThe templates are on http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009\n\nFor quick quality control, the MNI template can be downsampled to 2mm iso.\nAxial, coronal and sagittal slices are captured.\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bval file, in FSL format.\n in_bvec Path of the bvec file, in FSL format.\n in_template Path to the target MNI152 template for \n registration, use the one provided online.\n\noptions:\n -h, --help show this help message and exit\n --shells SHELLS [SHELLS ...]\n Shells to use for DTI fit (usually below 1200), b0 must be listed.\n --out_suffix OUT_SUFFIX\n Add a suffix to the output, else the axis name is used.\n --out_dir OUT_DIR Put all images in a specific directory.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_viz_fodf", + "docstring": "Visualize 2-dimensional fODF slice loaded from disk.\n\nGiven an image of SH coefficients, this script displays a slice in a\ngiven orientation. The user can also add a background on top of which the\nfODF are to be displayed. Using a full SH basis, the script can be used to\nvisualize asymmetric fODF. The user can supply a peaks image to visualize\npeaks on top of fODF.\n\nIf a transparency_mask is given (e.g. a brain mask), all values outside the\nnon-zero region of the mask are set to full transparency in the saved scene.\n\n!!! CAUTION !!! 
The script is memory intensive (about 9kB of allocated RAM per\nvoxel, or 9GB for a 1M voxel volume) with a sphere interpolated to 362 points.", + "help": "usage: scil_viz_fodf.py [-h] [--slice_index SLICE_INDEX]\n [--win_dims WIDTH HEIGHT]\n [--interactor {image,trackball}]\n [--axis_name {sagittal,coronal,axial}] [--silent]\n [--in_transparency_mask IN_TRANSPARENCY_MASK]\n [--output OUTPUT] [-f]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--sphere {repulsion200,repulsion100,symmetric724,symmetric642,symmetric362,repulsion724}]\n [--sph_subdivide SPH_SUBDIVIDE] [--mask MASK]\n [--colormap COLORMAP | --color_rgb COLOR_RGB COLOR_RGB COLOR_RGB]\n [--scale SCALE] [--radial_scale_off] [--norm_off]\n [-v [{DEBUG,INFO,WARNING}]] [--background BACKGROUND]\n [--bg_range MIN MAX] [--bg_opacity BG_OPACITY]\n [--bg_offset BG_OFFSET]\n [--bg_interpolation {nearest,linear}]\n [--bg_color BG_COLOR BG_COLOR BG_COLOR]\n [--peaks PEAKS]\n [--peaks_color PEAKS_COLOR PEAKS_COLOR PEAKS_COLOR]\n [--peaks_width PEAKS_WIDTH]\n [--peaks_values PEAKS_VALUES | --peaks_length PEAKS_LENGTH]\n [--variance VARIANCE] [--variance_k VARIANCE_K]\n [--var_color VAR_COLOR VAR_COLOR VAR_COLOR]\n in_fodf\n\nVisualize 2-dimensional fODF slice loaded from disk.\n\nGiven an image of SH coefficients, this script displays a slice in a\ngiven orientation. The user can also add a background on top of which the\nfODF are to be displayed. Using a full SH basis, the script can be used to\nvisualize asymmetric fODF. The user can supply a peaks image to visualize\npeaks on top of fODF.\n\nIf a transparency_mask is given (e.g. a brain mask), all values outside the\nnon-zero region of the mask are set to full transparency in the saved scene.\n\n!!! CAUTION !!! The script is memory intensive (about 9kB of allocated RAM per\nvoxel, or 9GB for a 1M voxel volume) with a sphere interpolated to 362 points.\n\npositional arguments:\n in_fodf Input SH image file.\n\noptions:\n -h, --help show this help message and exit\n --slice_index SLICE_INDEX\n Index of the slice to visualize along a given axis. Defaults to middle of volume.\n --win_dims WIDTH HEIGHT\n The dimensions for the vtk window. [(768, 768)]\n --interactor {image,trackball}\n Specify interactor mode for vtk window. [trackball]\n --axis_name {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n --silent Disable interactive visualization.\n --in_transparency_mask IN_TRANSPARENCY_MASK\n Input mask image file.\n --output OUTPUT Path to output file.\n -f Force overwriting of the output files.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sphere {repulsion200,repulsion100,symmetric724,symmetric642,symmetric362,repulsion724}\n Name of the sphere used to reconstruct SF. [symmetric362]\n --sph_subdivide SPH_SUBDIVIDE\n Number of subdivisions for given sphere. If not supplied, use the given sphere as is.\n --mask MASK Optional mask file. 
Only fODF inside the mask are displayed.\n --colormap COLORMAP Colormap for the ODF slicer. If None, then an RGB colormap will be used. [None]\n --color_rgb COLOR_RGB COLOR_RGB COLOR_RGB\n Uniform color for the ODF slicer given as RGB, scaled between 0 and 1. [None]\n --scale SCALE Scaling factor for FODF. [0.5]\n --radial_scale_off Disable radial scale for ODF slicer.\n --norm_off Disable normalization of ODF slicer.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nBackground arguments:\n --background BACKGROUND\n Background image file. If RGB, values must be between 0 and 255.\n --bg_range MIN MAX The range of values mapped to range [0, 1] for background image. [(bg.min(), bg.max())]\n --bg_opacity BG_OPACITY\n The opacity of the background image. Opacity of 0.0 means transparent and 1.0 is completely visible. [1.0]\n --bg_offset BG_OFFSET\n The offset of the background image. [0.5]\n --bg_interpolation {nearest,linear}\n Interpolation mode for the background image. [nearest]\n --bg_color BG_COLOR BG_COLOR BG_COLOR\n The color of the overall background, behind everything. Must be RGB values scaled between 0 and 1. [(0, 0, 0)]\n\nPeaks arguments:\n --peaks PEAKS Peaks image file.\n --peaks_color PEAKS_COLOR PEAKS_COLOR PEAKS_COLOR\n Color used for peaks, as RGB values scaled between 0 and 1. If None, then an RGB colormap is used. [None]\n --peaks_width PEAKS_WIDTH\n Width of peaks segments. [1.0]\n\nPeaks scaling arguments:\n Choose between peaks values and arbitrary length.\n\n --peaks_values PEAKS_VALUES\n Peaks values file.\n --peaks_length PEAKS_LENGTH\n Length of the peaks segments. [0.65]\n\nVariance arguments:\n For the visualization of fodf uncertainty, the variance is used as follows: mean + k * sqrt(variance), where mean is the input fodf (in_fodf) and k is the scaling factor (variance_k).\n\n --variance VARIANCE FODF variance file.\n --variance_k VARIANCE_K\n Scaling factor (k) for the computation of the fodf uncertainty. [1]\n --var_color VAR_COLOR VAR_COLOR VAR_COLOR\n Color of variance outline. Must be RGB values scaled between 0 and 1. [(1, 1, 1)]\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_viz_gradients_screenshot", + "docstring": "Visualization for directions on a sphere, either from a gradient sampling (i.e.\na list of b-vectors) or from a Dipy sphere.", + "help": "usage: scil_viz_gradients_screenshot.py [-h]\n (--in_gradient_scheme IN_GRADIENT_SCHEME [IN_GRADIENT_SCHEME ...] | --dipy_sphere {symmetric362,symmetric642,symmetric724,repulsion724,repulsion100,repulsion200})\n [--dis-sym]\n [--out_basename OUT_BASENAME]\n [--res RES] [--dis-sphere]\n [--dis-proj] [--plot_shells]\n [--same-color] [--opacity OPACITY]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n\nVisualization for directions on a sphere, either from a gradient sampling (i.e.\na list of b-vectors) or from a Dipy sphere.\n\noptions:\n -h, --help show this help message and exit\n --in_gradient_scheme IN_GRADIENT_SCHEME [IN_GRADIENT_SCHEME ...]\n Gradient sampling filename. 
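The uncertainty formula above is a direct elementwise operation; a minimal numpy sketch (array shapes are hypothetical):

import numpy as np

mean_sh = np.random.rand(5, 5, 5, 28)       # stands in for in_fodf
variance = np.random.rand(5, 5, 5, 28)      # stands in for --variance
k = 1.0                                     # --variance_k default
envelope = mean_sh + k * np.sqrt(variance)  # what the variance outline shows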
(only accepts .bvec and\n .bval together or only .b).\n --dipy_sphere {symmetric362,symmetric642,symmetric724,repulsion724,repulsion100,repulsion200}\n Dipy sphere choice.\n --dis-sym Disable antipodal symmetry.\n --out_basename OUT_BASENAME\n Output file name for the picture, without extension\n (will be png file(s)).\n --res RES Resolution of the output picture(s).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n\nEnable/Disable renderings:\n --dis-sphere Disable the rendering of the sphere.\n --dis-proj Disable rendering of the projection supershell.\n --plot_shells Enable rendering each shell individually.\n\nRendering options:\n --same-color Use the same color for all shells.\n --opacity OPACITY Opacity for the shells.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_viz_tractogram_seeds", + "docstring": "Visualize seeds used to generate the tractogram or bundle.\nWhen tractography was run, each streamline produced by the tracking algorithm\nsaved its seeding point (its origin).\n\nThe tractogram must have been generated from scil_tracking_local.py or\nscil_tracking_pft.py with the --save_seeds option.", + "help": "usage: scil_viz_tractogram_seeds.py [-h] [--save SAVE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n tractogram\n\nVisualize seeds used to generate the tractogram or bundle.\nWhen tractography was run, each streamline produced by the tracking algorithm\nsaved its seeding point (its origin).\n\nThe tractogram must have been generated from scil_tracking_local.py or\nscil_tracking_pft.py with the --save_seeds option.\n\npositional arguments:\n tractogram Tractogram file (must be trk)\n\noptions:\n -h, --help show this help message and exit\n --save SAVE If set, save a screenshot of the result in the specified filename\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_viz_tractogram_seeds_3d", + "docstring": "Visualize seeds as 3D points, with heatmaps corresponding to seed density\n\nExample usages:\n\n>>> scil_viz_tractogram_seeds_3d.py seeds.nii.gz --tractogram tractogram.trk", + "help": "usage: scil_viz_tractogram_seeds_3d.py [-h] [--tractogram TRACTOGRAM]\n [--colormap COLORMAP]\n [--seed_opacity SEED_OPACITY]\n [--tractogram_shape {line,tube}]\n [--tractogram_opacity TRACTOGRAM_OPACITY]\n [--tractogram_width TRACTOGRAM_WIDTH]\n [--tractogram_color R G B [R G B ...]]\n [--background R G B [R G B ...]]\n [-v [{DEBUG,INFO,WARNING}]]\n in_seed_map\n\nVisualize seeds as 3D points, with heatmaps corresponding to seed density\n\nExample usages:\n\n>>> scil_viz_tractogram_seeds_3d.py seeds.nii.gz --tractogram tractogram.trk\n\npositional arguments:\n in_seed_map Seed density map.\n\noptions:\n -h, --help show this help message and exit\n --tractogram TRACTOGRAM\n Tractogram corresponding to the seeds.\n --colormap COLORMAP Name of the map for the density coloring. 
Can be any colormap that matplotlib offers.\n [Default: bone]\n --seed_opacity SEED_OPACITY\n Opacity of the contour generated.\n [Default: 0.5]\n --tractogram_shape {line,tube}\n Display streamlines either as lines or tubes.\n [Default: tube]\n --tractogram_opacity TRACTOGRAM_OPACITY\n Opacity of the streamlines.\n [Default: 0.5]\n --tractogram_width TRACTOGRAM_WIDTH\n Width of tubes or lines representing streamlines.\n [Default: 0.05]\n --tractogram_color R G B [R G B ...]\n Color for the tractogram.\n --background R G B [R G B ...]\n RGB values [0, 255] of the color of the background.\n [Default: [0, 0, 0]]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_viz_volume_histogram", + "docstring": "Script to display a histogram of a metric (FA, MD, etc.) from a binary mask\n(wm mask, vascular mask, etc.).\nThese two images must be coregistered with each other.\n\n>>> scil_viz_volume_histogram.py metric.nii.gz mask_bin.nii.gz 8\n out_filename_image.png", + "help": "usage: scil_viz_volume_histogram.py [-h] [--title TITLE] [--x_label X_LABEL]\n [--colors COLORS] [--show_only]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_metric in_mask n_bins out_png\n\nScript to display a histogram of a metric (FA, MD, etc.) from a binary mask\n(wm mask, vascular mask, etc.).\nThese two images must be coregistered with each other.\n\n>>> scil_viz_volume_histogram.py metric.nii.gz mask_bin.nii.gz 8\n out_filename_image.png\n\npositional arguments:\n in_metric Metric map, ex: FA, MD, etc.\n in_mask Binary mask data to extract values.\n n_bins Number of bins to use for the histogram.\n out_png Output filename for the figure.\n\noptions:\n -h, --help show this help message and exit\n --show_only Do not save the figure, only display.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nHistogram options:\n --title TITLE Use the provided info for the histogram title. [Histogram]\n --x_label X_LABEL Use the provided info for the x axis name.\n --colors COLORS Use the provided info for the bars color. [#0504aa]\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_viz_volume_scatterplot", + "docstring": "Script to display a scatter plot between two maps (ex. FA and MD, ihMT and MT).\nBy default, no mask is applied to the data.\nDifferent options are available to mask or threshold data:\n - a binary mask\n - two probability maps, which can be used to threshold maps with\n --in_prob_maps. 
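Conceptually, the histogram script above reduces to masking and binning; a minimal sketch using the filenames and defaults from its help text (an illustration, not the script's code):

import matplotlib.pyplot as plt
import nibabel as nib

metric = nib.load("metric.nii.gz").get_fdata()      # e.g. FA
mask = nib.load("mask_bin.nii.gz").get_fdata() > 0  # binary mask
plt.hist(metric[mask], bins=8, color="#0504aa")     # 8 bins, default bar color
plt.title("Histogram")                              # default title
plt.savefig("out_filename_image.png")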
The same threshold is applied to both maps (--thr).\n - parcellation, which can be used to plot values for each region of\n an atlas (--in_atlas) or a subset of regions (--specific_label).\n The atlas option requires a json file (--atlas_lut) with indices and\n names of each label corresponding to the atlas as follows:\n \"1\": \"lh_A8m\",\n \"2\": \"rh_A8m\",\n The numbers must correspond to the label indices in the json file.\n\nBe careful: you cannot use all of these options at the same time.\n\nFor general scatter plot without mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n\nFor scatter plot with mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_bin_mask mask_wm.nii.gz\n\nFor tissue probability scatter plot:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_prob_maps wm_map.nii.gz gm_map.nii.gz\n\nFor scatter plot using atlas:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n --specific_label 34 67 87", + "help": "usage: scil_viz_volume_scatterplot.py [-h] [--out_dir OUT_DIR] [--thr THR]\n [--not_exclude_zero]\n [--in_bin_mask IN_BIN_MASK | --in_prob_maps IN_PROB_MAPS IN_PROB_MAPS | --in_atlas IN_ATLAS]\n [--atlas_lut ATLAS_LUT]\n [--specific_label SPECIFIC_LABEL [SPECIFIC_LABEL ...]]\n [--in_folder] [--title TITLE]\n [--x_label X_LABEL] [--y_label Y_LABEL]\n [--label LABEL]\n [--label_prob LABEL_PROB]\n [--marker MARKER]\n [--marker_size MARKER_SIZE]\n [--transparency TRANSPARENCY]\n [--dpi DPI] [--colors color1 color2]\n [--show_only]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_x_map in_y_map out_name\n\nScript to display a scatter plot between two maps (ex. FA and MD, ihMT and MT).\nBy default, no mask is applied to the data.\nDifferent options are available to mask or threshold data:\n - a binary mask\n - two probability maps, which can be used to threshold maps with\n --in_prob_maps. 
The same threshold is applied to both maps (--thr).\n - parcellation, which can be used to plot values for each region of\n an atlas (--in_atlas) or a subset of regions (--specific_label).\n The atlas option requires a json file (--atlas_lut) with indices and\n names of each label corresponding to the atlas as follows:\n \"1\": \"lh_A8m\",\n \"2\": \"rh_A8m\",\n The numbers must correspond to the label indices in the json file.\n\nBe careful: you cannot use all of these options at the same time.\n\nFor general scatter plot without mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n\nFor scatter plot with mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_bin_mask mask_wm.nii.gz\n\nFor tissue probability scatter plot:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_prob_maps wm_map.nii.gz gm_map.nii.gz\n\nFor scatter plot using atlas:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n --specific_label 34 67 87\n\npositional arguments:\n in_x_map Map in x axis, FA for example.\n in_y_map Map in y axis, MD for example.\n out_name Output filename for the figure without extension.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Output directory to save scatter plot.\n --thr THR Use to apply threshold only on probability maps (same for both maps) with --in_prob_maps option. [0.9]\n --not_exclude_zero Keep zero value in data.\n --in_bin_mask IN_BIN_MASK\n Binary mask. Use this option to extract x and y map values from a specific mask or region: wm_mask or roi_mask for example.\n --in_prob_maps IN_PROB_MAPS IN_PROB_MAPS\n Probability maps, WM and GM for example.\n --in_atlas IN_ATLAS Path to the input atlas image.\n --show_only Do not save the figure, only display. Not available with the --in_atlas option.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nAtlas options:\n --atlas_lut ATLAS_LUT\n Path of the LUT file corresponding to atlas used to name the regions of interest.\n --specific_label SPECIFIC_LABEL [SPECIFIC_LABEL ...]\n Label list to use for the scatter plot. Labels must correspond to the atlas LUT file. [None]\n --in_folder Save label plots in subfolder \"Label_plots\".\n\nScatter plot options:\n --title TITLE Use the provided info for the title name. [Scatter Plot]\n --x_label X_LABEL Use the provided info for the x axis name. [x]\n --y_label Y_LABEL Use the provided info for the y axis name. [y]\n --label LABEL Use the provided info for the legend box corresponding to mask or first probability map. [None]\n --label_prob LABEL_PROB\n Use the provided info for the legend box corresponding to the second probability map. [Threshold prob_map 2]\n --marker MARKER Use the provided info for the marker shape. [.]\n --marker_size MARKER_SIZE\n Use the provided info for the marker size. [15]\n --transparency TRANSPARENCY\n Use the provided info for the point transparency. [0.4]\n --dpi DPI Use the provided info for the dpi resolution. 
[300]\n --colors color1 color2\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_viz_volume_screenshot", + "docstring": "Take screenshot(s) of one or more slices in a given image volume along the\nrequested axis. If slice indices are not provided, all slices in the volume\nare used. The names of the output images are suffixed with _slice_{id}, with\nid being the slice number in the volume. If a labelmap image is provided (e.g.\na tissue segmentation map), it is overlaid on the volume slices. Same goes if\na mask is provided, with the difference that it can be rendered as a\ntransparency overlay as well as a contour.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plotted.\n\nExample:\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_annotated.png\n --display_slice_number --display_lr\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_masked.png\n --transparency brainmask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial.png\n --slices 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_sagittal.png --axis sagittal\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_plasma_cmap.png\n --slices 30 40 50 60 70 80 90 100 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_overlay.png\n --slices 30 40 50 60 70 80 90 100 --overlays brain_mask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_contour.png\n --slices 30 40 50 60 70 80 90 100\n --overlays brain_mask.nii.gz --overlays_as_contours\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_map.png\n --slices 30 40 50 60 70 80 90 100 --labelmap tissue_map.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_viridis_cmap.png\n --slices 30 40 50 60 70 80 90 100\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_peaks.png\n --slices 30 40 50 60 70 80 90 100 --peaks peaks.nii.gz --volume_opacity 0.5", + "help": "usage: scil_viz_volume_screenshot.py [-h]\n [--volume_cmap_name VOLUME_CMAP_NAME]\n [--volume_opacity VOLUME_OPACITY]\n [--transparency TRANSPARENCY]\n [--slices SID [SID ...]]\n [--axis {sagittal,coronal,axial}]\n [--size WIDTH HEIGHT]\n [--display_slice_number] [--display_lr]\n [--labelmap LABELMAP]\n [--labelmap_cmap_name LABELMAP_CMAP_NAME]\n [--labelmap_opacity LABELMAP_OPACITY]\n [--overlays OVERLAYS [OVERLAYS ...]]\n [--overlays_as_contours]\n [--overlays_colors R G B [R G B ...]]\n [--overlays_opacity OVERLAYS_OPACITY]\n [--peaks PEAKS [PEAKS ...]]\n [--peaks_width PEAKS_WIDTH]\n [--peaks_opacity PEAKS_OPACITY]\n [-v [{DEBUG,INFO,WARNING}]]\n volume out_fname\n\nTake screenshot(s) of one or more slices in a given image volume along the\nrequested axis. If slice indices are not provided, all slices in the volume\nare used. The names of the output images are suffixed with _slice_{id}, with\nid being the slice number in the volume. If a labelmap image is provided (e.g.\na tissue segmentation map), it is overlaid on the volume slices. 
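The --atlas_lut format shown above maps stringified label indices to region names; a minimal loading sketch (hypothetical filename, matching the atlas.json from the usage examples):

import json

with open("atlas.json") as f:
    lut = {int(idx): name for idx, name in json.load(f).items()}
print(lut[1])  # -> "lh_A8m", per the example entries above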
Same goes if\na mask is provided, with the difference that it can be rendered as a\ntransparency overlay as well as a contour.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plot.\n\nExample:\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_annotated.png\n --display_slice_number --display_lr\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_masked.png\n --transparency brainmask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial.png\n --slices 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_sagittal.png --axis sagittal\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_plasma_cmap.png\n --slices 30 40 50 60 70 80 90 100 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_overlay.png\n --slices 30 40 50 60 70 80 90 100 --overlays brain_mask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_contour.png\n --slices 30 40 50 60 70 80 90 100\n --overlays brain_mask.nii.gz --overlays_as_contours\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_map.png\n --slices 30 40 50 60 70 80 90 100 --labelmap tissue_map.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_viridis_cmap.png\n --slices 30 40 50 60 70 80 90 100\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_peaks.png\n --slices 30 40 50 60 70 80 90 100 --peaks peaks.nii.gz --volume_opacity 0.5\n\npositional arguments:\n volume Input 3D Nifti file (.nii/.nii.gz).\n out_fname Name of the output image(s). If multiple slices are provided (or none), their index will be append to the name (e.g. volume.jpg, volume.png becomes volume_slice_0.jpg, volume_slice_0.png).\n\noptions:\n -h, --help show this help message and exit\n --transparency TRANSPARENCY\n Transparency Nifti image (.nii/.nii.gz). Can either be a binary mask or a scalar image in the range [0, 1].\n --size WIDTH HEIGHT Size of the output image. [(768, 768)]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nSlicing:\n --slices SID [SID ...]\n Slice indices to screenshot. If None are supplied, all slices inside the transparency mask are selected.\n --axis {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n\nInput overlays:\n --labelmap LABELMAP Input labelmap file (.nii/.nii.gz).\n --overlays OVERLAYS [OVERLAYS ...]\n 3D Nifti image(s) to overlay (.nii/.nii.gz).\n --peaks PEAKS [PEAKS ...]\n Peaks Nifti image (.nii/.nii.gz).\n\nVolume rendering:\n --volume_cmap_name VOLUME_CMAP_NAME\n Colormap name for the 3D Nifti image data. [None]\n --volume_opacity VOLUME_OPACITY\n Opacity value for the 3D Nifti image data. [1.0]\n --labelmap_cmap_name LABELMAP_CMAP_NAME\n Colormap name for the labelmap image data. [viridis]\n --labelmap_opacity LABELMAP_OPACITY\n Opacity value for the labelmap image data. [0.5]\n\nPeaks rendering:\n --peaks_width PEAKS_WIDTH\n Width of the peaks lines. [3.0]\n --peaks_opacity PEAKS_OPACITY\n Opacity value for the peaks overlay. [1.0]\n\nOverlay rendering:\n --overlays_as_contours\n Display overlays contours and reduce the opacity of their inner region (see the `--overlays_opacity` argument).\n --overlays_colors R G B [R G B ...]\n Colors for the overlays or contours. You may provide a single color, for all overlays/contours, or one color for each. 
Each color is given as three values: R G B\n --overlays_opacity OVERLAYS_OPACITY\n Opacity value for the masks overlays. When combined with `--overlays_as_contours`, this will be the opacity of the region inside the computed contours. [0.5]\n\nAnnotations:\n --display_slice_number\n If true, displays the slice number in the upper left corner.\n --display_lr If true, add left and right annotations to the images.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_viz_volume_screenshot_mosaic", + "docstring": "Compose a mosaic of screenshots of the given image volume slices along the\nrequested axis. The provided transparency mask (e.g. a brain mask volume) is\nused to set the screenshot values outside the mask non-zero values to full\ntransparency. Additionally, if a labelmap image is provided (e.g. a tissue\nsegmentation map), it is overlaid on the volume slices. Also, a series of\nmasks can be provided and will be used to generate contours overlaid on each\nvolume slice.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plot.\n\nThe screenshots are overlapped according to the given factors.\n\nThe mosaic supports either horizontal, vertical or matrix arrangements.\n\nExample:\n>>> scil_viz_volume_screenshot_mosaic.py 1 8 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial.png 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --overlap_factor 0.6 0.5 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 tissues.nii.gz brain_mask.nii.gz\n mosaic_overlap_tissues_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_viridis_cmap.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_contours.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --overlays wm_mask.nii.gz gm_mask.nii.gz csf_mask.nii.gz", + "help": "usage: scil_viz_volume_screenshot_mosaic.py [-h]\n [--volume_cmap_name VOLUME_CMAP_NAME]\n [--volume_opacity VOLUME_OPACITY]\n [--axis {sagittal,coronal,axial}]\n [--size WIDTH HEIGHT]\n [--labelmap LABELMAP]\n [--labelmap_cmap_name LABELMAP_CMAP_NAME]\n [--labelmap_opacity LABELMAP_OPACITY]\n [--overlays OVERLAYS [OVERLAYS ...]]\n [--overlays_as_contours]\n [--overlays_colors R G B [R G B ...]]\n [--overlays_opacity OVERLAYS_OPACITY]\n [--overlap rWIDTH rHEIGHT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n rows cols volume transparency\n out_fname SID [SID ...]\n\nCompose a mosaic of screenshots of the given image volume slices along the\nrequested axis. The provided transparency mask (e.g. a brain mask volume) is\nused to set the screenshot values outside the mask non-zero values to full\ntransparency. Additionally, if a labelmap image is provided (e.g. a tissue\nsegmentation map), it is overlaid on the volume slices. 
Also, a series of\nmasks can be provided and will be used to generate contours overlaid on each\nvolume slice.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plot.\n\nThe screenshots are overlapped according to the given factors.\n\nThe mosaic supports either horizontal, vertical or matrix arrangements.\n\nExample:\n>>> scil_viz_volume_screenshot_mosaic.py 1 8 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial.png 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --overlap_factor 0.6 0.5 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 tissues.nii.gz brain_mask.nii.gz\n mosaic_overlap_tissues_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_viridis_cmap.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_contours.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --overlays wm_mask.nii.gz gm_mask.nii.gz csf_mask.nii.gz\n\npositional arguments:\n rows The mosaic row count.\n cols The mosaic column count.\n volume Input 3D Nifti file (.nii/.nii.gz).\n transparency Transparency Nifti image (.nii/.nii.gz). Can either be a binary mask or a scalar image in the range [0, 1].\n out_fname Name of the output image (e.g. img.jpg, img.png).\n SID Slice indices to screenshot.\n\noptions:\n -h, --help show this help message and exit\n --axis {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n --size WIDTH HEIGHT Size of the output image. [(768, 768)]\n --overlap rWIDTH rHEIGHT\n The overlap factor as a ratio of each image dimension. [(0.6, 0.0)]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nInput overlays:\n --labelmap LABELMAP Input labelmap file (.nii/.nii.gz).\n --overlays OVERLAYS [OVERLAYS ...]\n 3D Nifti image(s) to overlay (.nii/.nii.gz).\n\nVolume rendering:\n --volume_cmap_name VOLUME_CMAP_NAME\n Colormap name for the 3D Nifti image data. [None]\n --volume_opacity VOLUME_OPACITY\n Opacity value for the 3D Nifti image data. [1.0]\n --labelmap_cmap_name LABELMAP_CMAP_NAME\n Colormap name for the labelmap image data. [viridis]\n --labelmap_opacity LABELMAP_OPACITY\n Opacity value for the labelmap image data. [0.5]\n\nOverlay rendering:\n --overlays_as_contours\n Display overlays contours and reduce the opacity of their inner region (see the `--overlays_opacity` argument).\n --overlays_colors R G B [R G B ...]\n Colors for the overlays or contours. You may provide a single color, for all overlays/contours, or one color for each. Each color is given as three values: R G B\n --overlays_opacity OVERLAYS_OPACITY\n Opacity value for the masks overlays. When combined with `--overlays_as_contours`, this will be the opacity of the region inside the computed contours. 
[0.5]\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_volume_apply_transform", + "docstring": "Transform Nifti (.nii.gz) using an affine/rigid transformation.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_apply_transform_to_image.py.", + "help": "usage: scil_volume_apply_transform.py [-h] [--inverse] [--keep_dtype]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file in_target_file in_transfo\n out_name\n\nTransform Nifti (.nii.gz) using an affine/rigid transformation.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_apply_transform_to_image.py.\n\npositional arguments:\n in_file Path of the file to be transformed (nii or nii.gz)\n in_target_file Path of the reference target file (.nii.gz).\n in_transfo Path of the file containing the 4x4 \n transformation, matrix (.txt, .npy or .mat).\n out_name Output filename of the transformed data.\n\noptions:\n -h, --help show this help message and exit\n --inverse Apply the inverse transformation.\n --keep_dtype If True, keeps the data_type of the input image (in_file) when saving the output image (out_name).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_volume_b0_synthesis", + "docstring": "Wrapper for SyNb0 available in Dipy, to run it on a single subject.\nRequires Skull-Strip b0 and t1w images as input, the script will normalize the\nt1w's WM to 110, co-register both images, then register it to the appropriate\ntemplate, run SyNb0 and then transform the result back to the original space.\n\nSyNb0 is a deep learning model that predicts a synthetic a distortion-free\nb0 image from a distorted b0 and T1w.\n\nThis script must be used carefully, as it is meant to be used in an\nenvironment with the following dependencies already installed (not installed by\ndefault in Scilpy):\n- tensorflow-addons\n- tensorrt\n- tensorflow", + "help": "usage: scil_volume_b0_synthesis.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_b0 in_b0_mask in_t1 in_t1_mask out_b0\n\nWrapper for SyNb0 available in Dipy, to run it on a single subject.\nRequires Skull-Strip b0 and t1w images as input, the script will normalize the\nt1w's WM to 110, co-register both images, then register it to the appropriate\ntemplate, run SyNb0 and then transform the result back to the original space.\n\nSyNb0 is a deep learning model that predicts a synthetic a distortion-free\nb0 image from a distorted b0 and T1w.\n\nThis script must be used carefully, as it is meant to be used in an\nenvironment with the following dependencies already installed (not installed by\ndefault in Scilpy):\n- tensorflow-addons\n- tensorrt\n- tensorflow\n\npositional arguments:\n in_b0 Input b0 image.\n in_b0_mask Input b0 mask.\n in_t1 Input t1w image.\n in_t1_mask Input t1w mask.\n out_b0 Output b0 image without distortion.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Schilling, Kurt G., et al. 
\"Synthesized b0 for diffusion distortion\n correction (Synb0-DisCo).\" Magnetic resonance imaging 64 (2019): 62-70.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_volume_count_non_zero_voxels", + "docstring": "Count the number of non-zero voxels in an image file.\n\nIf you give it an image with more than 3 dimensions, it will summarize the 4th\n(or more) dimension to one voxel, and then find non-zero voxels over this.\nThis means that if there is at least one non-zero voxel in the 4th dimension,\nthis voxel of the 3D volume will be considered as non-zero.\n\nFormerly: scil_count_non_zero_voxels.py", + "help": "usage: scil_volume_count_non_zero_voxels.py [-h] [--out OUT_FILE] [--stats]\n [--id VALUE_ID]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n IN_FILE\n\nCount the number of non-zero voxels in an image file.\n\nIf you give it an image with more than 3 dimensions, it will summarize the 4th\n(or more) dimension to one voxel, and then find non-zero voxels over this.\nThis means that if there is at least one non-zero voxel in the 4th dimension,\nthis voxel of the 3D volume will be considered as non-zero.\n\nFormerly: scil_count_non_zero_voxels.py\n\npositional arguments:\n IN_FILE Input file name, in nifti format.\n\noptions:\n -h, --help show this help message and exit\n --out OUT_FILE Name of the output file, which will be saved as a text file.\n --stats If set, output the value using a stats format. Using this synthax will append\n a line to the output file, instead of creating a file with only one line.\n This is useful to create a file to be used as the source of data for a graph.\n Can be combined with --id\n --id VALUE_ID Id of the current count. If used, the value of this argument will be\n output (followed by a \":\") before the count value.\n Mostly useful with --stats.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_volume_crop", + "docstring": "Crop a volume using a given or an automatically computed bounding box. If a\npreviously computed bounding box file is given, the cropping will be applied\nand the affine fixed accordingly.\n\nWarning: This works well on masked images (like with FSL-Bet) volumes since\nit's looking for non-zero data. Therefore, you should validate the results on\nother types of images that haven't been masked.\n\nFormerly: scil_crop_volume.py", + "help": "usage: scil_volume_crop.py [-h] [--ignore_voxel_size]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--input_bbox INPUT_BBOX | --output_bbox OUTPUT_BBOX]\n in_image out_image\n\nCrop a volume using a given or an automatically computed bounding box. If a\npreviously computed bounding box file is given, the cropping will be applied\nand the affine fixed accordingly.\n\nWarning: This works well on masked images (like with FSL-Bet) volumes since\nit's looking for non-zero data. Therefore, you should validate the results on\nother types of images that haven't been masked.\n\nFormerly: scil_crop_volume.py\n\npositional arguments:\n in_image Path of the nifti file to crop.\n out_image Path of the cropped nifti file to write.\n\noptions:\n -h, --help show this help message and exit\n --ignore_voxel_size Ignore voxel size compatibility test between input bounding box and data. Warning, use only if you know what you are doing.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --input_bbox INPUT_BBOX\n Path of the pickle file from which to take the bounding box to crop input file.\n --output_bbox OUTPUT_BBOX\n Path of the pickle file where to write the computed bounding box. (.pickle extension)\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_volume_flip", + "docstring": "Flip the volume according to the specified axis.\n\nFormerly: scil_flip_volume.py", + "help": "usage: scil_volume_flip.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image dimension [dimension ...]\n\nFlip the volume according to the specified axis.\n\nFormerly: scil_flip_volume.py\n\npositional arguments:\n in_image Path of the input volume (nifti).\n out_image Path of the output volume (nifti).\n dimension The axes you want to flip. eg: to flip the x and y axes use: x y.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_volume_math", + "docstring": "Performs an operation on a list of images. The supported operations are\nlisted below.\n\nThis script is loading all images in memory, will often crash after a few\nhundred images.\n\nSome operations such as multiplication or addition accept float value as\nparameters instead of images.\n> scil_volume_math.py multiplication img.nii.gz 10 mult_10.nii.gz\n\nFormerly: scil_image_math.py", + "help": "usage: scil_volume_math.py [-h] [--data_type DATA_TYPE] [--exclude_background]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference,concatenate,dilation,erosion,closing,opening,blur}\n in_args [in_args ...] out_image\n\nPerforms an operation on a list of images. 
The supported operations are\nlisted below.\n\nThis script is loading all images in memory, will often crash after a few\nhundred images.\n\nSome operations such as multiplication or addition accept float value as\nparameters instead of images.\n> scil_volume_math.py multiplication img.nii.gz 10 mult_10.nii.gz\n\nFormerly: scil_image_math.py\n\n lower_threshold: IMG THRESHOLD\n All values below the threshold will be set to zero.\n All values above the threshold will be set to one.\n \n upper_threshold: IMG THRESHOLD\n All values below the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_eq: IMG THRESHOLD\n All values below the threshold will be set to zero.\n All values above or equal the threshold will be set to one.\n \n upper_threshold_eq: IMG THRESHOLD\n All values below or equal the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_otsu: IMG\n All values below or equal to the Otsu threshold will be set to zero.\n All values above the Otsu threshold will be set to one.\n (Otsu's method is an algorithm to perform automatic image thresholding\n of the background.)\n \n upper_threshold_otsu: IMG\n All values below the Otsu threshold will be set to one.\n All values above or equal to the Otsu threshold will be set to zero.\n Equivalent to lower_threshold_otsu followed by an inversion.\n \n lower_clip: IMG THRESHOLD\n All values below the threshold will be set to threshold.\n \n upper_clip: IMG THRESHOLD\n All values above the threshold will be set to threshold.\n \n absolute_value: IMG\n All negative values will become positive.\n \n round: IMG\n Round all decimal values to the closest integer.\n \n ceil: IMG\n Ceil all decimal values to the next integer.\n \n floor: IMG\n Floor all decimal values to the previous integer.\n \n normalize_sum: IMG\n Normalize the image so the sum of all values is one.\n \n normalize_max: IMG\n Normalize the image so the maximum value is one.\n \n log_10: IMG\n Apply a log (base 10) to all non zeros values of an image.\n \n log_e: IMG\n Apply a natural log to all non zeros values of an image.\n \n convert: IMG\n Perform no operation, but simply change the data type.\n \n invert: IMG\n Operation on binary image to interchange 0s and 1s in a binary mask.\n \n addition: IMGs\n Add multiple images together.\n \n subtraction: IMG_1 IMG_2\n Subtract first image by the second (IMG_1 - IMG_2).\n \n multiplication: IMGs\n Multiply multiple images together (danger of underflow and overflow)\n \n division: IMG_1 IMG_2\n Divide first image by the second (danger of underflow and overflow)\n Ignore zeros values, excluded from the operation.\n \n mean: IMGs\n Compute the mean of images.\n If a single 4D image is provided, average along the last dimension.\n \n std: IMGs\n Compute the standard deviation average of multiple images.\n If a single 4D image is provided, compute the STD along the last\n dimension.\n \n correlation: IMGs\n Computes the correlation of the 3x3x3 neighborhood of each voxel, for\n all pair of input images. The final image is the average correlation\n (through all pairs).\n For a given pair of images\n - Background is considered as 0. May lead to very high correlations\n close to the border of the background regions, or very poor ones if the\n background in both images differ.\n - Images are zero-padded. 
For the same reason as higher, may lead to\n very high correlations if you have data close to the border of the\n image.\n - NaN values (if a voxel's neighborhood is entirely uniform; std 0) are\n replaced by\n - 0 if at least one neighborhood was entirely containing background.\n - 1 if the voxel's neighborhoods are uniform in both images\n - 0 if the voxel's neighborhoods is uniform in one image, but not\n the other.\n\n UPDATE AS OF VERSION 2.0: Random noise was previously added in the\n process to help avoid NaN values. Now replaced by either 0 or 1 as\n explained above.\n \n union: IMGs\n Operation on binary image to keep voxels, that are non-zero, in at\n least one file.\n \n intersection: IMGs\n Operation on binary image to keep the voxels, that are non-zero,\n are present in all files.\n \n difference: IMG_1 IMG_2\n Operation on binary image to keep voxels from the first file that are\n not in the second file (non-zeros).\n \n concatenate: IMGs\n Concatenate a list of 3D and 4D images into a single 4D image.\n \n dilation: IMG, VALUE\n Binary morphological operation to spatially extend the values of an\n image to their neighbors. VALUE is in voxels: an integer > 0.\n \n erosion: IMG, VALUE\n Binary morphological operation to spatially shrink the volume contained\n in a binary image. VALUE is in voxels: an integer > 0.\n \n closing: IMG, VALUE\n Binary morphological operation, dilation followed by an erosion.\n \n opening: IMG, VALUE\n Binary morphological operation, erosion followed by a dilation.\n \n blur: IMG, VALUE\n Apply a gaussian blur to a single image. VALUE is sigma, the standard\n deviation of the Gaussian kernel.\n \n\npositional arguments:\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference,concatenate,dilation,erosion,closing,opening,blur}\n The type of operation to be performed on the images.\n in_args The list of image files or parameters. Refer to each operation's documentation of the expected arguments.\n out_image Output image path.\n\noptions:\n -h, --help show this help message and exit\n --data_type DATA_TYPE\n Data type of the output image. Use the format: \n uint8, int16, int/float32, int/float64.\n --exclude_background Does not affect the background of the original images.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_volume_remove_outliers_ransac", + "docstring": "Remove outliers from image using the RANSAC algorithm.\nThe RANSAC algorithm parameters are sensitive to the input data.\n\nNOTE: Current default parameters are tuned for ad/md/rd images only.\n\nFormerly: scil_remove_outliers_ransac.py", + "help": "usage: scil_volume_remove_outliers_ransac.py [-h] [--min_fit MIN_FIT]\n [--max_iter MAX_ITER]\n [--fit_thr FIT_THR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image\n\nRemove outliers from image using the RANSAC algorithm.\nThe RANSAC algorithm parameters are sensitive to the input data.\n\nNOTE: Current default parameters are tuned for ad/md/rd images only.\n\nFormerly: scil_remove_outliers_ransac.py\n\npositional arguments:\n in_image Nifti image.\n out_image Corrected Nifti image.\n\noptions:\n -h, --help show this help message and exit\n --min_fit MIN_FIT The minimum number of data values required to fit the model. [50]\n --max_iter MAX_ITER The maximum number of iterations allowed in the algorithm. [1000]\n --fit_thr FIT_THR Threshold value for determining when a data point fits a model. [0.01]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_volume_resample", + "docstring": "Script to resample a dataset to match the resolution of another\nreference dataset or to the resolution specified as in argument.\n\nFormerly: scil_resample_volume.py", + "help": "usage: scil_volume_resample.py [-h]\n (--ref REF | --volume_size VOLUME_SIZE [VOLUME_SIZE ...] | --voxel_size VOXEL_SIZE [VOXEL_SIZE ...] | --iso_min)\n [--interp {nn,lin,quad,cubic}]\n [--enforce_dimensions]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image\n\nScript to resample a dataset to match the resolution of another\nreference dataset or to the resolution specified as in argument.\n\nFormerly: scil_resample_volume.py\n\npositional arguments:\n in_image Path of the input volume.\n out_image Path of the resampled volume.\n\noptions:\n -h, --help show this help message and exit\n --ref REF Reference volume to resample to.\n --volume_size VOLUME_SIZE [VOLUME_SIZE ...]\n Sets the size for the volume. If the value is set to is Y, it will resample to a shape of Y x Y x Y.\n --voxel_size VOXEL_SIZE [VOXEL_SIZE ...]\n Sets the voxel size. If the value is set to is Y, it will set a voxel size of Y x Y x Y.\n --iso_min Resample the volume to R x R x R with R being the smallest current voxel dimension.\n --interp {nn,lin,quad,cubic}\n Interpolation mode.\n nn: nearest neighbour\n lin: linear\n quad: quadratic\n cubic: cubic\n Defaults to linear\n --enforce_dimensions Enforce the reference volume dimension.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_volume_reshape_to_reference", + "docstring": "Reshape / reslice / resample *.nii or *.nii.gz using a reference.\nThis script can be used to align freesurfer/civet output, as .mgz,\nto the original input image.\n\n>>> scil_volume_reshape_to_reference.py wmparc.mgz t1.nii.gz wmparc_t1.nii.gz\\\n --interpolation nearest\n\nFormerly: scil_reshape_to_reference.py", + "help": "usage: scil_volume_reshape_to_reference.py [-h]\n [--interpolation {linear,nearest}]\n [--keep_dtype]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file in_ref_file out_file\n\nReshape / reslice / resample *.nii or *.nii.gz using a reference.\nThis script can be used to align freesurfer/civet output, as .mgz,\nto the original input image.\n\n>>> scil_volume_reshape_to_reference.py wmparc.mgz t1.nii.gz wmparc_t1.nii.gz\\\n --interpolation nearest\n\nFormerly: scil_reshape_to_reference.py\n\npositional arguments:\n in_file Path of the image (.nii or .mgz) to be reshaped.\n in_ref_file Path of the reference image (.nii).\n out_file Output filename of the reshaped image (.nii).\n\noptions:\n -h, --help show this help message and exit\n --interpolation {linear,nearest}\n Interpolation: \"linear\" or \"nearest\". [linear]\n --keep_dtype If True, keeps the data_type of the input image (in_file) when saving the output image (out_file).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_volume_stats_in_ROI", + "docstring": "Compute the statistics (mean, std) of scalar maps, which can represent\ndiffusion metrics, in a ROI. Prints the results.\n\nThe mask can either be a binary mask, or a weighting mask. If the mask is\na weighting mask it should either contain floats between 0 and 1 or should be\nnormalized with --normalize_weights. IMPORTANT: if the mask contains weights\n(and not 0 and 1 exclusively), the standard deviation will also be weighted.", + "help": "usage: scil_volume_stats_in_ROI.py [-h]\n (--metrics_dir dir | --metrics file [file ...])\n [--bin] [--normalize_weights]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_mask\n\nCompute the statistics (mean, std) of scalar maps, which can represent\ndiffusion metrics, in a ROI. Prints the results.\n\nThe mask can either be a binary mask, or a weighting mask. If the mask is\na weighting mask it should either contain floats between 0 and 1 or should be\nnormalized with --normalize_weights. IMPORTANT: if the mask contains weights\n(and not 0 and 1 exclusively), the standard deviation will also be weighted.\n\npositional arguments:\n in_mask Mask volume filename.\n Can be a binary mask or a weighted mask.\n\noptions:\n -h, --help show this help message and exit\n --bin If set, will consider every value of the mask higherthan 0 to be \n part of the mask (equivalent weighting for every voxel).\n --normalize_weights If set, the weights will be normalized to the [0,1] range.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics input options:\n --metrics_dir dir Name of the directory containing metrics files: we will \n load all nifti files.\n --metrics file [file ...]\n Metrics nifti filename. List of the names of the metrics file, \n in nifti format.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [], + "keywords": [] + }, + { + "name": "scil_volume_stats_in_labels", + "docstring": "Computes the information from the input map for each cortical region\n(corresponding to an atlas).\n\nHint: For instance, this script could be useful if you have a seed map from a\nspecific bundle, to know from which regions it originated.\n\nFormerly: scil_compute_seed_by_labels.py", + "help": "usage: scil_volume_stats_in_labels.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_labels in_labels_lut in_map\n\nComputes the information from the input map for each cortical region\n(corresponding to an atlas).\n\nHint: For instance, this script could be useful if you have a seed map from a\nspecific bundle, to know from which regions it originated.\n\nFormerly: scil_compute_seed_by_labels.py\n\npositional arguments:\n in_labels Path of the input label file.\n in_labels_lut Path of the LUT file corresponding to labels,used to name the regions of interest.\n in_map Path of the input map file. Expecting a 3D file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [], + "keywords": [] + } + ] +} \ No newline at end of file From 83b70885020072faaa24386705385238fc9d36a9 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Fri, 21 Jun 2024 09:00:07 -0400 Subject: [PATCH 09/69] search by synonyms --- scilpy-bot-scripts/contextual_search.py | 60 + scilpy-bot-scripts/generate_synonyms.py | 90 + .../json_files/Scilpy_vocabulary.txt | 431 + scilpy-bot-scripts/json_files/acronyms.json | 1298 + .../json_files/knowledge_base.json | 43388 +++++++++++++++- .../json_files/knowledge_base_word2vec.json | 9715 ++++ scripts/scil_search_keywords.py | 33 +- 7 files changed, 54844 insertions(+), 171 deletions(-) create mode 100644 scilpy-bot-scripts/contextual_search.py create mode 100644 scilpy-bot-scripts/generate_synonyms.py create mode 100644 scilpy-bot-scripts/json_files/Scilpy_vocabulary.txt create mode 100644 scilpy-bot-scripts/json_files/acronyms.json create mode 100644 scilpy-bot-scripts/json_files/knowledge_base_word2vec.json diff --git a/scilpy-bot-scripts/contextual_search.py b/scilpy-bot-scripts/contextual_search.py new file mode 100644 index 000000000..5890f80aa --- /dev/null +++ b/scilpy-bot-scripts/contextual_search.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import json +import spacy +from pathlib import Path + +# Initialize SpaCy +nlp = spacy.load('en_core_web_md') + +def load_knowledge_base(json_file): + """Load the knowledge base from a JSON file.""" + with open(json_file, 'r') as f: + knowledge_base = json.load(f) + return knowledge_base + +def contextual_search(query, knowledge_base, threshold=0.1): + """Perform a contextual search based on the user query.""" + query_doc = nlp(query) + best_match = None + highest_similarity = 0 + + for script in knowledge_base['scripts']: + # Combine docstring, help text, synonyms, and 
keywords for better matching + description = ( + script['docstring'] + ' ' + script['help'] + ' ' + + ' '.join(script['synonyms']) + ' ' + ' '.join(script['keywords']) + ) + description_doc = nlp(description) + similarity = query_doc.similarity(description_doc) + if similarity > highest_similarity and similarity>threshold: + highest_similarity = similarity + best_match = script + + return best_match, highest_similarity + +def main(): + base_dir = Path(__file__).parent + + json_file = base_dir / 'json_files' / 'knowledge_base.json' + + # Load the knowledge base from JSON file + knowledge_base = load_knowledge_base(json_file) + + # Example user query + query = "I need a script that computes the SH coefficient directly on the raw DWI signal." + + + # Perform contextual search + best_match, similarity = contextual_search(query, knowledge_base) + + if best_match: + print(f"The best match is {best_match['name']} with a similarity score of {similarity:.2f}") + print(f"Docstring: {best_match['docstring']}") + print(f"Help: {best_match['help']}") + else: + print("No relevant script found.") + +if __name__ == '__main__': + main() diff --git a/scilpy-bot-scripts/generate_synonyms.py b/scilpy-bot-scripts/generate_synonyms.py new file mode 100644 index 000000000..a7354ea4f --- /dev/null +++ b/scilpy-bot-scripts/generate_synonyms.py @@ -0,0 +1,90 @@ +import json +import gensim.downloader as api +from scipy.spatial.distance import cosine +import re +from pathlib import Path + +#Load vocabulary and Acronyms +def load_vocabulary(vocab_file_path): + with open(vocab_file_path, 'r', encoding='utf-8') as file: + vocabulary = [line.strip() for line in file] + return vocabulary + +def load_acronyms(acronyms_file_path): + with open(acronyms_file_path, 'r', encoding='utf-8') as file: + acronyms = json.load(file) + return {entry['abbreviation']: entry['Description'] for entry in acronyms} + +#load pre-trained word vectors +word_vectors = api.load("word2vec-google-news-300") + +#calculate similarity and find synonyms +def get_word_embedding(word): + if word in word_vectors: + return word_vectors[word] + return None + +def calculate_similarity(word1, word2): + embedding1 = get_word_embedding(word1) + embedding2 = get_word_embedding(word2) + if embedding1 is not None and embedding2 is not None: + return 1 - cosine(embedding1, embedding2) + return 0 + +def find_synonyms(word, vocabulary, acronyms_dict, threshold=0.7): + synonyms = [] + for vocab_word in vocabulary: + # Check if it's an acronym + if vocab_word.startswith('*'): + acronym = vocab_word[1:] + if acronym in acronyms_dict: + description = acronyms_dict[acronym] + description_words = description.split() + for desc_word in description_words: + similarity = calculate_similarity(word, desc_word) + if similarity >= threshold: + synonyms.append(vocab_word) + break + else: + similarity = calculate_similarity(word, vocab_word) + if similarity >= threshold: + synonyms.append(vocab_word) + return synonyms + +def extract_words(text): + return re.findall(r'\w+', text.lower()) + + +def generate_synonyms(script_entry, vocabulary, acronyms_dict): + words = set(extract_words(script_entry["docstring"]) + extract_words(script_entry["help"])) + synonyms_dict = {} + for word in words: + synonyms = find_synonyms(word, vocabulary, acronyms_dict) + if len(synonyms) != 0: + synonyms.append(word) + #synonyms_dict[word] = synonyms + script_entry['synonyms'].append(synonyms) + return script_entry + +def update_scripts_with_synonyms(json_filepath, vocabulary, acronyms_dict): + with 
open(json_filepath, 'r', encoding='utf-8') as file: + data = json.load(file) + + for script_entry in data['scripts']: + script_entry['synonyms'] = [] # Initialize the synonyms list + updated_script = generate_synonyms(script_entry, vocabulary, acronyms_dict) + + with open(json_filepath, 'w', encoding='utf-8') as file: + json.dump(data, file, indent=4) + + +base_dir = Path(__file__).parent + +vocab_filepath = base_dir/'json_files'/'Scilpy_vocabulary.txt' +acronyms_filepath = base_dir/'json_files'/'acronyms.json' +json_filepath = base_dir/'json_files'/'knowledge_base_word2vec.json' +vocabulary = load_vocabulary(vocab_filepath) +acronyms_dict = load_acronyms(acronyms_filepath) + +update_scripts_with_synonyms(json_filepath, vocabulary, acronyms_dict) +print(f"Scripts in {json_filepath} have been updated with synonyms.") diff --git a/scilpy-bot-scripts/json_files/Scilpy_vocabulary.txt b/scilpy-bot-scripts/json_files/Scilpy_vocabulary.txt new file mode 100644 index 000000000..299367400 --- /dev/null +++ b/scilpy-bot-scripts/json_files/Scilpy_vocabulary.txt @@ -0,0 +1,431 @@ +*ATS +Allin +*BLS +BrainMap +Brainnetome +*CDL +*CIN +*CVL +Catani +Conclusion +Connections +*DDI +Data +Dejerine +Diffusion +Experiment +Fan +Figure +Figures +Frontal +Functionally +Gage +Gyrus +*HCP +*IFG +*INS +*IPL +*ITG +Imaging +Laird +Lancaster +Lobe +Longitudinal +*MFG +*MME +*MRI +*MTG +Maier +Mandonnet +MesL +*MesLS +Middle +Neher +*OFG +*PCG +*PCUN +*PaG +Pandya +*PoCG +*ROIs +*RVA +*SFG +*SLS +*SPL +Sarubbo +Schotten +Sporns +Structurally +*TOIs +TractoFind +Tractofind +Trends +acted +action +active +affective +algorithm +analysis +anatomical +anatomically +anatomy +animal +anterior +application +applied +arcuate +area +areas +articulating +assigned +association +atlas +attention +atypical +average +awareness +axial +axon +axonal +axons +basal +based +binary +blue +body +brain +brainnetome +branch +bundles +callosum +capsule +categorized +caudal +cell +cingulum +clear +clinical +cognition +commissural +complex +complexes +comprised +comprising +compromised +conditions +connect +connected +connecting +connection +connectional +connections +connectivity +connectome +connects +considered +consistently +contrast +coronal +corpus +cortex +cortical +create +*dMRI +dark +data +database +defined +degeneration +degree +depicted +desires +difference +differences +diffusion +direction +discovery +disease +disorder +dissection +dissimilar +divergence +dominant +dorsal +dorsally +dorsolateral +echoed +emotion +emotional +episodic +error +examine +examined +examining +executing +exist +experience +experiment +exploration +false +fasciculus +fibre +fibres +form +forms +frontal +fronto +function +functional +functionally +functions +fundamental +future +general +genu +global +greater +green +grey +gyrus +healthy +held +hemisphere +hemispheres +hemispheric +high +higher +highest +highly +homologous +human +humans +image +imaging +imagining +implicated +inconsistent +increase +indicating +individual +individuals +inferior +inferiorly +inhibiting +insight +insights +insula +insular +integrate +integrated +integrating +internal +intersected +invalid +involved +key +keywords +knowledge +lack +language +large +larger +lateral +learning +left +level +limb +limbic +limitation +limitations +limiting +literature +lobe +lobes +lobule +long +longitudinal +loss +main +manner +maps +matrices +matter +meaning +medial +memory +mesial +meta +methodologies +methodology +methods +midbody +middle +mique +mirror +mortem +motion +motor +movement 
+movements +mémique +méthique +naming +network +networks +neuroanatomy +neuron +nuclei +nucleus +observation +observed +observing +occipital +occurrence +occurrent +orbital +order +orientation +oriented +overt +pain +papers +paracentral +parameter +parameters +parcels +parietal +participant +participants +pass +pathological +pathway +pathways +perception +planes +population +positive +positives +possibility +post +postcentral +posterior +potential +precentral +precision +presented +primary +principal +probabilistic +probability +process +processes +processing +project +projecting +projection +proposed +question +random +ranged +reasoning +red +region +regions +reliability +reliable +rendered +rendering +reported +represent +resolved +result +role +rooted +rostroventral +*sTOI +sTOIs +sagittal +scientific +seeding +sensory +sexual +shape +signal +similarity +size +social +somatosensation +space +spatial +specific +step +streamline +streamlines +structural +structurally +structure +structures +studies +study +subject +subjects +sublobe +sublobes +subsequently +sulcus +superior +superiorly +supported +surviving +systematic +systems +tasting +taxonomy +temporal +terminations +thalamus +thinking +threshold +thresholds +tool +total +tracing +tracking +tract +tractogram +tractograms +tractography +tracts +traditionally +trajectory +transverse +traverse +trend +true +understanding +unique +unknown +utilized +vTOI +*vTOIs +valid +validation +variability +variable +variance +variety +ventral +ventrolateral +vertically +view +views +visual +vivo +voies +volume +volumes +voxel +weighted +white +work +working diff --git a/scilpy-bot-scripts/json_files/acronyms.json b/scilpy-bot-scripts/json_files/acronyms.json new file mode 100644 index 000000000..ed35b9834 --- /dev/null +++ b/scilpy-bot-scripts/json_files/acronyms.json @@ -0,0 +1,1298 @@ +[ + { + "abbreviation": "k-nn", + "Description": "k-nearest neighbors" + }, + { + "abbreviation": "1d", + "Description": "one-dimensional" + }, + { + "abbreviation": "2d", + "Description": "two-dimensional" + }, + { + "abbreviation": "3d", + "Description": "three-dimensional" + }, + { + "abbreviation": "ac", + "Description": "anterior commissure" + }, + { + "abbreviation": "ae", + "Description": "autoencoder" + }, + { + "abbreviation": "af", + "Description": "arcuate fascicle" + }, + { + "abbreviation": "ai", + "Description": "artificial intelligence" + }, + { + "abbreviation": "ann", + "Description": "artificial neural network" + }, + { + "abbreviation": "ar", + "Description": "acoustic radiation" + }, + { + "abbreviation": "atr", + "Description": "anterior thalamic radiation" + }, + { + "abbreviation": "cc", + "Description": "corpus callosum" + }, + { + "abbreviation": "cing", + "Description": "cingulum" + }, + { + "abbreviation": "cinta", + "Description": "clustering in tractography using autoencoders" + }, + { + "abbreviation": "cnn", + "Description": "convolutional neural network" + }, + { + "abbreviation": "csd", + "Description": "constrained spherical deconvolution" + }, + { + "abbreviation": "csf", + "Description": "cerebrospinal fluid" + }, + { + "abbreviation": "cst", + "Description": "corticospinal tract" + }, + { + "abbreviation": "dl", + "Description": "deep learning" + }, + { + "abbreviation": "dmri", + "Description": "diffusion magnetic resonance imaging" + }, + { + "abbreviation": "dodf", + "Description": "diffusion orientation distribution function" + }, + { + "abbreviation": "dt", + "Description": "diffusion tensor" + }, + { + "abbreviation": "dti", 
+ "Description": "diffusion tensor imaging" + }, + { + "abbreviation": "dw-mri", + "Description": "diffusion-weighted magnetic resonance imaging" + }, + { + "abbreviation": "dwi", + "Description": "diffusion-weighted imaging" + }, + { + "abbreviation": "dwm", + "Description": "deep white matter" + }, + { + "abbreviation": "eap", + "Description": "ensemble average (diffusion) propagator" + }, + { + "abbreviation": "fa", + "Description": "fractional anisotropy" + }, + { + "abbreviation": "fat", + "Description": "frontal aslant tract" + }, + { + "abbreviation": "fc", + "Description": "fully connected" + }, + { + "abbreviation": "finta", + "Description": "filtering in tractography using autoencoders" + }, + { + "abbreviation": "fmri", + "Description": "functional magnetic resonance imaging" + }, + { + "abbreviation": "fod", + "Description": "fiber orientation distribution" + }, + { + "abbreviation": "fodf", + "Description": "fiber orientation distribution function" + }, + { + "abbreviation": "ft", + "Description": "fourier transform" + }, + { + "abbreviation": "fx", + "Description": "fornix" + }, + { + "abbreviation": "ge", + "Description": "gradient echo" + }, + { + "abbreviation": "gesta", + "Description": "generative sampling in bundle tractography using autoencoders" + }, + { + "abbreviation": "gm", + "Description": "gray matter" + }, + { + "abbreviation": "hardi", + "Description": "high angular resolution diffusion imaging" + }, + { + "abbreviation": "ic", + "Description": "internal capsule" + }, + { + "abbreviation": "icp", + "Description": "inferior cerebellar peduncle" + }, + { + "abbreviation": "ifof", + "Description": "inferior fronto-occipital fascicle" + }, + { + "abbreviation": "ils", + "Description": "inferior longitudinal system" + }, + { + "abbreviation": "jif", + "Description": "journal impact factor" + }, + { + "abbreviation": "mcp", + "Description": "middle cerebellar peduncle" + }, + { + "abbreviation": "ml", + "Description": "machine learning" + }, + { + "abbreviation": "mlp", + "Description": "multilayer perceptron" + }, + { + "abbreviation": "mls", + "Description": "middle longitudinal system" + }, + { + "abbreviation": "mr", + "Description": "magnetic resonance" + }, + { + "abbreviation": "mri", + "Description": "magnetic resonance imaging" + }, + { + "abbreviation": "nn", + "Description": "neural network" + }, + { + "abbreviation": "nos", + "Description": "number of streamlines" + }, + { + "abbreviation": "odf", + "Description": "orientation distribution function (also referred to as orientation density function)" + }, + { + "abbreviation": "or", + "Description": "optic radiation" + }, + { + "abbreviation": "pc", + "Description": "posterior commissure" + }, + { + "abbreviation": "pca", + "Description": "principal component analysis" + }, + { + "abbreviation": "pdf", + "Description": "probability density function" + }, + { + "abbreviation": "pgse", + "Description": "pulsed-gradient spin echo" + }, + { + "abbreviation": "pli", + "Description": "polarized light imaging" + }, + { + "abbreviation": "popt", + "Description": "parieto-occipital pontine tract" + }, + { + "abbreviation": "ps-oct", + "Description": "polarization-sensitive optical coherence tomography" + }, + { + "abbreviation": "rf", + "Description": "radio frequency" + }, + { + "abbreviation": "rnn", + "Description": "recurrent neural network" + }, + { + "abbreviation": "roc", + "Description": "receiver operating characteristic" + }, + { + "abbreviation": "scp", + "Description": "superior cerebellar peduncle" + 
}, + { + "abbreviation": "sd", + "Description": "spherical deconvolution" + }, + { + "abbreviation": "se", + "Description": "spin echo" + }, + { + "abbreviation": "set", + "Description": "surface-enhanced tractography" + }, + { + "abbreviation": "sls", + "Description": "superior longitudinal system" + }, + { + "abbreviation": "smri", + "Description": "structural magnetic resonance imaging" + }, + { + "abbreviation": "swm", + "Description": "superficial white matter" + }, + { + "abbreviation": "t1-w", + "Description": "t1-weighted image" + }, + { + "abbreviation": "te", + "Description": "echo time" + }, + { + "abbreviation": "tr", + "Description": "repetition time" + }, + { + "abbreviation": "uf", + "Description": "uncinate fascicle" + }, + { + "abbreviation": "vae", + "Description": "variational autoencoder" + }, + { + "abbreviation": "wm", + "Description": "white matter" + }, + { + "abbreviation": "3d", + "Description": "three dimensions" + }, + { + "abbreviation": "4d", + "Description": "four dimensions" + }, + { + "abbreviation": "act", + "Description": "anatomically-constrained tractography" + }, + { + "abbreviation": "amico", + "Description": "accelerated microstructure imaging via convex optimization" + }, + { + "abbreviation": "apm", + "Description": "average pathlength map" + }, + { + "abbreviation": "bet", + "Description": "brain extraction tool" + }, + { + "abbreviation": "cdmri", + "Description": "computational diffusion mri" + }, + { + "abbreviation": "cg", + "Description": "cingulum" + }, + { + "abbreviation": "cmc", + "Description": "continuous maps criterion" + }, + { + "abbreviation": "commit", + "Description": "convex optimization modeling for microstructure informed tractography" + }, + { + "abbreviation": "csa", + "Description": "constant solid-angle" + }, + { + "abbreviation": "csf/lcs/lcr", + "Description": "cerebrospinal fluid" + }, + { + "abbreviation": "c-dec", + "Description": "connectivity directionally-encoded color" + }, + { + "abbreviation": "dec", + "Description": "directionally-encoded color" + }, + { + "abbreviation": "dwi", + "Description": "diffusion-weighted imaging" + }, + { + "abbreviation": "dmri", + "Description": "diffusion-weighted imaging" + }, + { + "abbreviation": "irmd", + "Description": "diffusion-weighted imaging" + }, + { + "abbreviation": "eap", + "Description": "ensemble average propagator" + }, + { + "abbreviation": "epi", + "Description": "echo-planar imaging" + }, + { + "abbreviation": "fast", + "Description": "fmrib\u2019s automated segmentation tool" + }, + { + "abbreviation": "flirt", + "Description": "fmrib\u2019s linear image registration tool" + }, + { + "abbreviation": "fmt", + "Description": "fast marching tractography" + }, + { + "abbreviation": "fsl", + "Description": "fmrib software library" + }, + { + "abbreviation": "grappa", + "Description": "generalized autocalibrating partially parallel acquisition" + }, + { + "abbreviation": "ifof", + "Description": "inferior fronto-occipital fasciculus" + }, + { + "abbreviation": "ipmi", + "Description": "information processing in medical imaging" + }, + { + "abbreviation": "ismrm", + "Description": "international society for magnetic resonance in medicine" + }, + { + "abbreviation": "miccai", + "Description": "medical image computing and computer assisted intervention" + }, + { + "abbreviation": "mprage", + "Description": "magnetization-prepared rapid acquisition with gradient-echo" + }, + { + "abbreviation": "irm", + "Description": "magnetic resonance imaging" + }, + { + 
"abbreviation": "mri", + "Description": "magnetic resonance imaging" + }, + { + "abbreviation": "odf", + "Description": "orientation distribution function" + }, + { + "abbreviation": "ohbm", + "Description": "organization for human brain mapping" + }, + { + "abbreviation": "pve", + "Description": "partial volume estimation" + }, + { + "abbreviation": "roi", + "Description": "region of interest" + }, + { + "abbreviation": "rtt", + "Description": "real-time tractography" + }, + { + "abbreviation": "sh", + "Description": "spherical harmonics" + }, + { + "abbreviation": "slf", + "Description": "superior longitudinal fasciculus" + }, + { + "abbreviation": "snr", + "Description": "signal-to-noise ratio" + }, + { + "abbreviation": "twi", + "Description": "track-weighted imaging" + }, + { + "abbreviation": "voi", + "Description": "volume of interest" + }, + { + "abbreviation": "ats", + "Description": "anterior traverse system" + }, + { + "abbreviation": "a123", + "Description": "area 1/2/3 (upper limb, head, and face region)" + }, + { + "abbreviation": "a4hf", + "Description": "area 4 (head and face region)" + }, + { + "abbreviation": "a4ul", + "Description": "area 4 (upper limb region)" + }, + { + "abbreviation": "a46", + "Description": "area 46" + }, + { + "abbreviation": "af", + "Description": "arcuate fasciculus" + }, + { + "abbreviation": "bls", + "Description": "basal longitudinal system" + }, + { + "abbreviation": "ca39", + "Description": "caudal area 39" + }, + { + "abbreviation": "cdl", + "Description": "caudal dorsolateral area" + }, + { + "abbreviation": "cvl", + "Description": "caudal ventrolateral area" + }, + { + "abbreviation": "cdl", + "Description": "caudolateral of area " + }, + { + "abbreviation": "csf", + "Description": "cerebral spinal fluid" + }, + { + "abbreviation": "ctoi", + "Description": "conservative tract of interest" + }, + { + "abbreviation": "da9/36", + "Description": "dorsal area 9/46" + }, + { + "abbreviation": "ddi", + "Description": "dorsal dysgranular insula" + }, + { + "abbreviation": "dl6", + "Description": "dorsolateral area 6" + }, + { + "abbreviation": "dl37", + "Description": "dorsolateral area 37 region" + }, + { + "abbreviation": "efc", + "Description": "extreme/external capsule fibre system" + }, + { + "abbreviation": "fodfs", + "Description": "fibre orientation distribution functions" + }, + { + "abbreviation": "fus", + "Description": "fusiform gyrus" + }, + { + "abbreviation": "hcp", + "Description": "human connectome project" + }, + { + "abbreviation": "ifg", + "Description": "inferior frontal gyrus" + }, + { + "abbreviation": "ils ", + "Description": "inferior longitudinal system " + }, + { + "abbreviation": "ipl", + "Description": "inferior parietal lobe" + }, + { + "abbreviation": "itg", + "Description": "inferior temporal gyrus" + }, + { + "abbreviation": "ins", + "Description": "insula" + }, + { + "abbreviation": "ipa ", + "Description": "intraparietal area " + }, + { + "abbreviation": "la", + "Description": "lateral area" + }, + { + "abbreviation": "locc", + "Description": "lateral occipital cortex" + }, + { + "abbreviation": "cin", + "Description": "limbic lobe/cingulum" + }, + { + "abbreviation": "mme", + "Description": "mean millimetre error" + }, + { + "abbreviation": "mvocc", + "Description": "medioventral occipital cortex" + }, + { + "abbreviation": "mlf", + "Description": "medial longitudinal fasciculus" + }, + { + "abbreviation": "mesls", + "Description": "mesial longitudinal system" + }, + { + "abbreviation": "mfg", + "Description": 
"middle frontal gyrus" + }, + { + "abbreviation": "midls", + "Description": "middle longitudinal systems" + }, + { + "abbreviation": "mlf", + "Description": "middle longitudinal fasciculus" + }, + { + "abbreviation": "mtg", + "Description": "middle temporal gyrus" + }, + { + "abbreviation": "mni", + "Description": "montreal neurological institute" + }, + { + "abbreviation": "opa", + "Description": "opercular area" + }, + { + "abbreviation": "ofg", + "Description": "orbital frontal gyrus" + }, + { + "abbreviation": "pag", + "Description": "paracentral gyrus" + }, + { + "abbreviation": "pft", + "Description": "particle-filter tractography" + }, + { + "abbreviation": "pocg", + "Description": "postcentral gyrus" + }, + { + "abbreviation": "pts", + "Description": "posterior traverse system" + }, + { + "abbreviation": "pcg", + "Description": "precentral gyrus" + }, + { + "abbreviation": "pcun", + "Description": "precuneus" + }, + { + "abbreviation": "rois", + "Description": "regions of interest" + }, + { + "abbreviation": "rda", + "Description": "rostrodorsal area" + }, + { + "abbreviation": "rva", + "Description": "rostroventral area" + }, + { + "abbreviation": "stoi", + "Description": "sublobe tract of interest" + }, + { + "abbreviation": "sfg", + "Description": "superior frontal gyrus" + }, + { + "abbreviation": "slf ", + "Description": "superior longitudinal fasciculus " + }, + { + "abbreviation": "sls ", + "Description": "superior longitudinal system" + }, + { + "abbreviation": "spl", + "Description": "superior parietal lobule" + }, + { + "abbreviation": "stl", + "Description": "superior temporal lobe" + }, + { + "abbreviation": "sma", + "Description": "supplementary motor area" + }, + { + "abbreviation": "tois", + "Description": "tracts of interest" + }, + { + "abbreviation": "t", + "Description": "tesla" + }, + { + "abbreviation": "uf", + "Description": "uncinate fasciculus" + }, + { + "abbreviation": "vtois", + "Description": "variable tract of interest" + }, + { + "abbreviation": "abs", + "Description": "atlas based segmentation" + }, + { + "abbreviation": "afd", + "Description": "apparent fibre density" + }, + { + "abbreviation": "ad", + "Description": "axial diffusivity" + }, + { + "abbreviation": "bids", + "Description": "brain imaging data structure" + }, + { + "abbreviation": "lcs", + "Description": "cerebrospinal fluid" + }, + { + "abbreviation": "dodf", + "Description": "diffusion orientation distribution function" + }, + { + "abbreviation": "flair", + "Description": "fluid-attenuated inversion recovery" + }, + { + "abbreviation": "frf", + "Description": "fibre response function" + }, + { + "abbreviation": "rd", + "Description": "radial diffusivity" + }, + { + "abbreviation": "rf", + "Description": "radio frequency" + }, + { + "abbreviation": "scil", + "Description": "sherbrooke connectivity imaging laboratory" + }, + { + "abbreviation": "sp", + "Description": "multiple sclerosis" + }, + { + "abbreviation": "cpu", + "Description": "central processing unit" + }, + { + "abbreviation": "frt", + "Description": "funk-radon transform" + }, + { + "abbreviation": "go", + "Description": "gigabyte" + }, + { + "abbreviation": "gpu", + "Description": "graphical processing unit" + }, + { + "abbreviation": "gru", + "Description": "gated recurrent unit" + }, + { + "abbreviation": "irm", + "Description": "magnetic resonance imaging" + }, + { + "abbreviation": "irmd", + "Description": "diffusion-weighted magnetic resonance imaging" + }, + { + "abbreviation": "lstm", + "Description": "long 
short-term memory network" + }, + { + "abbreviation": "md", + "Description": "mean diffusivity" + }, + { + "abbreviation": "ram", + "Description": "random access memory" + }, + { + "abbreviation": "rnn", + "Description": "recurrent neural network" + }, + { + "abbreviation": "3d-shore", + "Description": "three dimensional simple harmonic oscillator based reconstruction and estimation model" + }, + { + "abbreviation": "ae", + "Description": "angular error metric" + }, + { + "abbreviation": "cdf", + "Description": "cohen-daubechies-feauveau" + }, + { + "abbreviation": "cdsi", + "Description": "classical diffusion spectrum imaging model" + }, + { + "abbreviation": "cs", + "Description": "compressive sensing" + }, + { + "abbreviation": "csa", + "Description": "constant solid angle q-ball model" + }, + { + "abbreviation": "csd", + "Description": "constrained spherical deconvolution model" + }, + { + "abbreviation": "cv", + "Description": "cross validation" + }, + { + "abbreviation": "ddsi", + "Description": "diffusion spectrum imaging deconvolution model" + }, + { + "abbreviation": "dipy", + "Description": "diffusion in python software" + }, + { + "abbreviation": "dnc", + "Description": "difference in the number of fiber compartments metric" + }, + { + "abbreviation": "dsi", + "Description": "diffusion spectrum imaging model" + }, + { + "abbreviation": "dsi515", + "Description": "classical diffusion spectrum imaging acquisition scheme with 515 samples" + }, + { + "abbreviation": "dsistudio", + "Description": "dsi studio software" + }, + { + "abbreviation": "dti", + "Description": "diffusion tensor imaging model" + }, + { + "abbreviation": "dtk", + "Description": "diffusion toolkit software" + }, + { + "abbreviation": "dtwt", + "Description": "dual tree wavelet transform" + }, + { + "abbreviation": "dw", + "Description": "diffusion weighted" + }, + { + "abbreviation": "dwi", + "Description": "diffusion weighted imaging" + }, + { + "abbreviation": "dwt", + "Description": "discrete wavelet transform" + }, + { + "abbreviation": "fft", + "Description": "fast fourier transform" + }, + { + "abbreviation": "fodf", + "Description": "fiber orientation distribution function" + }, + { + "abbreviation": "ib", + "Description": "invalib bundles metric" + }, + { + "abbreviation": "idft", + "Description": "inverse discrete fourier transform" + }, + { + "abbreviation": "isbi", + "Description": "ieee international symposium on biomedical imaging" + }, + { + "abbreviation": "isbi2013", + "Description": "subset of the dataset from the hardi challenge at the conference isbi2013" + }, + { + "abbreviation": "isbi2013-full", + "Description": "dataset from the hardi challenge at the conference isbi2013" + }, + { + "abbreviation": "mgh-ucla hcp", + "Description": "(massachusetts general hospital - university of california, los angeles) human connectome project" + }, + { + "abbreviation": "nmse", + "Description": "normalized mean square error" + }, + { + "abbreviation": "odsi", + "Description": "optimal diffusion spectrum imaging model" + }, + { + "abbreviation": "pccoeff", + "Description": "pearson correlation coefficient" + }, + { + "abbreviation": "pdsi", + "Description": "plain diffusion spectrum imaging model" + }, + { + "abbreviation": "pgse", + "Description": "pulse-gradient spin-echo" + }, + { + "abbreviation": "qbi", + "Description": "q-ball imaging model" + }, + { + "abbreviation": "rip", + "Description": "restricted isometry property" + }, + { + "abbreviation": "sc", + "Description": "sampling scheme" + }, + { + 
"abbreviation": "sense", + "Description": "sensitivity encoding algorithm" + }, + { + "abbreviation": "swt", + "Description": "stationary wavelet transform" + }, + { + "abbreviation": "tv", + "Description": "total variation" + }, + { + "abbreviation": "vb", + "Description": "valid bundles metric" + }, + { + "abbreviation": "vccr", + "Description": "valid connections to connection ratio" + }, + { + "abbreviation": "wu-minn hcp", + "Description": "(washington university, university of minnesota, and oxford university) human connectome project" + }, + { + "abbreviation": "2d", + "Description": "two dimensions" + }, + { + "abbreviation": "adc", + "Description": "apparent diffusion coefficient" + }, + { + "abbreviation": "aim", + "Description": "medical imaging axis" + }, + { + "abbreviation": "eeg", + "Description": "electroencephalography" + }, + { + "abbreviation": "chus", + "Description": "centre hospitalier universitaire de sherbrooke" + }, + { + "abbreviation": "cims", + "Description": "centre d\u2019imagerie mol\u00e9culaire de sherbrooke" + }, + { + "abbreviation": "crchus", + "Description": "centre de recherche du centre hospitalier universitaire de sherbrooke" + }, + { + "abbreviation": "fmr", + "Description": "mass-stiffness flow" + }, + { + "abbreviation": "fcm", + "Description": "mean-curvature flow" + }, + { + "abbreviation": "hr", + "Description": "high resolution" + }, + { + "abbreviation": "irmf", + "Description": "functional magnetic resonance imaging" + }, + { + "abbreviation": "meg", + "Description": "magnetoencephalography" + }, + { + "abbreviation": "psf", + "Description": "point spread function" + }, + { + "abbreviation": "roi", + "Description": "regions of interest" + }, + { + "abbreviation": "rgb", + "Description": "red, green and blue" + }, + { + "abbreviation": "rmn", + "Description": "nuclear magnetic resonance" + }, + { + "abbreviation": "sdp", + "Description": "semi-definite positive" + }, + { + "abbreviation": "snr", + "Description": "signal to noise ratio" + }, + { + "abbreviation": "tms", + "Description": "transcranial magnetic stimulation" + }, + { + "abbreviation": "wm", + "Description": "white matter" + }, + { + "abbreviation": "ad", + "Description": "alzheimer\u2019s disease" + }, + { + "abbreviation": "adni", + "Description": "alzheimer\u2019s disease neuroimaging initiative" + }, + { + "abbreviation": "bst", + "Description": "bundle-specific tractography" + }, + { + "abbreviation": "cnn", + "Description": "convolutional neural network" + }, + { + "abbreviation": "csc", + "Description": "continuous map criterion" + }, + { + "abbreviation": "dci", + "Description": "diffusion compartment imaging" + }, + { + "abbreviation": "dki", + "Description": "diffusion kurtosis imaging" + }, + { + "abbreviation": "edp", + "Description": "partial differential equation" + }, + { + "abbreviation": "mact", + "Description": "mesh anatomically-constrained tractography" + }, + { + "abbreviation": "mci", + "Description": "mild cognitive impairment" + }, + { + "abbreviation": "nc", + "Description": "normal control group" + }, + { + "abbreviation": "pft", + "Description": "particle filtering tractography" + }, + { + "abbreviation": "ping", + "Description": "pediatric imaging, neurocognition, and genetics" + }, + { + "abbreviation": "pve", + "Description": "partial volume effect" + }, + { + "abbreviation": "se", + "Description": "special euclidean group" + }, + { + "abbreviation": "sc", + "Description": "subcortical structures" + }, + { + "abbreviation": "sf", + "Description": 
"spherical function" + }, + { + "abbreviation": "tod", + "Description": "tract orientation density" + }, + { + "abbreviation": "act", + "Description": "anatomically constrained tractography" + }, + { + "abbreviation": "ad", + "Description": "alzheimer's disease" + }, + { + "abbreviation": "adni", + "Description": "alzheimer's disease neuroimaging initiative" + }, + { + "abbreviation": "ba", + "Description": "bundle adjacency" + }, + { + "abbreviation": "balsa", + "Description": "brain analysis library of spatial maps and atlases" + }, + { + "abbreviation": "boi", + "Description": "bundle of interest" + }, + { + "abbreviation": "clarity", + "Description": "clear lipid-exchanged acrylamide-hybridized rigid imaging / immunostaining / in situ-hybridization-compatible tissue hydrogel" + }, + { + "abbreviation": "cmc", + "Description": "continuous map criterion" + }, + { + "abbreviation": "cpu", + "Description": "central processing unit" + }, + { + "abbreviation": "dsc", + "Description": "dice score coefficient" + }, + { + "abbreviation": "eadc", + "Description": "european alzheimer\u2019s disease consortium" + }, + { + "abbreviation": "fat", + "Description": "fronto aslant tract" + }, + { + "abbreviation": "frf", + "Description": "fiber response function" + }, + { + "abbreviation": "efod", + "Description": "enhanced fiber orientation distribution" + }, + { + "abbreviation": "harp", + "Description": "harmonized hippocampal protocol" + }, + { + "abbreviation": "hc", + "Description": "healthy control" + }, + { + "abbreviation": "hcp", + "Description": "human connectome project" + }, + { + "abbreviation": "ifof", + "Description": "inferior fronto-occipital fasciculus" + }, + { + "abbreviation": "iqr", + "Description": "interquartile range" + }, + { + "abbreviation": "mri", + "Description": "magnetic resonance imaging" + }, + { + "abbreviation": "irm", + "Description": "magnetic resonance imaging" + }, + { + "abbreviation": "dmri", + "Description": "diffusion magnetic resonance imaging" + }, + { + "abbreviation": "irmd", + "Description": "diffusion magnetic resonance imaging" + }, + { + "abbreviation": "fmri", + "Description": "functional magnetic resonance imaging" + }, + { + "abbreviation": "irmf", + "Description": "functional magnetic resonance imaging" + }, + { + "abbreviation": "mdf", + "Description": "minimal direct-flip distance" + }, + { + "abbreviation": "ml", + "Description": "machine learning" + }, + { + "abbreviation": "oct", + "Description": "optical coherence tomography" + }, + { + "abbreviation": "pft", + "Description": "particle filter tractography" + }, + { + "abbreviation": "pve", + "Description": "partial volume effect" + }, + { + "abbreviation": "pyt", + "Description": "pyramidal tract" + }, + { + "abbreviation": "qb", + "Description": "quickbundles" + }, + { + "abbreviation": "ram", + "Description": "random access memory" + }, + { + "abbreviation": "rb(x)", + "Description": "recobundles(x)" + }, + { + "abbreviation": "mci", + "Description": "mild cognitive impairment" + }, + { + "abbreviation": "nmr", + "Description": "nuclear magnetic resonance" + }, + { + "abbreviation": "set", + "Description": "surface enhanced tractography" + }, + { + "abbreviation": "sfof", + "Description": "superior fronto-occipital fasciculus" + }, + { + "abbreviation": "slr", + "Description": "streamlines linear registration" + }, + { + "abbreviation": "tdi", + "Description": "tract density imaging" + }, + { + "abbreviation": "todi", + "Description": "tract orientation density imaging" + } +] \ No 
newline at end of file diff --git a/scilpy-bot-scripts/json_files/knowledge_base.json b/scilpy-bot-scripts/json_files/knowledge_base.json index e5172d4c4..ee5f949f3 100644 --- a/scilpy-bot-scripts/json_files/knowledge_base.json +++ b/scilpy-bot-scripts/json_files/knowledge_base.json @@ -4,1183 +4,44233 @@ "name": "scil_NODDI_maps", "docstring": "Compute NODDI [1] maps using AMICO.\nMulti-shell DWI necessary.\n\nFormerly: scil_compute_NODDI.py", "help": "usage: scil_NODDI_maps.py [-h] [--mask MASK] [--out_dir OUT_DIR]\n [--tolerance tol] [--skip_b0_check]\n [--para_diff PARA_DIFF] [--iso_diff ISO_DIFF]\n [--lambda1 LAMBDA1] [--lambda2 LAMBDA2]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec\n\nCompute NODDI [1] maps using AMICO.\nMulti-shell DWI necessary.\n\nFormerly: scil_compute_NODDI.py\n\npositional arguments:\n in_dwi DWI file acquired with a NODDI compatible protocol (single-shell data not suited).\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-vectors filename, in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Brain mask filename.\n --out_dir OUT_DIR Output directory for the NODDI results. [results]\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nModel options:\n --para_diff PARA_DIFF\n Axial diffusivity (AD) in the CC. [0.0017]\n --iso_diff ISO_DIFF Mean diffusivity (MD) in ventricles. [0.003]\n --lambda1 LAMBDA1 First regularization parameter. [0.5]\n --lambda2 LAMBDA2 Second regularization parameter. [0.001]\n\nKernels options:\n --save_kernels DIRECTORY\n Output directory for the COMMIT kernels.\n --load_kernels DIRECTORY\n Input directory where the COMMIT kernels are located.\n --compute_only Compute kernels only, --save_kernels must be used.\n\nReference:\n [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n NODDI: practical in vivo neurite orientation dispersion\n and density imaging of the human brain.\n NeuroImage. 
2012 Jul 16;61:1000-16.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "animal", + "human", + "human" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "orientation", + "orientation" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "parameter", + "parameters", + "parameter" + ], + [ + "methods", + "use" + ], + [ + "work", + "find" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "area", + "located" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "thinking", + "i" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "axial", + "axial" + ], + [ + "methods", + "using" + ], + [ + "future", + "current" + ], + [ + "exist", + "exists" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "vivo", + "vivo" + ], + [ + "imaging", + "imaging" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "area", + "work", + "where" + ], + [ + "result", + "results" + ], + [ + "order", + "allow" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "step", + "start" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "maps", + "maps" + ], + [ + "order", + "necessary" + ] + ], "keywords": [] }, { "name": "scil_NODDI_priors", "docstring": "Compute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py", "help": "usage: scil_NODDI_priors.py [-h] [--fa_min_single_fiber FA_MIN_SINGLE_FIBER]\n [--fa_max_ventricles FA_MAX_VENTRICLES]\n [--md_min_ventricles MD_MIN_VENTRICLES]\n [--roi_radius ROI_RADIUS]\n [--roi_center pos pos pos]\n [--out_txt_1fiber_para FILE]\n [--out_txt_1fiber_perp FILE]\n [--out_mask_1fiber FILE]\n [--out_txt_ventricles FILE]\n [--out_mask_ventricles FILE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_FA in_AD in_RD in_MD\n\nCompute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py\n\npositional arguments:\n in_FA Path to the FA volume.\n in_AD Path to the axial diffusivity (AD) volume.\n in_RD Path to the radial diffusivity (RD) volume.\n in_MD Path to the mean diffusivity (MD) volume.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the 
provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics options:\n --fa_min_single_fiber FA_MIN_SINGLE_FIBER\n Minimal threshold of FA (voxels above that threshold are considered in \n the single fiber mask). [0.7]\n --fa_max_ventricles FA_MAX_VENTRICLES\n Maximal threshold of FA (voxels under that threshold are considered in \n the ventricles). [0.1]\n --md_min_ventricles MD_MIN_VENTRICLES\n Minimal threshold of MD in mm2/s (voxels above that threshold are considered \n for in the ventricles). [0.003]\n\nRegions options:\n --roi_radius ROI_RADIUS\n Radius of the region used to estimate the priors. The roi will be a cube spanning \n from ROI_CENTER in each direction. [20]\n --roi_center pos pos pos\n Center of the roi of size roi_radius used to estimate the priors; a 3-value coordinate. \n If not set, uses the center of the 3D volume.\n\nOutputs:\n --out_txt_1fiber_para FILE\n Output path for the text file containing the single fiber average value of AD.\n If not set, the file will not be saved.\n --out_txt_1fiber_perp FILE\n Output path for the text file containing the single fiber average value of RD.\n If not set, the file will not be saved.\n --out_mask_1fiber FILE\n Output path for single fiber mask. If not set, the mask will not be saved.\n --out_txt_ventricles FILE\n Output path for the text file containing the ventricles average value of MD.\n If not set, the file will not be saved.\n --out_mask_ventricles FILE\n Output path for the ventricule mask.\n If not set, the mask will not be saved.\n\nReference:\n [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n NODDI: practical in vivo neurite orientation dispersion and density\n imaging of the human brain. NeuroImage. 
2012 Jul 16;61:1000-16.\n", - "synonyms": [], + "synonyms": [ + [ + "animal", + "human", + "human" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "orientation", + "orientation" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "fibre", + "fiber" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "areas", + "region", + "regions", + "regions" + ], + [ + "direction", + "direction" + ], + [ + "order", + "set" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "average", + "average" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "axial", + "axial" + ], + [ + "methods", + "using" + ], + [ + "vivo", + "vivo" + ], + [ + "imaging", + "imaging" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "individual", + "each" + ], + [ + "larger", + "size", + "size" + ], + [ + "lack", + "minimal" + ], + [ + "level", + "above" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "area", + "areas", + "region", + "regions", + "region" + ] + ], "keywords": [] }, { "name": "scil_aodf_metrics", "docstring": "Script to compute various metrics derivated from asymmetric ODF.\n\nThese metrics include the asymmetric peak directions image, a number of fiber\ndirections (nufid) map [1], the asymmetry index (ASI) map [2] and an odd-power\nmap [3].\n\nThe asymmetric peak directions image contains peaks per hemisphere, considering\nantipodal sphere directions as distinct. On a symmetric signal, the number of\nasymmetric peaks extracted is then twice the number of symmetric peaks.\n\nThe nufid map is the asymmetric alternative to NuFO maps. It counts the\nnumber of asymmetric peaks extracted and ranges in [0..N] with N the maximum\nnumber of peaks.\n\nThe asymmetric index is a cosine-based metric in the range [0..1], with 0\ncorresponding to a perfectly symmetric signal and 1 to a perfectly asymmetric\nsignal.\n\nThe odd-power map is also in the range [0..1], with 0 corresponding to a\nperfectly symmetric signal and 1 to a perfectly anti-symmetric signal. 
It is\ngiven as the ratio of the L2-norm of odd SH coefficients on the L2-norm of all\nSH coefficients.\n\nFormerly: scil_compute_asym_odf_metrics.py", "help": "usage: scil_aodf_metrics.py [-h] [--mask MASK] [--asi_map ASI_MAP]\n [--odd_power_map ODD_POWER_MAP] [--peaks PEAKS]\n [--peak_values PEAK_VALUES]\n [--peak_indices PEAK_INDICES] [--nufid NUFID]\n [--not_all] [--at A_THRESHOLD] [--rt R_THRESHOLD]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--processes NBR]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh\n\nScript to compute various metrics derivated from asymmetric ODF.\n\nThese metrics include the asymmetric peak directions image, a number of fiber\ndirections (nufid) map [1], the asymmetry index (ASI) map [2] and an odd-power\nmap [3].\n\nThe asymmetric peak directions image contains peaks per hemisphere, considering\nantipodal sphere directions as distinct. On a symmetric signal, the number of\nasymmetric peaks extracted is then twice the number of symmetric peaks.\n\nThe nufid map is the asymmetric alternative to NuFO maps. It counts the\nnumber of asymmetric peaks extracted and ranges in [0..N] with N the maximum\nnumber of peaks.\n\nThe asymmetric index is a cosine-based metric in the range [0..1], with 0\ncorresponding to a perfectly symmetric signal and 1 to a perfectly asymmetric\nsignal.\n\nThe odd-power map is also in the range [0..1], with 0 corresponding to a\nperfectly symmetric signal and 1 to a perfectly anti-symmetric signal. It is\ngiven as the ratio of the L2-norm of odd SH coefficients on the L2-norm of all\nSH coefficients.\n\nFormerly: scil_compute_asym_odf_metrics.py\n\npositional arguments:\n in_sh Input SH image.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Optional mask.\n --asi_map ASI_MAP Output asymmetry index (ASI) map.\n --odd_power_map ODD_POWER_MAP\n Output odd power map.\n --peaks PEAKS Output filename for the extracted peaks.\n --peak_values PEAK_VALUES\n Output filename for the extracted peaks values.\n --peak_indices PEAK_INDICES\n Output filename for the generated peaks indices on the sphere.\n --nufid NUFID Output filename for the nufid file.\n --not_all If set, only saves the files specified using the file flags [False].\n --at A_THRESHOLD Absolute threshold on fODF amplitude. This value should be set to\n approximately 1.5 to 2 times the maximum fODF amplitude in isotropic voxels\n (ie. ventricles).\n Use scil_fodf_max_in_ventricles.py to find the maximal value.\n See [Dell'Acqua et al HBM 2013] [0.0].\n --rt R_THRESHOLD Relative threshold on fODF amplitude in percentage [0.1].\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere to use for peak directions estimation [symmetric724].\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. 
MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] C. Poirier and M. Descoteaux, \"Filtering Methods for Asymmetric ODFs:\nWhere and How Asymmetry Occurs in the White Matter.\" bioRxiv. 2022 Jan 1;\n2022.12.18.520881. doi: https://doi.org/10.1101/2022.12.18.520881\n\n[2] S. Cetin Karayumak, E. \u00d6zarslan, and G. Unal,\n\"Asymmetric Orientation Distribution Functions (AODFs) revealing intravoxel\ngeometry in diffusion MRI,\" Magnetic Resonance Imaging, vol. 49, pp. 145-158,\nJun. 2018, doi: https://doi.org/10.1016/j.mri.2018.03.006.\n\n[3] C. Poirier, E. St-Onge, and M. Descoteaux, \"Investigating the Occurence of\nAsymmetric Patterns in White Matter Fiber Orientation Distribution Functions\"\n[Abstract], In: Proc. Intl. Soc. Mag. Reson. Med. 29 (2021), 2021 May 15-20,\nVancouver, BC, Abstract number 0865.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "average", + "per" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "orientation", + "orientation" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "examining", + "involved", + "investigating" + ], + [ + "unique", + "distinct" + ], + [ + "methods", + "use" + ], + [ + "work", + "find" + ], + [ + "order", + "set" + ], + [ + "direction", + "directions" + ], + [ + "long", + "a" + ], + [ + "average", + "percentage" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "specific", + "variety", + "various" + ], + [ + "diffusion", + "diffusion" + ], + [ + "imaging", + "mri" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "variety", + "include" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "matter", + "question", + "thinking", + "true", + "work", + "working", + "how" + ], + [ + "long", + "with" + ], + [ + "possibility", + "considering" + ], + [ + "parameters", + "specified" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "signal", + "signal" + ], + [ + "imaging", + "imaging" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "result", + "may" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "area", + "work", + "where" + ], + [ + "clear", + "considered", + "result", + 
"work", + "it" + ], + [ + "analysis", + "methodology", + "methods", + "processes", + "methods" + ], + [ + "step", + "start" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "false", + "false" + ], + [ + "maps", + "maps" + ], + [ + "function", + "functions", + "functions" + ], + [ + "based", + "based" + ], + [ + "maps", + "map" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "hemisphere", + "hemisphere" + ] + ], "keywords": [] }, { "name": "scil_bids_validate", "docstring": "Create a json file from a BIDS dataset detailling all info\nneeded for tractoflow\n- DWI/rev_DWI\n- T1\n- fmap/sbref (based on IntendedFor entity)\n- Freesurfer (optional - could be one per participant\n or one per participant/session)\n\nThe BIDS dataset MUST be homogeneous.\nThe metadata need to be uniform across all participants/sessions/runs\n\nMandatory entity: IntendedFor\nSensitive entities: PhaseEncodingDirection, TotalReadoutTime, direction\n\nFormerly: scil_validate_bids.py", "help": "usage: scil_bids_validate.py [-h] [--bids_ignore BIDS_IGNORE] [--fs FS]\n [--clean] [--readout READOUT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bids out_json\n\nCreate a json file from a BIDS dataset detailling all info\nneeded for tractoflow\n- DWI/rev_DWI\n- T1\n- fmap/sbref (based on IntendedFor entity)\n- Freesurfer (optional - could be one per participant\n or one per participant/session)\n\nThe BIDS dataset MUST be homogeneous.\nThe metadata need to be uniform across all participants/sessions/runs\n\nMandatory entity: IntendedFor\nSensitive entities: PhaseEncodingDirection, TotalReadoutTime, direction\n\nFormerly: scil_validate_bids.py\n\npositional arguments:\n in_bids Input BIDS folder.\n out_json Output json file.\n\noptions:\n -h, --help show this help message and exit\n --bids_ignore BIDS_IGNORE\n If you want to ignore some subjects or some files, you\n can provide an extra bidsignore file.Check:\n https://github.com/bids-standard/bids-\n validator#bidsignore\n --fs FS Output freesurfer path. It will add keys wmparc and\n aparc+aseg.\n --clean If set, it will remove all the participants that are\n missing any information.\n --readout READOUT Default total readout time value [0.062].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. 
Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "average", + "per" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "subject", + "subjects", + "subjects" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "participant", + "participant" + ], + [ + "direction", + "direction" + ], + [ + "order", + "set" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "create" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "long", + "result", + "work", + "working", + "time" + ], + [ + "create", + "provide" + ], + [ + "methods", + "using" + ], + [ + "areas", + "across" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "participants", + "participants" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "create", + "lack", + "step", + "work", + "working", + "need" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "based", + "based" + ], + [ + "clear", + "result", + "work", + "could" + ], + [ + "increase", + "total", + "total" + ], + [ + "large", + "work", + "some" + ] + ], "keywords": [] }, { "name": "scil_bingham_metrics", "docstring": "Script to compute fODF lobe-specific metrics derived from a Bingham\ndistribution fit, as described in [1]. Resulting metrics are fiber density\n(FD), fiber spread (FS) and fiber fraction (FF) [2].\n\nThe Bingham coefficients volume comes from scil_fodf_to_bingham.py.\n\nA lobe's FD is the integral of the Bingham function on the sphere. It\nrepresents the density of fibers going through a given voxel for a given\nfODF lobe (fixel). A lobe's FS is the ratio of its FD on its maximum AFD. It\nis at its minimum for a sharp lobe and at its maximum for a wide lobe. A lobe's\nFF is the ratio of its FD on the total FD in the voxel.\n\nUsing 12 threads, the execution takes 10 minutes for FD estimation for a brain\nwith 1mm isotropic resolution. Other metrics take less than a second.\n\nFormerly: scil_compute_lobe_specific_fodf_metrics.py", "help": "usage: scil_bingham_metrics.py [-h] [--out_fd OUT_FD] [--out_fs OUT_FS]\n [--out_ff OUT_FF] [--not_all] [--mask MASK]\n [--nbr_integration_steps NBR_INTEGRATION_STEPS]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR]\n [-f]\n in_bingham\n\nScript to compute fODF lobe-specific metrics derived from a Bingham\ndistribution fit, as described in [1]. Resulting metrics are fiber density\n(FD), fiber spread (FS) and fiber fraction (FF) [2].\n\nThe Bingham coefficients volume comes from scil_fodf_to_bingham.py.\n\nA lobe's FD is the integral of the Bingham function on the sphere. 
It\nrepresents the density of fibers going through a given voxel for a given\nfODF lobe (fixel). A lobe's FS is the ratio of its FD on its maximum AFD. It\nis at its minimum for a sharp lobe and at its maximum for a wide lobe. A lobe's\nFF is the ratio of its FD on the total FD in the voxel.\n\nUsing 12 threads, the execution takes 10 minutes for FD estimation for a brain\nwith 1mm isotropic resolution. Other metrics take less than a second.\n\nFormerly: scil_compute_lobe_specific_fodf_metrics.py\n\npositional arguments:\n in_bingham Input Bingham nifti image.\n\noptions:\n -h, --help show this help message and exit\n --out_fd OUT_FD Path to output fiber density. [fd.nii.gz]\n --out_fs OUT_FS Path to output fiber spread. [fs.nii.gz]\n --out_ff OUT_FF Path to fiber fraction file. [ff.nii.gz]\n --not_all Do not compute all metrics. Then, please provide the output paths of the files you need.\n --mask MASK Optional mask image. Only voxels inside the mask are computed.\n --nbr_integration_steps NBR_INTEGRATION_STEPS\n Number of integration steps along the theta axis for fiber density estimation. [50]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -f Force overwriting of the output files.\n\n[1] T. W. Riffert, J. Schreiber, A. Anwander, and T. R. Kn\u00f6sche, \u201cBeyond\n fractional anisotropy: Extraction of bundle-specific structural metrics\n from crossing fiber models,\u201d NeuroImage, vol. 100, pp. 176-191, Oct. 2014,\n doi: 10.1016/j.neuroimage.2014.06.015.\n\n[2] J. Schreiber, T. Riffert, A. Anwander, and T. R. Kn\u00f6sche, \u201cPlausibility\n Tracking: A method to evaluate anatomical connectivity and microstructural\n properties along fiber pathways,\u201d NeuroImage, vol. 90, pp. 
163-178, Apr.\n 2014, doi: 10.1016/j.neuroimage.2014.01.002.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "methods", + "method" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "total", + "90" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "brain", + "brain" + ], + [ + "meaning", + "derived" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "specific", + "specific" + ], + [ + "thinking", + "you" + ], + [ + "step", + "steps" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "anatomical", + "anatomical" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "defined", + "function", + "functional", + "functions", + "function" + ], + [ + "clear", + "order", + "step", + "work", + "take" + ], + [ + "pathway", + "pathways", + "pathways" + ], + [ + "long", + "with" + ], + [ + "total", + "50" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "create", + "provide" + ], + [ + "represent", + "represents" + ], + [ + "methods", + "using" + ], + [ + "structural", + "structural" + ], + [ + "variety", + "work", + "other" + ], + [ + "area", + "main", + "along" + ], + [ + "long", + "than" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "highly", + "less" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "result", + "resulting" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "fibres", + "fibers" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "create", + "lack", + "step", + "work", + "working", + "need" + ], + [ + "connectivity", + "connectivity" + ], + [ + "step", + "start" + ], + [ + "work", + "all" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "total", + "100" + ], + [ + "lobe", + "lobes", + "occipital", + "parietal", + "lobe" + ], + [ + "attention", + "comes" + ], + [ + "step", + "thinking", + "going" + ], + [ + "bundles", + "bundle" + ], + [ + "data", + "tracking", + "tracking" + ], + [ + "examine", + "evaluate" + ], + [ + "increase", + "total", + "total" + ], + [ + "function", + "integral" + ] + ], "keywords": [] }, { "name": "scil_btensor_metrics", "docstring": "Script to compute microstructure metrics using the DIVIDE method. In order to\noperate, the script needs at leats two different types of b-tensor encodings.\nNote that custom encodings are not yet supported, so that only the linear\ntensor encoding (LTE, b_delta = 1), the planar tensor encoding\n(PTE, b_delta = -0.5), the spherical tensor encoding (STE, b_delta = 0) and\nthe cigar shape tensor encoding (b_delta = 0.5) are available. Moreover, all\nof `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. 
Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names. Thus, this\nscript outputs the results from the DIVIDE fit or direct derivatives:\nmean diffusivity (MD), isotropic mean kurtosis (mk_i), anisotropic mean\nkurtosis (mk_a), total mean kurtosis (mk_t) and finally micro-FA (uFA).\nSpecific names can be specified using the\nfile flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output. The order parameter can also be computed from the uFA and a\nprecomputed FA, using separate input parameters.\n\n>>> scil_btensor_metrics.py --in_dwis LTE.nii.gz PTE.nii.gz STE.nii.gz\n --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs LTE.bvec PTE.bvec STE.bvec\n --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nIMPORTANT: If the script does not converge to a solution, it is probably due to\nnoise outside the brain. Thus, it is strongly recommanded to provide a brain\nmask with --mask.\n\nBased on Markus Nilsson, Filip Szczepankiewicz, Bj\u00f6rn Lampinen, Andr\u00e9 Ahlgren,\nJo\u00e3o P. de Almeida Martins, Samo Lasic, Carl-Fredrik Westin,\nand Daniel Topgaard. An open-source framework for analysis of multidimensional\ndiffusion MRI data implemented in MATLAB.\nProc. Intl. Soc. Mag. Reson. Med. (26), Paris, France, 2018.\n\nFormerly: scil_compute_divide.py", "help": "usage: scil_btensor_metrics.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--mask MASK]\n [--tolerance tol] [--skip_b0_check]\n [--fit_iters FIT_ITERS]\n [--random_iters RANDOM_ITERS]\n [--do_weight_bvals] [--do_weight_pa]\n [--do_multiple_s0] [--op OP] [--fa FA]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n [-f] [--not_all] [--md file] [--ufa file]\n [--mk_i file] [--mk_a file] [--mk_t file]\n\nScript to compute microstructure metrics using the DIVIDE method. In order to\noperate, the script needs at leats two different types of b-tensor encodings.\nNote that custom encodings are not yet supported, so that only the linear\ntensor encoding (LTE, b_delta = 1), the planar tensor encoding\n(PTE, b_delta = -0.5), the spherical tensor encoding (STE, b_delta = 0) and\nthe cigar shape tensor encoding (b_delta = 0.5) are available. Moreover, all\nof `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names. Thus, this\nscript outputs the results from the DIVIDE fit or direct derivatives:\nmean diffusivity (MD), isotropic mean kurtosis (mk_i), anisotropic mean\nkurtosis (mk_a), total mean kurtosis (mk_t) and finally micro-FA (uFA).\nSpecific names can be specified using the\nfile flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output. 
The order parameter can also be computed from the uFA and a\nprecomputed FA, using separate input parameters.\n\n>>> scil_btensor_metrics.py --in_dwis LTE.nii.gz PTE.nii.gz STE.nii.gz\n --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs LTE.bvec PTE.bvec STE.bvec\n --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nIMPORTANT: If the script does not converge to a solution, it is probably due to\nnoise outside the brain. Thus, it is strongly recommanded to provide a brain\nmask with --mask.\n\nBased on Markus Nilsson, Filip Szczepankiewicz, Bj\u00f6rn Lampinen, Andr\u00e9 Ahlgren,\nJo\u00e3o P. de Almeida Martins, Samo Lasic, Carl-Fredrik Westin,\nand Daniel Topgaard. An open-source framework for analysis of multidimensional\ndiffusion MRI data implemented in MATLAB.\nProc. Intl. Soc. Mag. Reson. Med. (26), Paris, France, 2018.\n\nFormerly: scil_compute_divide.py\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as dwi, bval and bvec inputs.\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --fit_iters FIT_ITERS\n The number of time the gamma fit will be done [1]\n --random_iters RANDOM_ITERS\n The number of iterations for the initial parameters search. [50]\n --do_weight_bvals If set, does not do a weighting on the bvalues in the gamma fit.\n --do_weight_pa If set, does not do a powder averaging weighting in the gamma fit.\n --do_multiple_s0 If set, does not take into account multiple baseline signals.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags. (Default: False)\n\nOrder parameter (OP):\n --op OP Output filename for the order parameter. The OP will not be output if this is not given. Computation of the OP also requires a precomputed FA map (given using --fa).\n --fa FA Path to a FA map. 
Needed for calculating the OP.\n\nFile flags:\n --md file Output filename for the MD.\n --ufa file Output filename for the microscopic FA.\n --mk_i file Output filename for the isotropic mean kurtosis.\n --mk_a file Output filename for the anisotropic mean kurtosis.\n --mk_t file Output filename for the total mean kurtosis.\n", - "synonyms": [], + "synonyms": [ + [ + "individual", + "specific", + "unique", + "variety", + "different" + ], + [ + "total", + "number" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "methods", + "method" + ], + [ + "lack", + "result", + "due" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "left", + "into" + ], + [ + "brain", + "brain" + ], + [ + "result", + "moreover" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "parameter", + "parameters", + "parameter" + ], + [ + "methods", + "use" + ], + [ + "work", + "find" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "specific", + "specific" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "diffusion", + "diffusion" + ], + [ + "imaging", + "mri" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "probably" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "clear", + "order", + "step", + "work", + "take" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "process", + "solution" + ], + [ + "long", + "with" + ], + [ + "parameters", + "specified" + ], + [ + "clear", + "long", + "matter", + "result", + "work", + "so" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "long", + "result", + "work", + "working", + "time" + ], + [ + "total", + "50" + ], + [ + "create", + "provide" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "future", + "current" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "exist", + "exists" + ], + [ + "signal", + "signals" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "supported", + "supported" + ], + [ + "create", + "order", + "work", + "to" 
+ ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "matter", + "question", + "does" + ], + [ + "result", + "results" + ], + [ + "considered", + "form", + "meaning", + "order", + "result", + "thus" + ], + [ + "order", + "allow" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "long", + "two" + ], + [ + "work", + "working", + "done" + ], + [ + "step", + "start" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "step", + "true", + "work", + "yet" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "false", + "false" + ], + [ + "based", + "based" + ], + [ + "forms", + "specific", + "variety", + "types" + ], + [ + "maps", + "map" + ], + [ + "clear", + "matter", + "question", + "thinking", + "sure" + ], + [ + "supported", + "strongly" + ], + [ + "increase", + "total", + "total" + ], + [ + "shape", + "structure", + "shape" + ], + [ + "considered", + "key", + "work", + "important" + ] + ], "keywords": [] }, { "name": "scil_bundle_clean_qbx_clusters", "docstring": "Render clusters sequentially to either accept or reject them based on\nvisual inspection. Useful for cleaning bundles for RBx, BST or for figures.\nThe VTK window does not handle well opacity of streamlines, this is a\nnormal rendering behavior.\nOften use in pair with scil_tractogram_qbx.py.\n\nKey mapping:\n- a/A: accept displayed clusters\n- r/R: reject displayed clusters\n- z/Z: Rewing one element\n- c/C: Stop rendering of the background concatenation of streamlines\n- q/Q: Early window exist, everything remaining will be rejected", "help": "usage: scil_bundle_clean_qbx_clusters.py [-h]\n [--out_accepted_dir OUT_ACCEPTED_DIR]\n [--out_rejected_dir OUT_REJECTED_DIR]\n [--min_cluster_size MIN_CLUSTER_SIZE]\n [--background_opacity BACKGROUND_OPACITY]\n [--background_linewidth BACKGROUND_LINEWIDTH]\n [--clusters_linewidth CLUSTERS_LINEWIDTH]\n [--reference REFERENCE]\n [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...]\n out_accepted out_rejected\n\n Render clusters sequentially to either accept or reject them based on\n visual inspection. 
Useful for cleaning bundles for RBx, BST or for figures.\n The VTK window does not handle well opacity of streamlines, this is a\n normal rendering behavior.\n Often use in pair with scil_tractogram_qbx.py.\n\n Key mapping:\n - a/A: accept displayed clusters\n - r/R: reject displayed clusters\n - z/Z: Rewing one element\n - c/C: Stop rendering of the background concatenation of streamlines\n - q/Q: Early window exist, everything remaining will be rejected\n\npositional arguments:\n in_bundles List of the clusters filename.\n out_accepted Filename of the concatenated accepted clusters.\n out_rejected Filename of the concatenated rejected clusters.\n\noptions:\n -h, --help show this help message and exit\n --out_accepted_dir OUT_ACCEPTED_DIR\n Directory to save all accepted clusters separately.\n --out_rejected_dir OUT_REJECTED_DIR\n Directory to save all rejected clusters separately.\n --min_cluster_size MIN_CLUSTER_SIZE\n Minimum cluster size for consideration [1].Must be at least 1.\n --background_opacity BACKGROUND_OPACITY\n Opacity of the background streamlines.Keep low between 0 and 0.5 [0.1].\n --background_linewidth BACKGROUND_LINEWIDTH\n Linewidth of the background streamlines [1].\n --clusters_linewidth CLUSTERS_LINEWIDTH\n Linewidth of the current cluster [1].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundles" + ], + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "proposed", + "rejected" + ], + [ + "left", + "result", + "when" + ], + [ + "invalid", + "valid", + "invalid" + ], + [ + "rendered", + "rendering", + "rendering" + ], + [ + "methods", + "use" + ], + [ + "exist", + "exist" + ], + [ + "maps", + "mapping" + ], + [ + "long", + "a" + ], + [ + "key", + "main", + "key" + ], + [ + "held", + "on" + ], + [ + "tool", + "useful" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "traditionally", + "often" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "rendered", + "rendering", + "render" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "visual", + "visual" + ], + [ + "methods", + "using" + ], + [ + "future", + "current" + ], + [ + "thinking", + "everything" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "high", + "higher", + "level", + "low" + ], + [ + "larger", + "size", + "size" + ], + [ + "matter", + "question", + "does" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "based", + "based" + ], + [ + "considered", + "experience", + 
"large", + "long", + "result", + "variety", + "work", + "working", + "well" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_bundle_compute_centroid", "docstring": "Compute a single bundle centroid, using an 'infinite' QuickBundles threshold.\n\nFormerly: scil_compute_centroid.py", "help": "usage: scil_bundle_compute_centroid.py [-h] [--nb_points NB_POINTS]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_centroid\n\nCompute a single bundle centroid, using an 'infinite' QuickBundles threshold.\n\nFormerly: scil_compute_centroid.py\n\npositional arguments:\n in_bundle Fiber bundle file.\n out_centroid Output centroid streamline filename.\n\noptions:\n -h, --help show this help message and exit\n --nb_points NB_POINTS\n Number of points defining the centroid streamline[20].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "streamline", + "streamline" + ], + [ + "long", + "a" + ], + [ + "work", + "and" + ], + [ + "methods", + "using" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "bundles", + "bundle" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "is" + ] + ], "keywords": [] }, { "name": "scil_bundle_compute_endpoints_map", "docstring": "Computes the endpoint map of a bundle. The endpoint map is simply a count of\nthe number of streamlines that start or end in each voxel.\n\nThe idea is to estimate the cortical area affected by the bundle (assuming\nstreamlines start/end in the cortex).\n\nNote: If the streamlines are not ordered the head/tail are random and not\nreally two coherent groups. Use the following script to order streamlines:\nscil_tractogram_uniformize_endpoints.py\n\nFormerly: scil_compute_endpoints_map.py", "help": "usage: scil_bundle_compute_endpoints_map.py [-h] [--swap] [--binary]\n [--nb_points NB_POINTS]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle endpoints_map_head\n endpoints_map_tail\n\nComputes the endpoint map of a bundle. The endpoint map is simply a count of\nthe number of streamlines that start or end in each voxel.\n\nThe idea is to estimate the cortical area affected by the bundle (assuming\nstreamlines start/end in the cortex).\n\nNote: If the streamlines are not ordered the head/tail are random and not\nreally two coherent groups. Use the following script to order streamlines:\nscil_tractogram_uniformize_endpoints.py\n\nFormerly: scil_compute_endpoints_map.py\n\npositional arguments:\n in_bundle Fiber bundle filename.\n endpoints_map_head Output endpoints map head filename.\n endpoints_map_tail Output endpoints map tail filename.\n\noptions:\n -h, --help show this help message and exit\n --swap Swap head<->tail convention. 
Can be useful when the reference is not in RAS.\n --binary Save outputs as a binary mask instead of a heat map.\n --nb_points NB_POINTS\n Number of points to consider at the extremities of the streamlines. [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "long", + "end" + ], + [ + "total", + "number" + ], + [ + "thinking", + "really" + ], + [ + "anatomy", + "anatomy" + ], + [ + "work", + "and" + ], + [ + "result", + "following" + ], + [ + "considered", + "are" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "order", + "order" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "cortex", + "thalamus", + "cortex" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "tool", + "useful" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "random", + "random" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "cortical", + "cortical" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "thinking", + "simply" + ], + [ + "order", + "work", + "instead" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "possibility", + "question", + "thinking", + "true", + "view", + "idea" + ], + [ + "individual", + "each" + ], + [ + "long", + "two" + ], + [ + "step", + "start" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "maps", + "map" + ], + [ + "bundles", + "bundle" + ], + [ + "area", + "areas", + "region", + "area" + ], + [ + "considered", + "possibility", + "question", + "step", + "consider" + ] + ], "keywords": [] }, { "name": "scil_bundle_diameter", "docstring": "Script to estimate the diameter of bundle(s) along their length.\nSee also scil_bundle_shape_measures.py, which prints a quick estimate of\nthe diameter (volume / length). 
The computation here is more complex and done\nfor each section of the bundle.\n\nThe script expects:\n- bundles with coherent endpoints from scil_tractogram_uniformize_endpoints.py\n- labels maps with around 5-50 points from scil_bundle_label_map.py\n <5 is not enough, high risk of bad fit\n >50 is too much, high risk of bad fit\n- bundles that are close to a tube\n without major fanning in a single axis\n fanning in 2 directions (uniform dispersion) is a good approximation\n\nThe script prints a JSON file with mean/std to be compatible with tractometry.\nWARNING: STD is in fact an ERROR measure from the fit and NOT an STD.\n\nSince the estimation and fit quality is not always intuitive for some bundles\nand the tube with varying diameter is not easy to color/visualize,\nthe script comes with its own VTK rendering to allow exploration of the data\n(optional).\n\nFormerly: scil_estimate_bundles_diameter.py", "help": "usage: scil_bundle_diameter.py [-h]\n [--fitting_func {lin_up,lin_down,exp,inv,log}]\n [--show_rendering | --save_rendering OUT_FOLDER]\n [--wireframe] [--error_coloring]\n [--width WIDTH] [--opacity OPACITY]\n [--win_dims WIDTH HEIGHT] [--background R G B]\n [--reference REFERENCE] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...] in_labels\n [in_labels ...]\n\nScript to estimate the diameter of bundle(s) along their length.\nSee also scil_bundle_shape_measures.py, which prints a quick estimate of\nthe diameter (volume / length). The computation here is more complex and done\nfor each section of the bundle.\n\nThe script expects:\n- bundles with coherent endpoints from scil_tractogram_uniformize_endpoints.py\n- labels maps with around 5-50 points from scil_bundle_label_map.py\n <5 is not enough, high risk of bad fit\n >50 is too much, high risk of bad fit\n- bundles that are close to a tube\n without major fanning in a single axis\n fanning in 2 directions (uniform dispersion) is a good approximation\n\nThe script prints a JSON file with mean/std to be compatible with tractometry.\nWARNING: STD is in fact an ERROR measure from the fit and NOT an STD.\n\nSince the estimation and fit quality is not always intuitive for some bundles\nand the tube with varying diameter is not easy to color/visualize,\nthe script comes with its own VTK rendering to allow exploration of the data\n(optional).\n\nFormerly: scil_estimate_bundles_diameter.py\n\npositional arguments:\n in_bundles List of tractography files.\n in_labels List of labels maps that match the bundles.\n\noptions:\n -h, --help show this help message and exit\n --fitting_func {lin_up,lin_down,exp,inv,log}\n Function to weigh points using their distance.\n [Default: None]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nVisualization options:\n --show_rendering Display VTK window (optional).\n --save_rendering OUT_FOLDER\n Save VTK render in the specified folder (optional)\n --wireframe Use wireframe for the tube rendering.\n --error_coloring Use the fitting error to color the tube.\n --width WIDTH Width of tubes or lines representing streamlines\n [Default: 0.2]\n --opacity OPACITY Opacity for the streamlines rendered with the tube.\n [Default: 0.2]\n --win_dims WIDTH HEIGHT\n The dimensions for the vtk window. 
[(1920, 1080)]\n --background R G B RBG values [0, 255] of the color of the background.\n [Default: [1, 1, 1]]\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "true", + "always" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "bundles", + "bundles" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "considered", + "are" + ], + [ + "long", + "work", + "more" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "attention", + "experience", + "long", + "result", + "work", + "much" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "too" + ], + [ + "rendered", + "rendering", + "rendering" + ], + [ + "methods", + "use" + ], + [ + "rendered", + "rendering", + "rendered" + ], + [ + "direction", + "directions" + ], + [ + "long", + "a" + ], + [ + "represent", + "representing" + ], + [ + "held", + "on" + ], + [ + "exploration", + "exploration" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "create", + "enough" + ], + [ + "key", + "main", + "major" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "rendered", + "rendering", + "render" + ], + [ + "held", + "in" + ], + [ + "defined", + "function", + "functional", + "functions", + "function" + ], + [ + "long", + "with" + ], + [ + "parameters", + "specified" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "total", + "50" + ], + [ + "clear", + "experience", + "thinking", + "true", + "good" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "area", + "main", + "along" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "create", + "work", + "own" + ], + [ + "individual", + "each" + ], + [ + "order", + "allow" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "work", + "working", + "done" + ], + [ + "complex", + "structure", + "structures", + "complex" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "maps", + "maps" + ], + [ + "lack", + "quality" + ], + [ + "clear", + "considered", + "difference", + "lack", + "matter", + "question", + "result", + "subject", + "thinking", + "true", + "view", + "fact" + ], + [ + "attention", + "comes" + ], + [ + "error", + "error" + ], + [ + "bundles", + "bundle" + ], + [ + "potential", + "risk" + ], + [ + "result", + "since" + ], + [ + "large", + "work", + "some" + ] + ], "keywords": [] }, { "name": "scil_bundle_filter_by_occurence", "docstring": "Use multiple versions of a same bundle and detect the most probable voxels by\nusing a threshold on the occurence, voxel-wise. With threshold 0.5, this is\na majority vote. 
This is useful to generate an average representation from\nbundles of a given population.\n\nIf streamlines originate from the same tractogram (ex, to compare various\nbundle clustering techniques), streamline-wise vote is available to find the\nstreamlines most often included in the bundle.\n\nFormerly: scil_perform_majority_vote.py", "help": "usage: scil_bundle_filter_by_occurence.py [-h] [--ratio_voxels [RATIO_VOXELS]]\n [--ratio_streamlines [RATIO_STREAMLINES]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...]\n output_prefix\n\nUse multiple versions of a same bundle and detect the most probable voxels by\nusing a threshold on the occurence, voxel-wise. With threshold 0.5, this is\na majority vote. This is useful to generate an average representation from\nbundles of a given population.\n\nIf streamlines originate from the same tractogram (ex, to compare various\nbundle clustering techniques), streamline-wise vote is available to find the\nstreamlines most often included in the bundle.\n\nFormerly: scil_perform_majority_vote.py\n\npositional arguments:\n in_bundles Input bundles filename(s). All tractograms must have identical headers.\n output_prefix Output prefix. Ex: my_path/voting_. The suffixes will be: streamlines.trk and voxels.nii.gz\n\noptions:\n -h, --help show this help message and exit\n --ratio_voxels [RATIO_VOXELS]\n Threshold on the ratio of bundles with at least one streamline in a \n given voxel to consider it as part of the 'gold standard'. Default if set: 0.5.\n --ratio_streamlines [RATIO_STREAMLINES]\n If all bundles come from the same tractogram, use this to generate \n a voting for streamlines too. The associated value is the threshold on the ratio of \n bundles including the streamline to consider it as part of the 'gold standard'. [0.5]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
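As a rough illustration of the voxel-wise majority vote this entry describes: once every bundle version is rasterized to a binary map on a common grid, the vote reduces to thresholding the per-voxel coverage ratio. A minimal numpy/nibabel sketch with illustrative file names and the default 0.5 ratio; the script itself works from the tractograms and also offers the streamline-wise vote.

    import nibabel as nib
    import numpy as np

    # Hypothetical inputs: one binary mask per bundle version, same grid/affine.
    paths = ["bundle_v1.nii.gz", "bundle_v2.nii.gz", "bundle_v3.nii.gz"]
    masks = [nib.load(p).get_fdata() > 0 for p in paths]

    # Per-voxel ratio of bundle versions with at least one streamline.
    coverage = np.mean(masks, axis=0)

    # Majority vote: keep voxels reaching the ratio threshold (0.5 here).
    gold_standard = (coverage >= 0.5).astype(np.uint8)

    ref = nib.load(paths[0])
    nib.save(nib.Nifti1Image(gold_standard, ref.affine), "voting_voxels.nii.gz")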
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "considered", + "highly", + "long", + "work", + "most" + ], + [ + "bundles", + "bundles" + ], + [ + "create", + "generate" + ], + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "too" + ], + [ + "methods", + "use" + ], + [ + "work", + "find" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "long", + "a" + ], + [ + "methods", + "techniques" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "tool", + "useful" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "step", + "work", + "come" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "specific", + "variety", + "various" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "traditionally", + "often" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "average", + "average" + ], + [ + "population", + "population" + ], + [ + "methods", + "using" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "streamline", + "streamline" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "area", + "main", + "work", + "part" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "bundles", + "bundle" + ], + [ + "considered", + "possibility", + "question", + "step", + "consider" + ] + ], "keywords": [] }, { "name": "scil_bundle_generate_priors", "docstring": "Generation of priors and enhanced-FOD from an example/template bundle.\nThe bundle must have been cleaned thoroughly before use. The E-FOD can then\nbe used for bundle-specific tractography, but not for FOD metrics.\n\nFormerly: scil_generate_priors_from_bundle.py", "help": "usage: scil_bundle_generate_priors.py [-h]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--todi_sigma {0,1,2,3,4}]\n [--sf_threshold SF_THRESHOLD]\n [--out_prefix OUT_PREFIX]\n [--out_dir OUT_DIR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle in_fodf in_mask\n\nGeneration of priors and enhanced-FOD from an example/template bundle.\nThe bundle must have been cleaned thoroughly before use. 
The E-FOD can then\nbe used for bundle-specific tractography, but not for FOD metrics.\n\nFormerly: scil_generate_priors_from_bundle.py\n\npositional arguments:\n in_bundle Input bundle filename.\n in_fodf Input FOD filename.\n in_mask Mask to constrain the TODI spatial smoothing,\n for example a WM mask.\n\noptions:\n -h, --help show this help message and exit\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --todi_sigma {0,1,2,3,4}\n Smooth the orientation histogram.\n --sf_threshold SF_THRESHOLD\n Relative threshold for sf masking (0.0-1.0).\n --out_prefix OUT_PREFIX\n Add a prefix to all output filename, \n default is no prefix.\n --out_dir OUT_DIR Output directory for all generated files,\n default is current directory.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Rheault, Francois, et al. \"Bundle-specific tractography with\n incorporated anatomical and orientational priors.\"\n NeuroImage 186 (2019): 382-398\n \n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "orientation", + "orientation" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "methods", + "use" + ], + [ + "long", + "have" + ], + [ + "specific", + "specific" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "anatomical", + "anatomical" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "considered", + "involved", + "work", + "been" + ], + [ + "methods", + "using" + ], + [ + "future", + "current" + ], + [ + "left", + "before" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "work", + "all" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", 
+ "no" + ], + [ + "bundles", + "bundle" + ], + [ + "spatial", + "temporal", + "spatial" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_bundle_label_map", "docstring": "Compute the label image (Nifti) from a centroid and tractograms (all\nrepresenting the same bundle). The label image represents the coverage of\nthe bundle, segmented into regions labelled from 0 to --nb_pts, starting from\nthe head, ending in the tail.\n\nEach voxel will have the label of its nearest centroid point.\n\nThe number of labels will be the same as the centroid's number of points.\n\nFormerly: scil_compute_bundle_voxel_label_map.py", "help": "usage: scil_bundle_label_map.py [-h] [--nb_pts NB_PTS] [--colormap COLORMAP]\n [--new_labelling] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...] in_centroid\n out_dir\n\nCompute the label image (Nifti) from a centroid and tractograms (all\nrepresenting the same bundle). The label image represents the coverage of\nthe bundle, segmented into regions labelled from 0 to --nb_pts, starting from\nthe head, ending in the tail.\n\nEach voxel will have the label of its nearest centroid point.\n\nThe number of labels will be the same as the centroid's number of points.\n\nFormerly: scil_compute_bundle_voxel_label_map.py\n\npositional arguments:\n in_bundles Fiber bundle file.\n in_centroid Centroid streamline corresponding to bundle.\n out_dir Directory to save all mapping and coloring files:\n - correlation_map.nii.gz\n - session_x/labels_map.nii.gz\n - session_x/distance_map.nii.gz\n - session_x/correlation_map.nii.gz\n - session_x/labels.trk\n - session_x/distance.trk\n - session_x/correlation.trk\n Where session_x is numbered with each bundle.\n\noptions:\n -h, --help show this help message and exit\n --nb_pts NB_PTS Number of divisions for the bundles.\n Default is the number of points of the centroid.\n --colormap COLORMAP Select the colormap for colored trk (data_per_point) [jet].\n --new_labelling Use the new labelling method (multi-centroids).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundles" + ], + [ + "planes", + "jet" + ], + [ + "methods", + "method" + ], + [ + "work", + "and" + ], + [ + "left", + "into" + ], + [ + "difference", + "point" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "areas", + "region", + "regions", + "regions" + ], + [ + "methods", + "use" + ], + [ + "long", + "have" + ], + [ + "maps", + "mapping" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "represent", + "representing" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "blue", + "colored" + ], + [ + "long", + "with" + ], + [ + "represent", + "represents" + ], + [ + "methods", + "using" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "area", + "work", + "where" + ], + [ + "individual", + "each" + ], + [ + "streamline", + "streamline" + ], + [ + "work", + "all" + ], + [ + "bundles", + "bundle" + ] + ], "keywords": [] }, { "name": "scil_bundle_mean_fixel_afd", "docstring": "Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps along a bundle.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof the bundle provided, averaged at every voxel.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_fixel_afd_from_bundles.py", "help": "usage: scil_bundle_mean_fixel_afd.py [-h] [--length_weighting]\n [--reference REFERENCE]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle in_fodf afd_mean_map\n\nCompute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps along a bundle.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof the bundle provided, averaged at every voxel.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_fixel_afd_from_bundles.py\n\npositional arguments:\n in_bundle Path of the bundle file.\n in_fodf Path of the fODF volume in spherical harmonics (SH).\n afd_mean_map Path of the output mean AFD map.\n\noptions:\n -h, --help show this help message and exit\n --length_weighting If set, will weigh the AFD values according to segment lengths. [False]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. 
\n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReference:\n [1] Raffelt, D., Tournier, JD., Rose, S., Ridgway, GR., Henderson, R.,\n Crozier, S., Salvado, O., & Connelly, A. (2012).\n Apparent Fibre Density: a novel measure for the analysis of\n diffusion-weighted magnetic resonance images. NeuroImage, 59(4),\n 3976--3994.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "work", + "and" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "fibre", + "fibres", + "fibre" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "image", + "images" + ], + [ + "clear", + "considered", + "create", + "form", + "manner", + "matter", + "result", + "subject", + "thinking", + "true", + "view", + "work", + "rather" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "diffusion", + "diffusion" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "methods", + "using" + ], + [ + "area", + "main", + "along" + ], + [ + "long", + "than" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "streamline", + "streamline" + ], + [ + "weighted", + "weighted" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "false", + "false" + ], + [ + "maps", + "maps" + ], + [ + "based", + "reported", + "according" + ], + [ + "based", + "based" + ], + [ + "maps", + "map" + ], + [ + "bundles", + "bundle" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_bundle_mean_fixel_afd_from_hdf5", "docstring": "Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps for every connections within a hdf5 (.h5) file.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof each connection, averaged at every voxel.\n\nPlease use a hdf5 (.h5) file containing decomposed connections\n\nFormerly: scil_compute_fixel_afd_from_hdf5.py", "help": "usage: scil_bundle_mean_fixel_afd_from_hdf5.py [-h] [--length_weighting]\n [--processes NBR]\n 
[--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_hdf5 in_fodf out_hdf5\n\nCompute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps for every connections within a hdf5 (.h5) file.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof each connection, averaged at every voxel.\n\nPlease use a hdf5 (.h5) file containing decomposed connections\n\nFormerly: scil_compute_fixel_afd_from_hdf5.py\n\npositional arguments:\n in_hdf5 HDF5 filename (.h5) containing decomposed connections.\n in_fodf Path of the fODF volume in spherical harmonics (SH).\n out_hdf5 Path of the output HDF5 filenames (.h5).\n\noptions:\n -h, --help show this help message and exit\n --length_weighting If set, will weigh the AFD values according to segment lengths. [False]\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReference:\n [1] Raffelt, D., Tournier, JD., Rose, S., Ridgway, GR., Henderson, R.,\n Crozier, S., Salvado, O., & Connelly, A. (2012).\n Apparent Fibre Density: a novel measure for the analysis of\n diffusion-weighted magnetic resonance images. 
NeuroImage,\n 59(4), 3976--3994.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "work", + "and" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "fibre", + "fibres", + "fibre" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "image", + "images" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "diffusion", + "diffusion" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "methods", + "using" + ], + [ + "area", + "main", + "along" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "individual", + "each" + ], + [ + "connect", + "connected", + "connection", + "connections", + "connections" + ], + [ + "streamline", + "streamline" + ], + [ + "weighted", + "weighted" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "step", + "start" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "connection", + "connections", + "connection" + ], + [ + "false", + "false" + ], + [ + "maps", + "maps" + ], + [ + "based", + "reported", + "according" + ], + [ + "based", + "based" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_bundle_mean_fixel_bingham_metric", "docstring": "Given a bundle and Bingham coefficients, compute the average Bingham\nmetric at each voxel intersected by the bundle. Intersected voxels are\nfound by computing the intersection between the voxel grid and each streamline\nin the input tractogram.\n\nThis script behaves like scil_compute_mean_fixel_afd_from_bundles.py for fODFs,\nbut here for Bingham distributions. These add the unique possibility to capture\nfixel-based fiber spread (FS) and fiber fraction (FF). FD from the bingham\nshould be \"equivalent\" to the AFD_fixel we are used to.\n\nBingham coefficients volume must come from scil_fodf_to_bingham.py\nand Bingham metrics comes from scil_bingham_metrics.py.\n\nBingham metrics are extracted from Bingham distributions fitted to fODF. There\nare as many values per voxel as there are lobes extracted. The values chosen\nfor a given voxelis the one belonging to the lobe better aligned with the\ncurrent streamline segment.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_mean_fixel_obe_metric_from_bundles.py", "help": "usage: scil_bundle_mean_fixel_bingham_metric.py [-h] [--length_weighting]\n [--max_theta MAX_THETA]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bundle in_bingham\n in_bingham_metric out_mean_map\n\nGiven a bundle and Bingham coefficients, compute the average Bingham\nmetric at each voxel intersected by the bundle. 
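A note on --length_weighting, used by this entry and the AFD entries above: each streamline segment contributes the value of the voxel it crosses, weighted by the segment's physical length. A simplified nearest-voxel sketch, not the scripts' exact grid-intersection code (all names are illustrative):

    import numpy as np

    def length_weighted_mean(streamline, metric, voxel_size):
        """streamline: (N, 3) points in voxel coordinates; metric: 3D array;
        voxel_size: scalar or (3,) array, in mm."""
        segments = np.diff(streamline, axis=0)
        lengths = np.linalg.norm(segments * voxel_size, axis=1)
        midpoints = (streamline[:-1] + streamline[1:]) / 2.0
        idx = np.clip(np.round(midpoints).astype(int), 0,
                      np.array(metric.shape) - 1)
        values = metric[idx[:, 0], idx[:, 1], idx[:, 2]]  # nearest-voxel lookup
        return np.sum(values * lengths) / np.sum(lengths)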
Intersected voxels are\nfound by computing the intersection between the voxel grid and each streamline\nin the input tractogram.\n\nThis script behaves like scil_compute_mean_fixel_afd_from_bundles.py for fODFs,\nbut here for Bingham distributions. These add the unique possibility to capture\nfixel-based fiber spread (FS) and fiber fraction (FF). FD from the bingham\nshould be \"equivalent\" to the AFD_fixel we are used to.\n\nBingham coefficients volume must come from scil_fodf_to_bingham.py\nand Bingham metrics comes from scil_bingham_metrics.py.\n\nBingham metrics are extracted from Bingham distributions fitted to fODF. There\nare as many values per voxel as there are lobes extracted. The values chosen\nfor a given voxelis the one belonging to the lobe better aligned with the\ncurrent streamline segment.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_mean_fixel_obe_metric_from_bundles.py\n\npositional arguments:\n in_bundle Path of the bundle file.\n in_bingham Path of the Bingham volume.\n in_bingham_metric Path of the Bingham metric (FD, FS, or FF) volume.\n out_mean_map Path of the output mean map.\n\noptions:\n -h, --help show this help message and exit\n --length_weighting If set, will weigh the FD values according to segment lengths.\n --max_theta MAX_THETA\n Maximum angle (in degrees) condition on lobe alignment. [60]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "average", + "per" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "lobe", + "lobes", + "lobes" + ], + [ + "considered", + "are" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "considered", + "create", + "form", + "manner", + "matter", + "result", + "subject", + "thinking", + "true", + "view", + "work", + "rather" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "experience", + "thinking", + "work", + "working", + "better" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "step", + "work", + "come" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "average", + "average" + ], + [ + "methods", + "using" + ], + [ + "future", + "current" + ], + [ + "long", + "than" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "degree", + "degrees" + ], + [ + "future", + "result", + "specific", + "variety", + "work", 
+ "these" + ], + [ + "intersected", + "intersected" + ], + [ + "unique", + "variety", + "unique" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "large", + "work", + "many" + ], + [ + "represent", + "chosen" + ], + [ + "individual", + "each" + ], + [ + "streamline", + "streamline" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "clear", + "long", + "question", + "result", + "work", + "there" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "total", + "60" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "lobe", + "lobes", + "occipital", + "parietal", + "lobe" + ], + [ + "based", + "reported", + "according" + ], + [ + "based", + "based" + ], + [ + "attention", + "comes" + ], + [ + "maps", + "map" + ], + [ + "bundles", + "bundle" + ], + [ + "future", + "possibility", + "potential", + "question", + "possibility" + ] + ], "keywords": [] }, { "name": "scil_bundle_mean_std", "docstring": "Compute mean and std for each metric.\n\n- Default: For the whole bundle. This is achieved by averaging the metric\n values of all voxels occupied by the bundle.\n- Option --per_point: For all streamlines points in the bundle for each metric\n combination, along the bundle, i.e. for each point.\n **To create label_map and distance_map, see\n scil_bundle_label_map.py\n\nDensity weighting modifies the contribution of voxel with lower/higher\nstreamline count to reduce influence of spurious streamlines.\n\nFormerly: scil_compute_bundle_mean_std_per_point.py or\nscil_compute_bundle_mean_std.py", "help": "usage: scil_bundle_mean_std.py [-h] [--per_point in_labels | --include_dps]\n [--density_weighting]\n [--distance_weighting DISTANCE_NII]\n [--correlation_weighting CORRELATION_NII]\n [--out_json OUT_JSON] [--reference REFERENCE]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n in_bundle in_metrics [in_metrics ...]\n\nCompute mean and std for each metric.\n\n- Default: For the whole bundle. This is achieved by averaging the metric\n values of all voxels occupied by the bundle.\n- Option --per_point: For all streamlines points in the bundle for each metric\n combination, along the bundle, i.e. for each point.\n **To create label_map and distance_map, see\n scil_bundle_label_map.py\n\nDensity weighting modifies the contribution of voxel with lower/higher\nstreamline count to reduce influence of spurious streamlines.\n\nFormerly: scil_compute_bundle_mean_std_per_point.py or\nscil_compute_bundle_mean_std.py\n\npositional arguments:\n in_bundle Fiber bundle file to compute statistics on.\n in_metrics Nifti file to compute statistics on. Probably some tractometry measure(s) such as FA, MD, RD, ...\n\noptions:\n -h, --help show this help message and exit\n --per_point in_labels\n If set, computes the metrics per point instead of on the whole bundle.\n You must then give the label map (.nii.gz) of the corresponding fiber bundle.\n --include_dps Save values from data_per_streamline.\n Currently not offered with option --per_point.\n --density_weighting If set, weights statistics by the number of fibers passing through each voxel.\n --distance_weighting DISTANCE_NII\n If set, weights statistics by the inverse of the distance between a streamline and the centroid.\n --correlation_weighting CORRELATION_NII\n If set, weight statistics by the correlation strength between longitudinal data.\n --out_json OUT_JSON Path of the output file. 
If not given, the output is simply printed on screen.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "pass", + "passing" + ], + [ + "average", + "per" + ], + [ + "work", + "and" + ], + [ + "form", + "combination" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "difference", + "point" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "order", + "set" + ], + [ + "thinking", + "you" + ], + [ + "high", + "higher", + "increase", + "level", + "higher" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "thinking", + "i" + ], + [ + "considered", + "probably" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "create" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "longitudinal", + "transverse", + "longitudinal" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "methods", + "using" + ], + [ + "thinking", + "simply" + ], + [ + "view", + "see" + ], + [ + "order", + "work", + "instead" + ], + [ + "area", + "main", + "along" + ], + [ + "streamlines", + "streamlines" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "clear", + "give" + ], + [ + "fibres", + "fibers" + ], + [ + "individual", + "each" + ], + [ + "higher", + "lower" + ], + [ + "streamline", + "streamline" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "maps", + "map" + ], + [ + "increase", + "reduce" + ], + [ + "bundles", + "bundle" + ], + [ + "large", + "work", + "some" + ], + [ + "considered", + "specific", + "variety", + "such" + ] + ], "keywords": [] }, { "name": "scil_bundle_pairwise_comparison", "docstring": "Evaluate pair-wise similarity measures of bundles.\nAll tractograms must be in the same space (aligned to one reference).\n\nFor the voxel representation, the computed similarity measures are:\n bundle_adjacency_voxels, dice_voxels, w_dice_voxels, density_correlation\n volume_overlap, volume_overreach\nThe same measures are also evluated for the endpoints.\n\nFor the streamline representation, the computed similarity measures are:\n bundle_adjacency_streamlines, dice_streamlines, streamlines_count_overlap,\n streamlines_count_overreach\n\nFormerly: scil_evaluate_bundles_pairwise_agreement_measures.py", "help": "usage: 
scil_bundle_pairwise_comparison.py [-h] [--streamline_dice]\n [--bundle_adjency_no_overlap]\n [--disable_streamline_distance]\n [--single_compare SINGLE_COMPARE]\n [--keep_tmp] [--ratio]\n [--processes NBR]\n [--reference REFERENCE]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...] out_json\n\nEvaluate pair-wise similarity measures of bundles.\nAll tractograms must be in the same space (aligned to one reference).\n\nFor the voxel representation, the computed similarity measures are:\n bundle_adjacency_voxels, dice_voxels, w_dice_voxels, density_correlation\n volume_overlap, volume_overreach\nThe same measures are also evluated for the endpoints.\n\nFor the streamline representation, the computed similarity measures are:\n bundle_adjacency_streamlines, dice_streamlines, streamlines_count_overlap,\n streamlines_count_overreach\n\nFormerly: scil_evaluate_bundles_pairwise_agreement_measures.py\n\npositional arguments:\n in_bundles Path of the input bundles.\n out_json Path of the output json file.\n\noptions:\n -h, --help show this help message and exit\n --streamline_dice Compute streamline-wise dice coefficient.\n Tractograms must be identical [False].\n --bundle_adjency_no_overlap\n If set, do not count zeros in the average BA.\n --disable_streamline_distance\n Will not compute the streamlines distance \n [False].\n --single_compare SINGLE_COMPARE\n Compare inputs to this single file.\n --keep_tmp Will not delete the tmp folder at the end.\n --ratio Compute overlap and overreach as a ratio over the\n reference tractogram in a Tractometer-style way.\n Can only be used if also using the `single_compare` option.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
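Two of the voxel-wise similarity measures named in this entry are short enough to write down; a sketch assuming both bundles were already rasterized to streamline-count (density) maps on the same grid, illustrative rather than the script's exact definitions:

    import numpy as np

    def dice_and_density_correlation(density_a, density_b):
        """density_a, density_b: non-empty streamline-count maps, same grid."""
        a, b = density_a > 0, density_b > 0
        dice_voxels = 2 * np.count_nonzero(a & b) / (
            np.count_nonzero(a) + np.count_nonzero(b))
        # Correlation of the raw densities over the union of both bundles.
        union = a | b
        density_correlation = np.corrcoef(density_a[union],
                                          density_b[union])[0, 1]
        return dice_voxels, density_correlation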
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "long", + "end" + ], + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundles" + ], + [ + "clear", + "long", + "step", + "thinking", + "view", + "work", + "working", + "way" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "considered", + "are" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "order", + "set" + ], + [ + "long", + "over" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "space", + "space" + ], + [ + "held", + "in" + ], + [ + "average", + "average" + ], + [ + "methods", + "using" + ], + [ + "streamlines", + "streamlines" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "streamline", + "streamline" + ], + [ + "similarity", + "similarity" + ], + [ + "step", + "start" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "false", + "false" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "examine", + "evaluate" + ] + ], "keywords": [] }, { "name": "scil_bundle_reject_outliers", "docstring": "Clean a bundle (inliers/outliers) using hierarchical clustering.\nhttp://archive.ismrm.org/2015/2844.html\n\nIf spurious streamlines are dense, it is possible they will not be recognized\nas outliers. Manual cleaning may be required to overcome this limitation.", "help": "usage: scil_bundle_reject_outliers.py [-h]\n [--remaining_bundle REMAINING_BUNDLE]\n [--alpha ALPHA] [--display_counts]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_bundle\n\nClean a bundle (inliers/outliers) using hierarchical clustering.\nhttp://archive.ismrm.org/2015/2844.html\n\nIf spurious streamlines are dense, it is possible they will not be recognized\nas outliers. Manual cleaning may be required to overcome this limitation.\n\npositional arguments:\n in_bundle Fiber bundle file to remove outliers from.\n out_bundle Fiber bundle without outliers.\n\noptions:\n -h, --help show this help message and exit\n --remaining_bundle REMAINING_BUNDLE\n Removed outliers.\n --alpha ALPHA Percent of the length of the tree that clusters of individual streamlines will be pruned. 
[0.6]\n --display_counts Print streamline count before and after filtering\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "order", + "required" + ], + [ + "work", + "and" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "considered", + "are" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "considered", + "recognized" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "methods", + "using" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "left", + "after" + ], + [ + "streamlines", + "streamlines" + ], + [ + "left", + "before" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "result", + "may" + ], + [ + "individual", + "individuals", + "individual" + ], + [ + "limitation", + "limitations", + "limitation" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "streamline", + "streamline" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "higher", + "increase", + "percent" + ], + [ + "bundles", + "bundle" + ] + ], "keywords": [] }, { "name": "scil_bundle_score_many_bundles_one_tractogram", "docstring": "This script is intended to score all bundles from a single tractogram. Each\nvalid bundle is compared to its ground truth.\nEx: It was used for the ISMRM 2015 Challenge scoring.\n\nSee also scil_bundle_score_same_bundle_many_segmentations.py to score many\nversions of a same bundle, compared to ONE ground truth / gold standard.\n\nThis script is the second part of script scil_score_tractogram, which also\nsegments the wholebrain tractogram into bundles first.\n\nHere we suppose that the bundles are already segmented and saved as follows:\n main_dir/\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (optional)\n segmented_WPC/*_wpc.trk (optional)\n IS.trk OR NC.trk (if segmented_IB is present)\n\nConfig file\n-----------\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionnary with:\n\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n* Files must be .tck, .trk, .nii or .nii.gz. 
If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n }\n}\n\nFormerly: scil_score_bundles.py", "help": "usage: scil_bundle_score_many_bundles_one_tractogram.py [-h] [--json_prefix p]\n [--gt_dir DIR]\n [--indent INDENT]\n [--sort_keys]\n [--reference REFERENCE]\n [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n gt_config bundles_dir\n\nThis script is intended to score all bundles from a single tractogram. Each\nvalid bundle is compared to its ground truth.\nEx: It was used for the ISMRM 2015 Challenge scoring.\n\nSee also scil_bundle_score_same_bundle_many_segmentations.py to score many\nversions of a same bundle, compared to ONE ground truth / gold standard.\n\nThis script is the second part of script scil_score_tractogram, which also\nsegments the wholebrain tractogram into bundles first.\n\nHere we suppose that the bundles are already segmented and saved as follows:\n main_dir/\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (optional)\n segmented_WPC/*_wpc.trk (optional)\n IS.trk OR NC.trk (if segmented_IB is present)\n\nConfig file\n-----------\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n }\n}\n\nFormerly: scil_score_bundles.py\n\nTractometry\n-----------\nGlobal connectivity metrics:\n\n- Computed by default:\n - VS: valid streamlines, belonging to a bundle (i.e. respecting all the\n criteria for that bundle: endpoints, limit_mask, gt_mask).\n - IS: invalid streamlines. All other streamlines. IS = IC + NC.\n\n- Optional:\n - WPC: wrong path connections, streamlines connecting correct ROIs but not\n respecting the other criteria for that bundle. Such streamlines always\n exist but they are only saved separately if specified in the options.\n Else, they are merged back with the IS.\n By definition, WPC are only computed if \"limits masks\" are provided.\n - IC: invalid connections, streamlines joining an incorrect combination of\n ROIs. Use carefully, quality depends on the quality of your ROIs and no\n analysis is done on the shape of the streamlines.\n - NC: no connections. Invalid streamlines minus invalid connections.\n\n- Fidelity metrics:\n - OL: Overlap. Percentage of ground truth voxels containing streamline(s)\n for a given bundle.\n - OR: Overreach. Amount of voxels containing streamline(s) when they\n shouldn't, for a given bundle. We compute two versions:\n OR_pct_vs = divided by the total number of voxels covered by the bundle\n (percentage of the voxels touched by VS).\n Values range between 0 and 100%. 
Values are not defined when we\n recovered no streamline for a bundle, but we set the OR_pct_vs to 0\n in that case.\n OR_pct_gt = divided by the total size of the ground truth bundle mask.\n Values could be higher than 100%.\n - f1 score: which is the same as the Dice score.\n\npositional arguments:\n gt_config .json dict configured as specified above.\n bundles_dir Directory containing all bundles.\n (Ex: Output directory for scil_score_tractogram).\n It is expected to contain a file IS.trk and \n files segmented_VB/*_VS.trk, with, possibly, files \n segmented_WPC/*_wpc.trk and segmented_IC/\n\noptions:\n -h, --help show this help message and exit\n --json_prefix p Prefix of the output json file. Ex: 'study_x_'.\n Suffix will be results.json. File will be saved inside bundles_dir.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nAdditions to gt_config:\n --gt_dir DIR Root path of the ground truth files listed in the gt_config.\n If not set, filenames in the config file are considered \n as absolute paths.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "true", + "always" + ], + [ + "global", + "global" + ], + [ + "bundles", + "bundles" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "form", + "combination" + ], + [ + "considered", + "are" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "left", + "into" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "limiting", + "limits" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "invalid", + "valid", + "invalid" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "lack", + "loss", + "result", + "result" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "invalid", + "valid", + "valid" + ], + [ + "exist", + "exist" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "high", + "higher", + "increase", + "level", + "higher" + ], + [ + "long", + "a" + ], + [ + "defined", + "definition" + ], + [ + "average", + "percentage" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "thinking", + "i" + ], + [ + "supported", + "support" + ], + [ + "future", + "will" + ], + [ + "order", + "intended" + ], + [ + "left", + "back" + ], + [ + "clear", + "considered", + 
"result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "true", + "truth" + ], + [ + "long", + "with" + ], + [ + "parameters", + "specified" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "methods", + "using" + ], + [ + "variety", + "work", + "other" + ], + [ + "view", + "see" + ], + [ + "increase", + "total", + "amount" + ], + [ + "increase", + "expected" + ], + [ + "defined", + "function", + "defined" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "thinking", + "wrong" + ], + [ + "matter", + "thinking", + "else" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "large", + "work", + "many" + ], + [ + "considered", + "subsequently", + "was" + ], + [ + "considered", + "possibility", + "result", + "possibly" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "connect", + "connected", + "connection", + "connections", + "connections" + ], + [ + "larger", + "size", + "size" + ], + [ + "average", + "compared" + ], + [ + "streamline", + "streamline" + ], + [ + "total", + "100" + ], + [ + "result", + "results" + ], + [ + "level", + "above" + ], + [ + "work", + "working", + "done" + ], + [ + "long", + "two" + ], + [ + "connectivity", + "connectivity" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "area", + "main", + "work", + "part" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "lack", + "quality" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "connect", + "connected", + "connecting", + "connects", + "connecting" + ], + [ + "matter", + "question", + "case" + ], + [ + "create", + "created" + ], + [ + "bundles", + "bundle" + ], + [ + "clear", + "result", + "work", + "could" + ], + [ + "increase", + "total", + "total" + ], + [ + "shape", + "structure", + "shape" + ], + [ + "considered", + "specific", + "variety", + "such" + ] + ], "keywords": [] }, { "name": "scil_bundle_score_same_bundle_many_segmentations", "docstring": "This script is intended to score many versions of a same bundle, compared to\nONE ground truth / gold standard.\n\nSee also scil_bundle_score_many_bundles_one_tractogram.py to score all bundles\nfrom a single tractogram by comparing each valid bundle to its ground truth.\n\nAll tractograms must be in the same space (aligned to one reference).\nThe measures can be applied to a voxel-wise or streamline-wise representation.\n\nA gold standard must be provided for the desired representation.\nA gold standard would be a segmentation from an expert or a group of experts.\nIf only the streamline-wise representation is provided without a voxel-wise\ngold standard, it will be computed from the provided streamlines.\nAt least one of the two representations is required.\n\nThe gold standard tractogram is the tractogram (whole brain most likely) from\nwhich the segmentation is performed.\nThe gold standard tracking mask is the tracking mask used by the tractography\nalgorighm to generate the gold standard tractogram.\n\nThe computed binary classification measures are:\nsensitivity, specificity, precision, accuracy, dice, kappa, youden for both\nthe 
streamline and voxel representation (if provided).\n\nFormerly: scil_evaluate_bundles_binary_classification_measures.py", "help": "usage: scil_bundle_score_same_bundle_many_segmentations.py [-h]\n [--streamlines_measures GOLD_STANDARD_STREAMLINES TRACTOGRAM]\n [--voxels_measures GOLD_STANDARD_MASK TRACKING MASK]\n [--processes NBR]\n [--reference REFERENCE]\n [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bundles\n [in_bundles ...]\n out_json\n\nThis script is intended to score many versions of a same bundle, compared to\nONE ground truth / gold standard.\n\nSee also scil_bundle_score_many_bundles_one_tractogram.py to score all bundles\nfrom a single tractogram by comparing each valid bundle to its ground truth.\n\nAll tractograms must be in the same space (aligned to one reference).\nThe measures can be applied to a voxel-wise or streamline-wise representation.\n\nA gold standard must be provided for the desired representation.\nA gold standard would be a segmentation from an expert or a group of experts.\nIf only the streamline-wise representation is provided without a voxel-wise\ngold standard, it will be computed from the provided streamlines.\nAt least one of the two representations is required.\n\nThe gold standard tractogram is the tractogram (whole brain most likely) from\nwhich the segmentation is performed.\nThe gold standard tracking mask is the tracking mask used by the tractography\nalgorithm to generate the gold standard tractogram.\n\nThe computed binary classification measures are:\nsensitivity, specificity, precision, accuracy, dice, kappa, youden for both\nthe streamline and voxel representation (if provided).\n\nFormerly: scil_evaluate_bundles_binary_classification_measures.py\n\npositional arguments:\n in_bundles Path of the input bundles.\n out_json Path of the output json.\n\noptions:\n -h, --help show this help message and exit\n --streamlines_measures GOLD_STANDARD_STREAMLINES TRACTOGRAM\n The gold standard bundle and the original tractogram.\n --voxels_measures GOLD_STANDARD_MASK TRACKING MASK\n The gold standard mask and the original tracking mask.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
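The listed measures reduce to confusion-matrix arithmetic; a hedged numpy sketch over two boolean masks (illustrative only, not the script's implementation):

    import numpy as np

    def binary_classification(seg, gs):
        seg, gs = seg.astype(bool), gs.astype(bool)
        tp = np.count_nonzero(seg & gs)
        fp = np.count_nonzero(seg & ~gs)
        fn = np.count_nonzero(~seg & gs)
        tn = np.count_nonzero(~seg & ~gs)
        n = tp + fp + fn + tn
        sens, spec = tp / (tp + fn), tn / (tn + fp)
        acc = (tp + tn) / n
        # Cohen's kappa: observed agreement vs. agreement expected by chance.
        p_chance = ((tp + fp) * (tp + fn) + (tn + fp) * (tn + fn)) / n ** 2
        return dict(sensitivity=sens, specificity=spec,
                    precision=tp / (tp + fp), accuracy=acc,
                    dice=2 * tp / (2 * tp + fp + fn),
                    kappa=(acc - p_chance) / (1 - p_chance),
                    youden=sens + spec - 1)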
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "considered", + "potential", + "result", + "likely" + ], + [ + "anatomy", + "anatomy" + ], + [ + "considered", + "highly", + "long", + "work", + "most" + ], + [ + "bundles", + "bundles" + ], + [ + "create", + "generate" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "order", + "required" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "precision", + "precision" + ], + [ + "applied", + "applied" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "considered", + "are" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "invalid", + "valid", + "valid" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "order", + "intended" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "space", + "space" + ], + [ + "held", + "in" + ], + [ + "true", + "truth" + ], + [ + "reliability", + "accuracy" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "streamlines", + "streamlines" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "large", + "work", + "many" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "based", + "group" + ], + [ + "individual", + "each" + ], + [ + "streamline", + "streamline" + ], + [ + "average", + "compared" + ], + [ + "long", + "two" + ], + [ + "step", + "start" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "bundles", + "bundle" + ], + [ + "data", + "tracking", + "tracking" + ] + ], "keywords": [] }, { "name": "scil_bundle_shape_measures", "docstring": "Evaluate basic measurements of bundle(s).\n\nThe computed measures are:\n - volume_info: volume, volume_endpoints\n - streamlines_info: streamlines_count, avg_length (in mm or in number of\n point), average step size, min_length, max_length.\n ** You may also get this information with scil_tractogram_print_info.py.\n - shape_info: span, curl, diameter, elongation, surface area,\n irregularity, end surface area, radius, end surface irregularity,\n mean_curvature, fractal dimension.\n ** The diameter, here, is a simple estimation using volume / length.\n For a more complex calculation, 
see scil_bundle_diameter.py.\n\nWith more than one bundle, the measures are averaged over bundles. All\ntractograms must be in the same space.\n\nThe set average contains the average measures of all input bundles. The\nmeasures that are dependent on the streamline count are weighted by the number\nof streamlines of each bundle. Each of these average measures is computed by\nfirst summing the product of a measure and the streamline count of each\nbundle, then dividing the sum by the total number of streamlines. Thus, measures\nincluding length and span are essentially averages of all the streamlines.\nOther streamline-related set measures are computed with other set averages,\nwhereas bundle-related measures are computed as an average over all bundles.\nThese measures include volume and surface area.\n\nThe fractal dimension is dependent on the voxel size and the number of voxels.\nIf data comparison is performed, the bundles MUST be in the same resolution.\n\nFormerly: scil_compute_bundle_volume.py or\nscil_evaluate_bundles_individual_measures.py", "help": "usage: scil_bundle_shape_measures.py [-h] [--out_json OUT_JSON]\n [--group_statistics] [--no_uniformize]\n [--reference REFERENCE] [--processes NBR]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...]\n\nEvaluate basic measurements of bundle(s).\n\nThe computed measures are:\n - volume_info: volume, volume_endpoints\n - streamlines_info: streamlines_count, avg_length (in mm or in number of\n point), average step size, min_length, max_length.\n ** You may also get this information with scil_tractogram_print_info.py.\n - shape_info: span, curl, diameter, elongation, surface area,\n irregularity, end surface area, radius, end surface irregularity,\n mean_curvature, fractal dimension.\n ** The diameter, here, is a simple estimation using volume / length.\n For a more complex calculation, see scil_bundle_diameter.py.\n\nWith more than one bundle, the measures are averaged over bundles. All\ntractograms must be in the same space.\n\nThe set average contains the average measures of all input bundles. The\nmeasures that are dependent on the streamline count are weighted by the number\nof streamlines of each bundle. Each of these average measures is computed by\nfirst summing the product of a measure and the streamline count of each\nbundle, then dividing the sum by the total number of streamlines. Thus, measures\nincluding length and span are essentially averages of all the streamlines.\nOther streamline-related set measures are computed with other set averages,\nwhereas bundle-related measures are computed as an average over all bundles.\nThese measures include volume and surface area.\n\nThe fractal dimension is dependent on the voxel size and the number of voxels.\nIf data comparison is performed, the bundles MUST be in the same resolution.\n\nFormerly: scil_compute_bundle_volume.py or\nscil_evaluate_bundles_individual_measures.py\n\npositional arguments:\n in_bundles Path of the input bundles.\n\noptions:\n -h, --help show this help message and exit\n --out_json OUT_JSON Path of the output file. If not given, the output is simply printed on screen.\n --group_statistics Show average measures [False].\n --no_uniformize Do NOT automatically uniformize endpoints for the endpoints-related metrics.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --processes NBR Number of sub-processes to start. 
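The streamline-count weighting described above amounts to a weighted mean; a small sketch (names are illustrative):

    import numpy as np

    def count_weighted_average(measures, streamline_counts):
        m = np.asarray(measures, dtype=float)
        c = np.asarray(streamline_counts, dtype=float)
        # Sum of (measure * count) over bundles, divided by the total count.
        return (m * c).sum() / c.sum()

    # e.g. set avg_length over three bundles:
    # count_weighted_average([52.1, 61.0, 48.7], [1200, 300, 2500])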
\n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\nReferences:\n[1] Fang-Cheng Yeh. 2020.\n Shape analysis of the human association pathways. NeuroImage.\n", - "synonyms": [], + "synonyms": [ + [ + "long", + "end" + ], + [ + "total", + "number" + ], + [ + "animal", + "human", + "human" + ], + [ + "anatomy", + "anatomy" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "bundles", + "bundles" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "considered", + "are" + ], + [ + "long", + "work", + "more" + ], + [ + "difference", + "point" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "order", + "set" + ], + [ + "long", + "over" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "space", + "space" + ], + [ + "held", + "in" + ], + [ + "variety", + "include" + ], + [ + "pathway", + "pathways", + "pathways" + ], + [ + "long", + "with" + ], + [ + "average", + "average" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "action", + "process", + "step", + "step" + ], + [ + "contrast", + "comparison" + ], + [ + "specific", + "related" + ], + [ + "methods", + "using" + ], + [ + "variety", + "work", + "other" + ], + [ + "view", + "see" + ], + [ + "thinking", + "simply" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "result", + "may" + ], + [ + "fundamental", + "basic" + ], + [ + "individual", + "each" + ], + [ + "larger", + "size", + "size" + ], + [ + "streamline", + "streamline" + ], + [ + "weighted", + "weighted" + ], + [ + "considered", + "form", + "meaning", + "order", + "result", + "thus" + ], + [ + "complex", + "structure", + "structures", + "complex" + ], + [ + "association", + "association" + ], + [ + "step", + "start" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "false", + "false" + ], + [ + 
"bundles", + "bundle" + ], + [ + "examine", + "evaluate" + ], + [ + "area", + "areas", + "region", + "area" + ], + [ + "increase", + "total", + "total" + ], + [ + "shape", + "structure", + "shape" + ] + ], "keywords": [] }, { "name": "scil_bundle_uniformize_endpoints", "docstring": "Uniformize streamlines' endpoints according to a defined axis.\nUseful for tractometry or models creation.\n\nThe --auto option will automatically calculate the main orientation.\nIf the input bundle is poorly defined, it is possible heuristic will be wrong.\n\nThe default is to flip each streamline so their first point's coordinate in the\ndefined axis is smaller than their last point (--swap does the opposite).\n\nThe --target_roi option will use the barycenter of the target mask to define\nthe axis. The target mask can be a binary mask or an atlas. If an atlas is\nused, labels are expected in the form of --target_roi atlas.nii.gz 2 3 5:7.\n\nFormerly: scil_uniformize_streamlines_endpoints.py", "help": "usage: scil_bundle_uniformize_endpoints.py [-h]\n (--axis {x,y,z} | --auto | --centroid tractogram | --target_roi TARGET_ROI [TARGET_ROI ...])\n [--swap] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_bundle\n\nUniformize streamlines' endpoints according to a defined axis.\nUseful for tractometry or models creation.\n\nThe --auto option will automatically calculate the main orientation.\nIf the input bundle is poorly defined, it is possible heuristic will be wrong.\n\nThe default is to flip each streamline so their first point's coordinate in the\ndefined axis is smaller than their last point (--swap does the opposite).\n\nThe --target_roi option will use the barycenter of the target mask to define\nthe axis. The target mask can be a binary mask or an atlas. If an atlas is\nused, labels are expected in the form of --target_roi atlas.nii.gz 2 3 5:7.\n\nFormerly: scil_uniformize_streamlines_endpoints.py\n\npositional arguments:\n in_bundle Input path of the tractography file.\n out_bundle Output path of the uniformized file.\n\noptions:\n -h, --help show this help message and exit\n --axis {x,y,z} Match endpoints of the streamlines along this axis.\n SUGGESTION: Commissural = x, Association = y, Projection = z\n --auto Match endpoints of the streamlines along an automatically determined axis.\n --centroid tractogram\n Match endpoints of the streamlines to align it to a reference unique streamline (centroid).\n --target_roi TARGET_ROI [TARGET_ROI ...]\n Provide a target ROI: either a binary mask or a label map and the labels to use.\n Will align heads to be closest to the mask barycenter.\n (atlas: if no labels are provided, all labels will be used.\n --swap Swap head <-> tail convention. Can be useful when the reference is not in RAS.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "commissural", + "commissural" + ], + [ + "motor", + "auto" + ], + [ + "anatomy", + "anatomy" + ], + [ + "orientation", + "orientation" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "difference", + "point" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "direction", + "opposite" + ], + [ + "long", + "a" + ], + [ + "tool", + "useful" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "form", + "forms", + "form" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "last" + ], + [ + "held", + "in" + ], + [ + "atlas", + "atlas" + ], + [ + "clear", + "long", + "matter", + "result", + "work", + "so" + ], + [ + "create", + "provide" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "area", + "main", + "along" + ], + [ + "defined", + "function", + "defined" + ], + [ + "long", + "than" + ], + [ + "increase", + "expected" + ], + [ + "streamlines", + "streamlines" + ], + [ + "thinking", + "wrong" + ], + [ + "unique", + "variety", + "unique" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "projection", + "projection" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "matter", + "question", + "does" + ], + [ + "streamline", + "streamline" + ], + [ + "association", + "association" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "defined", + "define" + ], + [ + "clear", + "result", + "determined" + ], + [ + "based", + "reported", + "according" + ], + [ + "create", + "creation" + ], + [ + "key", + "main", + "main" + ], + [ + "maps", + "map" + ], + [ + "bundles", + "bundle" + ], + [ + "large", + "larger", + "smaller" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_bundle_volume_per_label", "docstring": "Compute bundle volume per label in mm3. This script supports anisotropic voxels\nresolution. Volume is estimated by counting the number of voxel occupied by\neach label and multiplying it by the volume of a single voxel.\n\nThe labels can be obtained by scil_bundle_label_map.py.\n\nThis estimation is typically performed at resolution around 1mm3.\n\nTo get the volume and other measures directly from the (whole) bundle, use\nscil_bundle_shape_measures.py.\n\nFormerly: scil_compute_bundle_volume_per_label.py", "help": "usage: scil_bundle_volume_per_label.py [-h] [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n voxel_label_map bundle_name\n\nCompute bundle volume per label in mm3. This script supports anisotropic voxels\nresolution. 
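The estimation is just a voxel count per label times the volume of one voxel; a hedged nibabel/numpy sketch (the input path is hypothetical):

    import nibabel as nib
    import numpy as np

    img = nib.load('voxel_label_map.nii.gz')         # hypothetical input
    labels = np.asanyarray(img.dataobj)
    voxel_vol = np.prod(img.header.get_zooms()[:3])  # mm3; handles anisotropy
    for label in np.unique(labels)[1:]:              # skip background 0
        print(int(label), np.count_nonzero(labels == label) * voxel_vol)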
Volume is estimated by counting the number of voxels occupied by\neach label and multiplying it by the volume of a single voxel.\n\nThe labels can be obtained by scil_bundle_label_map.py.\n\nThis estimation is typically performed at resolution around 1mm3.\n\nTo get the volume and other measures directly from the (whole) bundle, use\nscil_bundle_shape_measures.py.\n\nFormerly: scil_compute_bundle_volume_per_label.py\n\npositional arguments:\n voxel_label_map Fiber bundle file.\n bundle_name Bundle name.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "average", + "per" + ], + [ + "work", + "and" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "total", + "estimated" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "supported", + "supports" + ], + [ + "methods", + "using" + ], + [ + "variety", + "work", + "other" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "bundles", + "bundle" + ] + ], "keywords": [] }, { "name": "scil_connectivity_compare_populations", "docstring": "Performs a network-based statistical comparison for populations g1 and g2. The\noutput is a matrix of the same size as the input connectivity matrices, with\np-values at each edge.\nAll input matrices must have the same shape (NxN). For paired t-test, both\ngroups must have the same number of observations.\n\nFor example, if you have streamline count weighted matrices for an MCI and a\ncontrol group and you want to investigate differences in their connectomes:\n >>> scil_connectivity_compare_populations.py pval.npy\n --g1 MCI/*_sc.npy --g2 CTL/*_sc.npy\n\n--filtering_mask will simply multiply all input matrices by the binary mask\nbefore performing the statistical comparison. This reduces the number of\nstatistical tests, useful when using --fdr or --bonferroni.\n\nFormerly: scil_compare_connectivity.py", "help": "usage: scil_connectivity_compare_populations.py [-h] --in_g1 IN_G1 [IN_G1 ...]\n --in_g2 IN_G2 [IN_G2 ...]\n [--tail {left,right,both}]\n [--paired]\n [--fdr | --bonferroni]\n [--p_threshold THRESH OUT_FILE]\n [--filtering_mask FILTERING_MASK]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n out_pval_matrix\n\nPerforms a network-based statistical comparison for populations g1 and g2. 
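A sketch of the per-edge comparison, with scipy standing in for the script's statistics (file patterns follow the example above; illustrative, not the script's code):

    import glob
    import numpy as np
    from scipy.stats import ttest_ind

    g1 = np.stack([np.load(f) for f in sorted(glob.glob('MCI/*_sc.npy'))])
    g2 = np.stack([np.load(f) for f in sorted(glob.glob('CTL/*_sc.npy'))])
    _, pvals = ttest_ind(g1, g2, axis=0)          # one p-value per edge
    nonzero = np.any(g1, axis=0) | np.any(g2, axis=0)
    n_tests = np.count_nonzero(np.triu(nonzero, 1))
    reject = pvals < 0.05 / max(n_tests, 1)       # Bonferroni-style correction
    np.save('pval.npy', pvals)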
The\noutput is a matrix of the same size as the input connectivity matrices, with\np-values at each edge.\nAll input matrices must have the same shape (NxN). For paired t-test, both\ngroups must have the same number of observations.\n\nFor example, if you have streamline count weighted matrices for an MCI and a\ncontrol group and you want to investigate differences in their connectomes:\n >>> scil_connectivity_compare_populations.py pval.npy\n --g1 MCI/*_sc.npy --g2 CTL/*_sc.npy\n\n--filtering_mask will simply multiply all input matrices by the binary mask\nbefore performing the statistical comparison. This reduces the number of\nstatistical tests, useful when using --fdr or --bonferroni.\n\nFormerly: scil_compare_connectivity.py\n\npositional arguments:\n out_pval_matrix Output matrix (.npy) containing the edges' p-values.\n\noptions:\n -h, --help show this help message and exit\n --in_g1 IN_G1 [IN_G1 ...]\n List of matrices for the first population (.npy).\n --in_g2 IN_G2 [IN_G2 ...]\n List of matrices for the second population (.npy).\n --tail {left,right,both}\n Enables specification of an alternative hypothesis:\n left: mean of g1 < mean of g2,\n right: mean of g2 < mean of g1,\n both: both means are not equal (default).\n --paired Use paired sample t-test instead of population t-test.\n --in_g1 and --in_g2 must be ordered the same way.\n --fdr Perform a false discovery rate (FDR) correction for the p-values.\n Uses the number of non-zero edges as number of tests (value between 0.01 and 0.1).\n --bonferroni Perform a Bonferroni correction for the p-values.\n Uses the number of non-zero edges as number of tests.\n --p_threshold THRESH OUT_FILE\n Threshold the final p-value matrix and save the binary matrix (.npy).\n --filtering_mask FILTERING_MASK\n Binary filtering mask (.npy) to apply before computing the measures.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Rubinov, Mikail, and Olaf Sporns. \"Complex network measures of brain\n connectivity: uses and interpretations.\" Neuroimage 52.3 (2010):\n 1059-1069.\n[2] Zalesky, Andrew, Alex Fornito, and Edward T. Bullmore. 
\"Network-based\n statistic: identifying differences in brain networks.\" Neuroimage 53.4\n (2010): 1197-1207.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "clear", + "long", + "step", + "thinking", + "view", + "work", + "working", + "way" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "methods", + "use" + ], + [ + "long", + "have" + ], + [ + "left", + "left" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "matrices", + "matrices" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "tool", + "useful" + ], + [ + "applied", + "apply" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "population", + "populations" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "discovery", + "discovery" + ], + [ + "higher", + "increase", + "rate" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "contrast", + "comparison" + ], + [ + "population", + "population" + ], + [ + "network", + "networks", + "networks" + ], + [ + "differences", + "differences" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "thinking", + "simply" + ], + [ + "order", + "work", + "instead" + ], + [ + "left", + "right" + ], + [ + "left", + "before" + ], + [ + "matrices", + "matrix" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "based", + "group" + ], + [ + "individual", + "each" + ], + [ + "larger", + "size", + "size" + ], + [ + "streamline", + "streamline" + ], + [ + "weighted", + "weighted" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "complex", + "structure", + "structures", + "complex" + ], + [ + "connectivity", + "connectivity" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "false", + "false" + ], + [ + "based", + "based" + ], + [ + "network", + "networks", + "network" + ], + [ + "meaning", + "order", + "result", + "step", + "true", + "means" + ], + [ + "shape", + "structure", + "shape" + ] + ], "keywords": [] }, { "name": "scil_connectivity_compute_matrices", "docstring": "This script computes a variety of measures in the form of connectivity\nmatrices. This script is made to follow\nscil_tractogram_segment_bundles_for_connectivity.py and\nuses the same labels list as input.\n\nThe script expects a folder containing all relevants bundles following the\nnaming convention LABEL1_LABEL2.trk and a text file containing the list of\nlabels that should be part of the matrices. 
The ordering of labels in the\nmatrices will follow the same order as the list.\nThis script only generates matrices in the form of arrays; it does not visualize\nor reorder the labels (nodes).\n\nThe parameter --similarity expects a folder with density maps\n(LABEL1_LABEL2.nii.gz) following the same naming convention as the input\ndirectory.\nThe bundles should be averaged versions in the same space. This will\ncompute the weighted-dice between each node and their homologous average\nversion.\n\nThe parameter --metrics can be used more than once and expects a map (t1, fa,\netc.) in the same space; each will generate a matrix. The average value in\nthe volume occupied by the bundle will be reported in the matrix nodes.\n\nThe parameter --maps can be used more than once and expects a folder with\npre-computed maps (LABEL1_LABEL2.nii.gz) following the same naming convention\nas the input directory. Each will generate a matrix. The average non-zero\nvalue in the map will be reported in the matrix nodes.\n\nThe parameter --lesion_load will compute 3 lesion(s)-related matrices:\nlesion_count.npy, lesion_vol.npy, lesion_sc.npy and put them inside a\nspecified folder. They represent the number of lesions, the total volume of\nlesion(s) and the total number of streamlines going through the lesion(s) for each\nconnection. Each connection can be seen as a 'bundle' and then something\nsimilar to scil_analyse_lesion_load.py is run for each 'bundle'.\n\nFormerly: scil_compute_connectivity.py", "help": "usage: scil_connectivity_compute_matrices.py [-h] [--volume OUT_FILE]\n [--streamline_count OUT_FILE]\n [--length OUT_FILE]\n [--similarity IN_FOLDER OUT_FILE]\n [--maps IN_FOLDER OUT_FILE]\n [--metrics IN_FILE OUT_FILE]\n [--lesion_load IN_FILE OUT_DIR]\n [--min_lesion_vol MIN_LESION_VOL]\n [--density_weighting]\n [--no_self_connection]\n [--include_dps OUT_DIR]\n [--force_labels_list FORCE_LABELS_LIST]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_hdf5 in_labels\n\nThis script computes a variety of measures in the form of connectivity\nmatrices. This script is made to follow\nscil_tractogram_segment_bundles_for_connectivity.py and\nuses the same labels list as input.\n\nThe script expects a folder containing all relevant bundles following the\nnaming convention LABEL1_LABEL2.trk and a text file containing the list of\nlabels that should be part of the matrices. The ordering of labels in the\nmatrices will follow the same order as the list.\nThis script only generates matrices in the form of arrays; it does not visualize\nor reorder the labels (nodes).\n\nThe parameter --similarity expects a folder with density maps\n(LABEL1_LABEL2.nii.gz) following the same naming convention as the input\ndirectory.\nThe bundles should be averaged versions in the same space. This will\ncompute the weighted-dice between each node and their homologous average\nversion.\n\nThe parameter --metrics can be used more than once and expects a map (t1, fa,\netc.) in the same space; each will generate a matrix. The average value in\nthe volume occupied by the bundle will be reported in the matrix nodes.\n\nThe parameter --maps can be used more than once and expects a folder with\npre-computed maps (LABEL1_LABEL2.nii.gz) following the same naming convention\nas the input directory. Each will generate a matrix. 
The average non-zero\nvalue in the map will be reported in the matrix nodes.\n\nThe parameter --lesion_load will compute 3 lesion(s)-related matrices:\nlesion_count.npy, lesion_vol.npy, lesion_sc.npy and put them inside a\nspecified folder. They represent the number of lesions, the total volume of\nlesion(s) and the total number of streamlines going through the lesion(s) for each\nconnection. Each connection can be seen as a 'bundle' and then something\nsimilar to scil_analyse_lesion_load.py is run for each 'bundle'.\n\nFormerly: scil_compute_connectivity.py\n\npositional arguments:\n in_hdf5 Input filename for the hdf5 container (.h5).\n Obtained from scil_tractogram_segment_bundles_for_connectivity.py.\n in_labels Labels file name (nifti).\n This generates an NxN connectivity matrix.\n\noptions:\n -h, --help show this help message and exit\n --volume OUT_FILE Output file for the volume weighted matrix (.npy).\n --streamline_count OUT_FILE\n Output file for the streamline count weighted matrix (.npy).\n --length OUT_FILE Output file for the length weighted matrix (.npy).\n --similarity IN_FOLDER OUT_FILE\n Input folder containing the averaged bundle density\n maps (.nii.gz) and output file for the similarity weighted matrix (.npy).\n --maps IN_FOLDER OUT_FILE\n Input folder containing pre-computed maps (.nii.gz)\n and output file for the weighted matrix (.npy).\n --metrics IN_FILE OUT_FILE\n Input (.nii.gz) and output file (.npy) for a metric weighted matrix.\n --lesion_load IN_FILE OUT_DIR\n Input binary mask (.nii.gz) and output directory for all lesion-related matrices.\n --min_lesion_vol MIN_LESION_VOL\n Minimum lesion volume in mm3 [7].\n --density_weighting Use density-weighting for the metric weighted matrix.\n --no_self_connection Eliminate the diagonal from the matrices.\n --include_dps OUT_DIR\n Save matrices from data_per_streamline in the output directory.\n COMMIT-related values will be summed instead of averaged.\n Will always overwrite files.\n --force_labels_list FORCE_LABELS_LIST\n Path to a labels list (.txt) in case of missing labels in the atlas.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
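The lesion matrices rest on connected-component analysis; a hedged sketch with scipy.ndimage as a stand-in (hypothetical path; the per-connection streamline count is omitted):

    import nibabel as nib
    import numpy as np
    from scipy import ndimage

    img = nib.load('lesion_mask.nii.gz')                 # hypothetical input
    mask = np.asanyarray(img.dataobj) > 0
    components, n_lesions = ndimage.label(mask)          # lesion count
    voxel_vol = np.prod(img.header.get_zooms()[:3])
    volumes = np.bincount(components.ravel())[1:] * voxel_vol
    volumes = volumes[volumes >= 7]                      # --min_lesion_vol [7]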
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "true", + "always" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "bundles", + "bundles" + ], + [ + "create", + "generate" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "result", + "following" + ], + [ + "reported", + "reported" + ], + [ + "order", + "order" + ], + [ + "long", + "work", + "more" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "parameter", + "parameters", + "parameter" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "naming", + "naming" + ], + [ + "long", + "a" + ], + [ + "matrices", + "matrices" + ], + [ + "held", + "on" + ], + [ + "considered", + "seen" + ], + [ + "represent", + "represent" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "form", + "forms", + "form" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "space", + "space" + ], + [ + "held", + "in" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "parameters", + "specified" + ], + [ + "average", + "average" + ], + [ + "atlas", + "atlas" + ], + [ + "specific", + "related" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "clear", + "left", + "work", + "put" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "contrast", + "form", + "forms", + "larger", + "result", + "specific", + "variety", + "similar" + ], + [ + "order", + "work", + "instead" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "matrices", + "matrix" + ], + [ + "step", + "follow" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "left", + "long", + "work", + "once" + ], + [ + "matter", + "question", + "thinking", + "true", + "something" + ], + [ + "unique", + "variety", + "variety" + ], + [ + "individual", + "each" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "matter", + "question", + "does" + ], + [ + "streamline", + "streamline" + ], + [ + "weighted", + "weighted" + ], + [ + "similarity", + "similarity" + ], + [ + "clear", + "work", + "made" + ], + [ + "connectivity", + "connectivity" + ], + [ + "step", + "start" + ], + [ + "work", + "all" + ], + [ + "area", + "main", + "work", + "part" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "connection", + "connections", + "connection" + ], + [ + "maps", + "maps" + ], + [ + "matter", + "question", + "case" + ], + [ + "maps", + "map" + ], + [ + "step", + "thinking", + "going" + ], + [ + "bundles", + 
"bundle" + ], + [ + "increase", + "total", + "total" + ] + ], "keywords": [] }, { "name": "scil_connectivity_compute_pca", "docstring": "Script to compute PCA analysis on diffusion metrics. Output returned is all\nsignificant principal components (e.g. presenting eigenvalues > 1) in a\nconnectivity matrix format. This script can take into account all edges from\nevery subject in a population or only non-zero edges across all subjects.\n\nThe script can take directly as input a connectoflow output folder. Simply use\nthe --input_connectoflow flag. For other type of folder input, the script\nexpects a single folder containing all matrices for all subjects.\nExample:\n [in_folder]\n |--- sub-01_ad.npy\n |--- sub-01_md.npy\n |--- sub-02_ad.npy\n |--- sub-02_md.npy\n |--- ...\n\nThe plots, tables and principal components matrices will be outputted in the\ndesignated folder from the argument. If you want to move back your\nprincipal components matrices in your connectoflow output, you can use a\nsimilar bash command for all principal components:\nfor sub in `cat list_id.txt`;\ndo\n cp out_folder/${sub}_PC1.npy connectoflow_output/$sub/Compute_Connectivity/\ndone\n\nInterpretation of resulting principal components can be done by evaluating the\nloadings values for each metrics. A value near 0 means that this metric doesn't\ncontribute to this specific component whereas high positive or negative values\nmean a larger contribution. Components can then be labeled based on which\nmetric contributes the highest. For example, a principal component showing a\nhigh loading for afd_fixel and near 0 loading for all other metrics can be\ninterpreted as axonal density (see Gagnon et al. 2022 for this specific example\nor ref [3] for an introduction to PCA).\n\nEXAMPLE USAGE:\nscil_connectivity_compute_pca.py input_folder/ output_folder/\n --metrics ad fa md rd [...] --list_ids list_ids.txt", "help": "usage: scil_connectivity_compute_pca.py [-h] --metrics METRICS [METRICS ...]\n --list_ids FILE [--not_only_common]\n [--input_connectoflow]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_folder out_folder\n\nScript to compute PCA analysis on diffusion metrics. Output returned is all\nsignificant principal components (e.g. presenting eigenvalues > 1) in a\nconnectivity matrix format. This script can take into account all edges from\nevery subject in a population or only non-zero edges across all subjects.\n\nThe script can take directly as input a connectoflow output folder. Simply use\nthe --input_connectoflow flag. For other type of folder input, the script\nexpects a single folder containing all matrices for all subjects.\nExample:\n [in_folder]\n |--- sub-01_ad.npy\n |--- sub-01_md.npy\n |--- sub-02_ad.npy\n |--- sub-02_md.npy\n |--- ...\n\nThe plots, tables and principal components matrices will be outputted in the\ndesignated folder from the argument. If you want to move back your\nprincipal components matrices in your connectoflow output, you can use a\nsimilar bash command for all principal components:\nfor sub in `cat list_id.txt`;\ndo\n cp out_folder/${sub}_PC1.npy connectoflow_output/$sub/Compute_Connectivity/\ndone\n\nInterpretation of resulting principal components can be done by evaluating the\nloadings values for each metrics. A value near 0 means that this metric doesn't\ncontribute to this specific component whereas high positive or negative values\nmean a larger contribution. Components can then be labeled based on which\nmetric contributes the highest. 
For example, a principal component showing a\nhigh loading for afd_fixel and near 0 loading for all other metrics can be\ninterpreted as axonal density (see Gagnon et al. 2022 for this specific example\nor ref [3] for an introduction to PCA).\n\nEXAMPLE USAGE:\nscil_connectivity_compute_pca.py input_folder/ output_folder/\n --metrics ad fa md rd [...] --list_ids list_ids.txt\n\npositional arguments:\n in_folder Path to the input folder. See explanation above for its expected organization.\n out_folder Path to the output folder to export graphs, tables and principal \n components matrices.\n\noptions:\n -h, --help show this help message and exit\n --metrics METRICS [METRICS ...]\n Suffixes of all metrics to include in PCA analysis (ex: ad md fa rd). \n They must be immediately followed by the .npy extension.\n --list_ids FILE Path to a .txt file containing a list of all ids.\n --not_only_common If true, will include all edges from all subjects and not only \n common edges (Not recommended)\n --input_connectoflow If true, script will assume the input folder is a Connectoflow output.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Chamberland M, Raven EP, Genc S, Duffy K, Descoteaux M, Parker GD, Tax CMW,\n Jones DK. Dimensionality reduction of diffusion MRI measures for improved\n tractometry of the human brain. Neuroimage. 2019 Oct 15;200:89-100.\n doi: 10.1016/j.neuroimage.2019.06.020. Epub 2019 Jun 20. PMID: 31228638;\n PMCID: PMC6711466.\n[2] Gagnon A., Grenier G., Bocti C., Gillet V., Lepage J.-F., Baccarelli A. A.,\n Posner J., Descoteaux M., Takser L. (2022). White matter microstructural\n variability linked to differential attentional skills and impulsive behavior\n in a pediatric population. 
Cerebral Cortex.\n https://doi.org/10.1093/cercor/bhac180\n[3] https://towardsdatascience.com/what-are-pca-loadings-and-biplots-9a7897f2e559\n \n", - "synonyms": [], + "synonyms": [ + [ + "animal", + "human", + "human" + ], + [ + "positive", + "negative" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "left", + "into" + ], + [ + "variability", + "variability" + ], + [ + "axonal", + "axonal" + ], + [ + "subject", + "subjects", + "subjects" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "cortex", + "thalamus", + "cortex" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "methods", + "use" + ], + [ + "positive", + "positive" + ], + [ + "area", + "near" + ], + [ + "question", + "argument" + ], + [ + "thinking", + "you" + ], + [ + "specific", + "specific" + ], + [ + "examining", + "evaluating" + ], + [ + "highest", + "level", + "highest" + ], + [ + "long", + "a" + ], + [ + "clear", + "immediately" + ], + [ + "action", + "clear", + "step", + "move" + ], + [ + "matrices", + "matrices" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "diffusion", + "diffusion" + ], + [ + "imaging", + "mri" + ], + [ + "future", + "will" + ], + [ + "large", + "larger", + "size", + "larger" + ], + [ + "left", + "back" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "result", + "followed" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "forms", + "specific", + "common" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "experience", + "knowledge", + "learning", + "skills" + ], + [ + "clear", + "order", + "step", + "work", + "take" + ], + [ + "variety", + "include" + ], + [ + "indicating", + "showing" + ], + [ + "clear", + "matter", + "question", + "thinking", + "true", + "view", + "work", + "what" + ], + [ + "population", + "population" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "methods", + "using" + ], + [ + "contrast", + "form", + "forms", + "larger", + "result", + "specific", + "variety", + "similar" + ], + [ + "thinking", + "simply" + ], + [ + "variety", + "work", + "other" + ], + [ + "view", + "see" + ], + [ + "involved", + "linked" + ], + [ + "increase", + "expected" + ], + [ + "left", + "subsequently", + "returned" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "meaning", + "true", + "true" + ], + [ + "areas", + "across" + ], + [ + "matrices", + "matrix" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "result", + "resulting" + ], + [ + "considered", + "is" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "individual", + "each" + ], + [ + "increase", + "reduction" + ], + [ + "level", + "above" + ], + [ + "difference", + 
"meaning", + "result", + "mean" + ], + [ + "work", + "working", + "done" + ], + [ + "connectivity", + "connectivity" + ], + [ + "presented", + "presenting" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "total", + "100" + ], + [ + "systems", + "components" + ], + [ + "based", + "based" + ], + [ + "principal", + "principal" + ], + [ + "association", + "organization" + ], + [ + "matter", + "question", + "subject", + "subjects", + "subject" + ], + [ + "meaning", + "order", + "result", + "step", + "true", + "means" + ], + [ + "assigned", + "command" + ], + [ + "considered", + "greater", + "larger", + "potential", + "result", + "significant" + ] + ], "keywords": [] }, { "name": "scil_connectivity_filter", "docstring": "Script to facilitate filtering of connectivity matrices.\nThe same could be achieved through a complex sequence of\nscil_connectivity_math.py.\n\nCan be used with any connectivity matrix from\nscil_connectivity_compute_matrices.py.\n\nFor example, a simple filtering (Jasmeen style) would be:\nscil_connectivity_filter.py out_mask.npy\n --greater_than */sc.npy 1 0.90\n --lower_than */sim.npy 2 0.90\n --greater_than */len.npy 40 0.90 -v;\n\nThis will result in a binary mask where each node with a value of 1 represents\na node with at least 90% of the population having at least 1 streamline,\n90% of the population is similar to the average (2mm) and 90% of the\npopulation having at least 40mm of average streamlines length.\n\nAll operation are stricly > or <, there is no >= or <=.\n\n--greater_than or --lower_than expect the same convention:\n MATRICES_LIST VALUE_THR POPULATION_PERC\nIt is strongly recommended (but not enforced) that the same number of\nconnectivity matrices is used for each condition.\n\nThis script performs an intersection of all conditions, meaning that all\nconditions must be met in order not to be filtered.\nIf the user wants to manually handle the requirements, --keep_condition_count\ncan be used and manually binarized using scil_connectivity_math.py\n\nFormerly: scil_filter_connectivity.py", "help": "usage: scil_connectivity_filter.py [-h] [--lower_than [LOWER_THAN ...]]\n [--greater_than [GREATER_THAN ...]]\n [--keep_condition_count] [--inverse_mask]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_matrix_mask\n\nScript to facilitate filtering of connectivity matrices.\nThe same could be achieved through a complex sequence of\nscil_connectivity_math.py.\n\nCan be used with any connectivity matrix from\nscil_connectivity_compute_matrices.py.\n\nFor example, a simple filtering (Jasmeen style) would be:\nscil_connectivity_filter.py out_mask.npy\n --greater_than */sc.npy 1 0.90\n --lower_than */sim.npy 2 0.90\n --greater_than */len.npy 40 0.90 -v;\n\nThis will result in a binary mask where each node with a value of 1 represents\na node with at least 90% of the population having at least 1 streamline,\n90% of the population is similar to the average (2mm) and 90% of the\npopulation having at least 40mm of average streamlines length.\n\nAll operation are stricly > or <, there is no >= or <=.\n\n--greater_than or --lower_than expect the same convention:\n MATRICES_LIST VALUE_THR POPULATION_PERC\nIt is strongly recommended (but not enforced) that the same number of\nconnectivity matrices is used for each condition.\n\nThis script performs an intersection of all conditions, meaning that all\nconditions must be 
met in order not to be filtered.\nIf the user wants to manually handle the requirements, --keep_condition_count\ncan be used and manually binarized using scil_connectivity_math.py\n\nFormerly: scil_filter_connectivity.py\n\npositional arguments:\n out_matrix_mask Output mask (matrix) resulting from the provided conditions (.npy).\n\noptions:\n -h, --help show this help message and exit\n --lower_than [LOWER_THAN ...]\n Lower than condition using the VALUE_THR in at least POPULATION_PERC (from MATRICES_LIST).\n See description for more details.\n --greater_than [GREATER_THAN ...]\n Greater than condition using the VALUE_THR in at least POPULATION_PERC (from MATRICES_LIST).\n See description for more details.\n --keep_condition_count\n Report the number of condition(s) that pass/fail rather than a binary mask.\n --inverse_mask Inverse the final mask. 0 where all conditions are respected and 1 where at least one fail.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "application", + "database", + "user" + ], + [ + "total", + "number" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "total", + "90" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "considered", + "involved", + "result", + "having" + ], + [ + "long", + "work", + "more" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "conditions", + "conditions" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "lack", + "loss", + "result", + "result" + ], + [ + "clear", + "considered", + "create", + "form", + "manner", + "matter", + "result", + "subject", + "thinking", + "true", + "view", + "work", + "rather" + ], + [ + "long", + "a" + ], + [ + "total", + "40" + ], + [ + "matrices", + "matrices" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "long", + "with" + ], + [ + "average", + "average" + ], + [ + "population", + "population" + ], + [ + "binary", + "binary" + ], + [ + "represent", + "represents" + ], + [ + "meaning", + "true", + "meaning" + ], + [ + "methods", + "using" + ], + [ + "contrast", + "form", + "forms", + "larger", + "result", + "specific", + "variety", + "similar" + ], + [ + "view", + "see" + ], + [ + "reported", + "report" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "matrices", + "matrix" + ], + [ + "create", + "order", + "work", + 
"to" + ], + [ + "result", + "resulting" + ], + [ + "considered", + "is" + ], + [ + "pass", + "pass" + ], + [ + "area", + "work", + "where" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "higher", + "lower" + ], + [ + "streamline", + "streamline" + ], + [ + "clear", + "long", + "question", + "result", + "work", + "there" + ], + [ + "complex", + "structure", + "structures", + "complex" + ], + [ + "connectivity", + "connectivity" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "supported", + "strongly" + ], + [ + "clear", + "result", + "work", + "could" + ], + [ + "greater", + "greater" + ] + ], "keywords": [] }, { "name": "scil_connectivity_graph_measures", "docstring": "Evaluate graph theory measures from connectivity matrices.\nA length weighted and a streamline count weighted matrix are required since\nsome measures require one or the other.\n\nThis script evaluates the measures one subject at the time. To generate a\npopulation dictionary (similarly to other scil_connectivity_*.py scripts), use\nthe --append_json option as well as using the same output filename.\n>>> for i in hcp/*/; do scil_connectivity_graph_measures.py ${i}/sc_prob.npy\n ${i}/len_prob.npy hcp_prob.json --append_json --avg_node_wise; done\n\nSome measures output one value per node, the default behavior is to list\nthem all into a list. To obtain only the average use the\n--avg_node_wise option.\n\nThe computed connectivity measures are:\ncentrality, modularity, assortativity, participation, clustering,\nnodal_strength, local_efficiency, global_efficiency, density, rich_club,\npath_length, edge_count, omega, sigma\n\nFor more details about the measures, please refer to\n- https://sites.google.com/site/bctnet/measures\n- https://github.com/aestrivex/bctpy/wiki\n\nThis script is under the GNU GPLv3 license, for more detail please refer to\nhttps://www.gnu.org/licenses/gpl-3.0.en.html\n\nFormerly: scil_evaluate_connectivity_graph_measures.py", "help": "usage: scil_connectivity_graph_measures.py [-h]\n [--filtering_mask FILTERING_MASK]\n [--avg_node_wise] [--append_json]\n [--small_world] [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_conn_matrix in_length_matrix\n out_json\n\nEvaluate graph theory measures from connectivity matrices.\nA length weighted and a streamline count weighted matrix are required since\nsome measures require one or the other.\n\nThis script evaluates the measures one subject at the time. To generate a\npopulation dictionary (similarly to other scil_connectivity_*.py scripts), use\nthe --append_json option as well as using the same output filename.\n>>> for i in hcp/*/; do scil_connectivity_graph_measures.py ${i}/sc_prob.npy\n ${i}/len_prob.npy hcp_prob.json --append_json --avg_node_wise; done\n\nSome measures output one value per node, the default behavior is to list\nthem all into a list. 
To obtain only the average use the\n--avg_node_wise option.\n\nThe computed connectivity measures are:\ncentrality, modularity, assortativity, participation, clustering,\nnodal_strength, local_efficiency, global_efficiency, density, rich_club,\npath_length, edge_count, omega, sigma\n\nFor more details about the measures, please refer to\n- https://sites.google.com/site/bctnet/measures\n- https://github.com/aestrivex/bctpy/wiki\n\nThis script is under the GNU GPLv3 license, for more detail please refer to\nhttps://www.gnu.org/licenses/gpl-3.0.en.html\n\nFormerly: scil_evaluate_connectivity_graph_measures.py\n\npositional arguments:\n in_conn_matrix Input connectivity matrix (.npy).\n Typically a streamline count weighted matrix.\n in_length_matrix Input length weighted matrix (.npy).\n out_json Path of the output json.\n\noptions:\n -h, --help show this help message and exit\n --filtering_mask FILTERING_MASK\n Binary filtering mask to apply before computing the measures.\n --avg_node_wise Return a single value for node-wise measures.\n --append_json If the file already exists, will append to the dictionary.\n --small_world Compute measure related to small worldness (omega and sigma).\n This option is much slower.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\n[1] Rubinov, Mikail, and Olaf Sporns. \"Complex network measures of brain\n connectivity: uses and interpretations.\" Neuroimage 52.3 (2010):\n 1059-1069.\n", - "synonyms": [], + "synonyms": [ + [ + "contrast", + "highly", + "similarly" + ], + [ + "create", + "generate" + ], + [ + "order", + "required" + ], + [ + "average", + "per" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "large", + "larger", + "small" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "left", + "into" + ], + [ + "long", + "work", + "more" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "attention", + "experience", + "long", + "result", + "work", + "much" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "meaning", + "refer" + ], + [ + "long", + "a" + ], + [ + "matrices", + "matrices" + ], + [ + "applied", + "apply" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "result", + "work", + "working", + "time" + ], + [ + "average", + "average" + ], + [ + "population", + "population" + ], + [ + "specific", + "related" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "variety", + "work", + "other" + ], + [ + "exist", + "exists" + ], + [ + "left", + "before" + ], + [ + "matrices", + "matrix" + ], + [ + "create", + "order", + "work", + "to" + ], + 
[ + "considered", + "is" + ], + [ + "streamline", + "streamline" + ], + [ + "weighted", + "weighted" + ], + [ + "work", + "working", + "done" + ], + [ + "complex", + "structure", + "structures", + "complex" + ], + [ + "connectivity", + "connectivity" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "network", + "networks", + "network" + ], + [ + "examine", + "evaluate" + ], + [ + "matter", + "question", + "subject", + "subjects", + "subject" + ], + [ + "considered", + "experience", + "large", + "long", + "result", + "variety", + "work", + "working", + "well" + ], + [ + "result", + "since" + ], + [ + "large", + "work", + "some" + ] + ], "keywords": [] }, { "name": "scil_connectivity_hdf5_average_density_map", "docstring": "Compute a density map for each connection from a hdf5 file.\nTypically use after scil_tractogram_segment_bundles_for_connectivity.py in\norder to obtain the average density map of each connection to allow the use\nof --similarity in scil_connectivity_compute_matrices.py.\n\nThis script is parallelized, but will run much slower on non-SSD if too many\nprocesses are used. The output is a directory containing the thousands of\nconnections:\nout_dir/\n |-- LABEL1_LABEL1.nii.gz\n |-- LABEL1_LABEL2.nii.gz\n |-- [...]\n |-- LABEL90_LABEL90.nii.gz\n\nFormerly: scil_compute_hdf5_average_density_map.py", "help": "usage: scil_connectivity_hdf5_average_density_map.py [-h] [--binary]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_hdf5 [in_hdf5 ...]\n out_dir\n\nCompute a density map for each connection from a hdf5 file.\nTypically use after scil_tractogram_segment_bundles_for_connectivity.py in\norder to obtain the average density map of each connection to allow the use\nof --similarity in scil_connectivity_compute_matrices.py.\n\nThis script is parallelized, but will run much slower on non-SSD if too many\nprocesses are used. The output is a directory containing the thousands of\nconnections:\nout_dir/\n |-- LABEL1_LABEL1.nii.gz\n |-- LABEL1_LABEL2.nii.gz\n |-- [...]\n |-- LABEL90_LABEL90.nii.gz\n\nFormerly: scil_compute_hdf5_average_density_map.py\n\npositional arguments:\n in_hdf5 List of HDF5 filenames (.h5) from scil_tractogram_segment_bundles_for_connectivity.py.\n out_dir Path of the output directory.\n\noptions:\n -h, --help show this help message and exit\n --binary Binarize density maps before the population average.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "attention", + "experience", + "long", + "result", + "work", + "much" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "too" + ], + [ + "methods", + "use" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "average", + "average" + ], + [ + "population", + "population" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "left", + "after" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "left", + "before" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "large", + "work", + "many" + ], + [ + "individual", + "each" + ], + [ + "connect", + "connected", + "connection", + "connections", + "connections" + ], + [ + "order", + "allow" + ], + [ + "similarity", + "similarity" + ], + [ + "step", + "start" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "connection", + "connections", + "connection" + ], + [ + "maps", + "maps" + ], + [ + "maps", + "map" + ] + ], "keywords": [] }, { "name": "scil_connectivity_math", "docstring": "Performs an operation on a list of matrices. The supported operations are\nlisted below.\n\nSome operations such as multiplication or addition accept float value as\nparameters instead of matrices.\n> scil_connectivity_math.py multiplication mat.npy 10 mult_10.npy", "help": "usage: scil_connectivity_math.py [-h] [--data_type DATA_TYPE]\n [--exclude_background]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference}\n in_matrices [in_matrices ...] out_matrix\n\nPerforms an operation on a list of matrices. 
The supported operations are\nlisted below.\n\nSome operations such as multiplication or addition accept float value as\nparameters instead of matrices.\n> scil_connectivity_math.py multiplication mat.npy 10 mult_10.npy\n\n lower_threshold: MAT THRESHOLD\n All values below the threshold will be set to zero.\n All values above the threshold will be set to one.\n \n upper_threshold: MAT THRESHOLD\n All values below the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_eq: MAT THRESHOLD\n All values below the threshold will be set to zero.\n All values above or equal the threshold will be set to one.\n \n upper_threshold_eq: MAT THRESHOLD\n All values below or equal the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_otsu: MAT\n All values below or equal to the Otsu threshold will be set to zero.\n All values above the Otsu threshold will be set to one.\n (Otsu's method is an algorithm to perform automatic matrix thresholding\n of the background.)\n \n upper_threshold_otsu: MAT\n All values below the Otsu threshold will be set to one.\n All values above or equal to the Otsu threshold will be set to zero.\n Equivalent to lower_threshold_otsu followed by an inversion.\n \n lower_clip: MAT THRESHOLD\n All values below the threshold will be set to threshold.\n \n upper_clip: MAT THRESHOLD\n All values above the threshold will be set to threshold.\n \n absolute_value: MAT\n All negative values will become positive.\n \n round: MAT\n Round all decimal values to the closest integer.\n \n ceil: MAT\n Ceil all decimal values to the next integer.\n \n floor: MAT\n Floor all decimal values to the previous integer.\n \n normalize_sum: MAT\n Normalize the matrix so the sum of all values is one.\n \n normalize_max: MAT\n Normalize the matrix so the maximum value is one.\n \n log_10: MAT\n Apply a log (base 10) to all non-zero values of a matrix.\n \n log_e: MAT\n Apply a natural log to all non-zero values of a matrix.\n \n convert: MAT\n Perform no operation, but simply change the data type.\n \n invert: MAT\n Operation on binary matrix to interchange 0s and 1s in a binary mask.\n \n addition: MATs\n Add multiple matrices together.\n \n subtraction: MAT_1 MAT_2\n Subtract the second matrix from the first (MAT_1 - MAT_2).\n \n multiplication: MATs\n Multiply multiple matrices together (danger of underflow and overflow).\n \n division: MAT_1 MAT_2\n Divide the first matrix by the second (danger of underflow and overflow).\n Zero values are ignored and excluded from the operation.\n \n mean: MATs\n Compute the mean of matrices.\n If a single 4D matrix is provided, average along the last dimension.\n \n std: MATs\n Compute the standard deviation average of multiple matrices.\n If a single 4D matrix is provided, compute the STD along the last\n dimension.\n \n correlation: MATs\n Computes the correlation of the 3x3x3 neighborhood of each voxel, for\n all pairs of input matrices. The final matrix is the average correlation\n (through all pairs).\n For a given pair of matrices:\n - Background is considered as 0. May lead to very high correlations\n close to the border of the background regions, or very poor ones if the\n background in both matrices differ.\n - Images are zero-padded.
For the same reason as higher, may lead to\n very high correlations if you have data close to the border of the\n matrix.\n - NaN values (if a voxel's neighborhood is entirely uniform; std 0) are\n replaced by\n - 0 if at least one neighborhood was entirely containing background.\n - 1 if the voxel's neighborhoods are uniform in both matrices\n - 0 if the voxel's neighborhoods is uniform in one matrix, but not\n the other.\n\n UPDATE AS OF VERSION 2.0: Random noise was previously added in the\n process to help avoid NaN values. Now replaced by either 0 or 1 as\n explained above.\n \n union: MATs\n Operation on binary matrix to keep voxels, that are non-zero, in at\n least one file.\n \n intersection: MATs\n Operation on binary matrix to keep the voxels, that are non-zero,\n are present in all files.\n \n difference: MAT_1 MAT_2\n Operation on binary matrix to keep voxels from the first file that are\n not in the second file (non-zeros).\n \n\npositional arguments:\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference}\n The type of operation to be performed on the matrices.\n in_matrices The list of matrices files or parameters.\n out_matrix Output matrix path.\n\noptions:\n -h, --help show this help message and exit\n --data_type DATA_TYPE\n Data type of the output image. Use the format: uint8, float16, int32.\n --exclude_background Does not affect the background of the original matrices.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "positive", + "negative" + ], + [ + "subsequently", + "previously" + ], + [ + "methods", + "method" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "clear", + "considered", + "highly", + "long", + "matter", + "true", + "work", + "very" + ], + [ + "working", + "together" + ], + [ + "long", + "work", + "working", + "now" + ], + [ + "possibility", + "danger" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "image", + "images" + ], + [ + "areas", + "region", + "regions", + "regions" + ], + [ + "positive", + "positive" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "thinking", + "you" + ], + [ + "areas", + "neighborhoods" + ], + [ + "high", + "higher", + "increase", + "level", + "higher" + ], + [ + "long", + "a" + ], + [ + "action", + "clear", + "future", + "result", + "step", + "change" + ], + [ + "voxel", + "voxel" + ], + [ + "matrices", + "matrices" + ], + [ + "held", + "on" + ], + [ + "applied", + "apply" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "lack", + "matter", + "possibility", + "question", + "result", + "thinking", + "true", + "reason" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "possibility", + "avoid" + ], + [ + "future", + "work", + "working", + "for" + ], + [ 
+ "clear", + "result", + "work", + "that" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "result", + "followed" + ], + [ + "random", + "random" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "last" + ], + [ + "held", + "in" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "average", + "average" + ], + [ + "clear", + "long", + "matter", + "result", + "work", + "so" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "difference", + "difference" + ], + [ + "algorithm", + "algorithm" + ], + [ + "binary", + "binary" + ], + [ + "thinking", + "simply" + ], + [ + "variety", + "work", + "other" + ], + [ + "methods", + "using" + ], + [ + "order", + "work", + "instead" + ], + [ + "process", + "processes", + "step", + "process" + ], + [ + "area", + "main", + "along" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "supported", + "supported" + ], + [ + "matrices", + "matrix" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "result", + "may" + ], + [ + "considered", + "subsequently", + "was" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "individual", + "each" + ], + [ + "matter", + "question", + "does" + ], + [ + "level", + "above" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "large", + "larger", + "variety", + "work", + "addition" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "future", + "held", + "step", + "next" + ], + [ + "area", + "neighborhood" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "large", + "work", + "some" + ], + [ + "considered", + "become" + ], + [ + "considered", + "specific", + "variety", + "such" + ] + ], "keywords": [] }, { "name": "scil_connectivity_normalize", "docstring": "Normalize a connectivity matrix coming from\nscil_tractogram_segment_bundles_for_connectivity.py.\n3 categories of normalization are available:\n-- Edge attributes\n - length: Multiply each edge by the average bundle length.\n Compensate for far away connections when using interface seeding.\n Cannot be used with inverse_length.\n\n - inverse_length: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n Cannot be used with length.\n\n - bundle_volume: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n\n-- Node attributes (Mutually exclusive)\n - parcel_volume: Divide each edge by the sum of node volume.\n Compensate for the likelihood of ending in the node.\n Compensate seeding bias when using interface seeding.\n\n - parcel_surface: Divide each edge by the sum of the node surface.\n Compensate for the likelihood of ending in the node.\n Compensate for seeding bias when using interface seeding.\n\n-- Matrix scaling (Mutually exclusive)\n - max_at_one: Maximum value of the matrix will be set to one.\n - 
sum_to_one: Ensure the sum of all edge weights is one\n - log_10: Apply a base 10 logarithm to all edge weights\n\nThe volume and length matrix should come from the\nscil_tractogram_segment_bundles_for_connectivity.py script.\n\nA review of the type of normalization is available in:\nColon-Perez, Luis M., et al. \"Dimensionless, scale-invariant, edge weight\nmetric for the study of complex structural networks.\" PLOS one 10.7 (2015).\n\nHowever, the proposed edge weighting presented in this publication is not\nimplemented.\n\nFormerly: scil_normalize_connectivity.py", "help": "usage: scil_connectivity_normalize.py [-h]\n [--length LENGTH_MATRIX | --inverse_length LENGTH_MATRIX]\n [--bundle_volume VOLUME_MATRIX]\n [--parcel_volume ATLAS LABELS_LIST | --parcel_surface ATLAS LABELS_LIST]\n [--max_at_one | --sum_to_one | --log_10]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_matrix out_matrix\n\nNormalize a connectivity matrix coming from\nscil_tractogram_segment_bundles_for_connectivity.py.\n3 categories of normalization are available:\n-- Edge attributes\n - length: Multiply each edge by the average bundle length.\n Compensate for far away connections when using interface seeding.\n Cannot be used with inverse_length.\n\n - inverse_length: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n Cannot be used with length.\n\n - bundle_volume: Divide each edge by the average bundle volume.\n Compensate for big connections when using white matter seeding.\n\n-- Node attributes (Mutually exclusive)\n - parcel_volume: Divide each edge by the sum of node volume.\n Compensate for the likelihood of ending in the node.\n Compensate seeding bias when using interface seeding.\n\n - parcel_surface: Divide each edge by the sum of the node surface.\n Compensate for the likelihood of ending in the node.\n Compensate for seeding bias when using interface seeding.\n\n-- Matrix scaling (Mutually exclusive)\n - max_at_one: Maximum value of the matrix will be set to one.\n - sum_to_one: Ensure the sum of all edge weights is one\n - log_10: Apply a base 10 logarithm to all edge weights\n\nThe volume and length matrix should come from the\nscil_tractogram_segment_bundles_for_connectivity.py script.\n\nA review of the type of normalization is available in:\nColon-Perez, Luis M., et al. \"Dimensionless, scale-invariant, edge weight\nmetric for the study of complex structural networks.\" PLOS one 10.7 (2015).\n\nHowever, the proposed edge weighting presented in this publication is not\nimplemented.\n\nFormerly: scil_normalize_connectivity.py\n\npositional arguments:\n in_matrix Input connectivity matrix. This is typically a streamline_count matrix (.npy).\n out_matrix Output normalized matrix (.npy).\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nEdge-wise options:\n --length LENGTH_MATRIX\n Length matrix used for edge-wise multiplication.\n --inverse_length LENGTH_MATRIX\n Length matrix used for edge-wise division.\n --bundle_volume VOLUME_MATRIX\n Volume matrix used for edge-wise division.\n --parcel_volume ATLAS LABELS_LIST\n Atlas and labels list for edge-wise division.\n --parcel_surface ATLAS LABELS_LIST\n Atlas and labels list for edge-wise division.\n\nScaling options:\n --max_at_one Scale matrix with maximum value at one.\n --sum_to_one Scale matrix with sum of all elements at one.\n --log_10 Apply a base 10 logarithm to the matrix.\n", - "synonyms": [], + "synonyms": [ + [ + "volume", + "volumes", + "volume" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "seeding", + "seeding" + ], + [ + "order", + "set" + ], + [ + "clear", + "considered", + "long", + "result", + "far" + ], + [ + "analysis", + "clinical", + "scientific", + "studies", + "study", + "study" + ], + [ + "long", + "a" + ], + [ + "applied", + "apply" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "step", + "work", + "come" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "presented", + "presented" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "proposed", + "proposed" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "atlas", + "atlas" + ], + [ + "average", + "average" + ], + [ + "network", + "networks", + "networks" + ], + [ + "structural", + "structural" + ], + [ + "methods", + "using" + ], + [ + "matrices", + "matrix" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "large", + "big" + ], + [ + "individual", + "each" + ], + [ + "connect", + "connected", + "connection", + "connections", + "connections" + ], + [ + "left", + "away" + ], + [ + "complex", + "structure", + "structures", + "complex" + ], + [ + "connectivity", + "connectivity" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "possibility", + "likelihood" + ], + [ + "bundles", + "bundle" + ], + [ + "clear", + "considered", + "result", + "however" + ], + [ + "exist", + "cannot" + ] + ], "keywords": [] }, { "name": "scil_connectivity_pairwise_agreement", "docstring": "Evaluate pair-wise similarity measures of connectivity matrices.\n\nThe computed similarity measures are:\nsum of squared differences and Pearson correlation coefficient\n\nFormerly: scil_evaluate_connectivity_pairwaise_agreement_measures.py", "help": "usage: scil_connectivity_pairwise_agreement.py [-h] [--single_compare matrix]\n [--normalize] [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_matrices [in_matrices ...]\n out_json\n\nEvaluate pair-wise similarity measures of
connectivity matrices.\n\nThe computed similarity measures are:\nsum of squared differences and Pearson correlation coefficient\n\nFormerly: scil_evaluate_connectivity_pairwaise_agreement_measures.py\n\npositional arguments:\n in_matrices Path of the input matrices.\n out_json Path of the output json file.\n\noptions:\n -h, --help show this help message and exit\n --single_compare matrix\n Compare inputs to this single file.\n (Else, compute all pairs in in_matrices).\n --normalize If set, will normalize all matrices from zero to one.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "work", + "and" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "considered", + "are" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "order", + "set" + ], + [ + "matrices", + "matrices" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "difference", + "difference" + ], + [ + "methods", + "using" + ], + [ + "matrices", + "matrix" + ], + [ + "matter", + "thinking", + "else" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "similarity", + "similarity" + ], + [ + "connectivity", + "connectivity" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "examine", + "evaluate" + ] + ], "keywords": [] }, { "name": "scil_connectivity_print_filenames", "docstring": "Output the list of filenames using the coordinates from a binary connectivity\nmatrix. Typically used to move around files that are considered valid after\nthe scil_connectivity_filter.py script.\n\nExample:\n# Keep connections with more than 1000 streamlines for 100% of a population\nscil_connectivity_filter.py filtering_mask.npy\n --greater_than */streamlines_count.npy 1000 1.0\nscil_connectivity_print_filenames.py filtering_mask.npy\n labels_list.txt pass.txt\nfor file in $(cat pass.txt);\n do mv ${SOMEWHERE}/${FILE} ${SOMEWHERE_ELSE}/;\ndone\n\nFormerly: scil_print_connectivity_filenames.py", "help": "usage: scil_connectivity_print_filenames.py [-h] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_matrix labels_list out_txt\n\nOutput the list of filenames using the coordinates from a binary connectivity\nmatrix.
Typically used to move around files that are considered valid after\nthe scil_connectivity_filter.py script.\n\nExample:\n# Keep connections with more than 1000 streamlines for 100% of a population\nscil_connectivity_filter.py filtering_mask.npy\n --greater_than */streamlines_count.npy 1000 1.0\nscil_connectivity_print_filenames.py filtering_mask.npy\n labels_list.txt pass.txt\nfor file in $(cat pass.txt);\n do mv ${SOMEWHERE}/${FILE} ${SOMEWHERE_ELSE}/;\ndone\n\nFormerly: scil_print_connectivity_filenames.py\n\npositional arguments:\n in_matrix Binary matrix in numpy (.npy) format.\n Typically from scil_connectivity_filter.py\n labels_list List saved by the decomposition script.\n out_txt Output text file containing all filenames.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "long", + "work", + "more" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "invalid", + "valid", + "valid" + ], + [ + "long", + "a" + ], + [ + "action", + "clear", + "step", + "move" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "population", + "population" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "left", + "after" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "matrices", + "matrix" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "pass", + "pass" + ], + [ + "connect", + "connected", + "connection", + "connections", + "connections" + ], + [ + "work", + "working", + "done" + ], + [ + "connectivity", + "connectivity" + ], + [ + "work", + "all" + ], + [ + "total", + "100" + ] + ], "keywords": [] }, { "name": "scil_connectivity_reorder_rois", "docstring": "Re-order one or many connectivity matrices using a text file format.\nThe first row is the (x) and the second row is the (y); values must be space separated.\nThe resulting matrix does not have to be square (support unequal number of\nx and y).\n\nThe values refer to the coordinates (starting at 0) in the matrix, but if the\n--labels_list parameter is used, the values will refer to the label which will\nbe converted to the appropriate coordinates. This file must be the same as the\none provided to the scil_tractogram_segment_bundles_for_connectivity.py.\n\nTo subsequently use scil_visualize_connectivity.py with a lookup table, you\nmust use a label-based reordering json and use --labels_list.\n\nYou can also use the Optimal Leaf Ordering (OLO) algorithm to transform a\nsparse matrix into an ordering that reduces the matrix bandwidth. The output\nfile can then be re-used with --in_ordering.
Only one input can be used with\nthis option; we recommend an average streamline count or volume matrix.\n\nFormerly: scil_reorder_connectivity.py", "help": "usage: scil_connectivity_reorder_rois.py [-h]\n (--in_ordering IN_ORDERING | --optimal_leaf_ordering OUT_FILE)\n [--out_suffix OUT_SUFFIX]\n [--out_dir OUT_DIR]\n [--labels_list LABELS_LIST]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_matrices [in_matrices ...]\n\nRe-order one or many connectivity matrices using a text file format.\nThe first row is the (x) and the second row is the (y); values must be space separated.\nThe resulting matrix does not have to be square (support unequal number of\nx and y).\n\nThe values refer to the coordinates (starting at 0) in the matrix, but if the\n--labels_list parameter is used, the values will refer to the label which will\nbe converted to the appropriate coordinates. This file must be the same as the\none provided to the scil_tractogram_segment_bundles_for_connectivity.py.\n\nTo subsequently use scil_visualize_connectivity.py with a lookup table, you\nmust use a label-based reordering json and use --labels_list.\n\nYou can also use the Optimal Leaf Ordering (OLO) algorithm to transform a\nsparse matrix into an ordering that reduces the matrix bandwidth. The output\nfile can then be re-used with --in_ordering. Only one input can be used with\nthis option; we recommend an average streamline count or volume matrix.\n\nFormerly: scil_reorder_connectivity.py\n\npositional arguments:\n in_matrices Connectivity matrices in .npy or .txt format.\n\noptions:\n -h, --help show this help message and exit\n --in_ordering IN_ORDERING\n Txt file with the first row as x and second as y.\n --optimal_leaf_ordering OUT_FILE\n Output a text file with an ordering that aligns structures along the diagonal.\n --out_suffix OUT_SUFFIX\n Suffix for the output matrix filename.\n --out_dir OUT_DIR Output directory for the re-ordered matrices.\n --labels_list LABELS_LIST\n List saved by the decomposition script;\n --in_ordering must contain labels rather than coordinates (.txt).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Rubinov, Mikail, and Olaf Sporns.
\"Complex network measures of brain\n connectivity: uses and interpretations.\" Neuroimage 52.3 (2010):\n 1059-1069.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "left", + "into" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "parameter", + "parameters", + "parameter" + ], + [ + "clear", + "considered", + "create", + "form", + "manner", + "matter", + "result", + "subject", + "thinking", + "true", + "view", + "work", + "rather" + ], + [ + "methods", + "use" + ], + [ + "long", + "have" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "meaning", + "refer" + ], + [ + "long", + "a" + ], + [ + "matrices", + "matrices" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "space", + "space" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "average", + "average" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "algorithm", + "algorithm" + ], + [ + "methods", + "using" + ], + [ + "area", + "main", + "along" + ], + [ + "long", + "than" + ], + [ + "subsequently", + "subsequently" + ], + [ + "matrices", + "matrix" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "result", + "resulting" + ], + [ + "considered", + "is" + ], + [ + "large", + "work", + "many" + ], + [ + "matter", + "question", + "does" + ], + [ + "streamline", + "streamline" + ], + [ + "manner", + "specific", + "appropriate" + ], + [ + "complex", + "structure", + "structures", + "complex" + ], + [ + "connectivity", + "connectivity" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "based", + "based" + ], + [ + "network", + "networks", + "network" + ] + ], "keywords": [] }, { "name": "scil_denoising_nlmeans", "docstring": "Script to denoise a dataset with the Non Local Means algorithm.\n\nFormerly: scil_run_nlmeans.py", "help": "usage: scil_denoising_nlmeans.py [-h] [--mask] [--sigma float] [--log LOGFILE]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_image out_image number_coils\n\nScript to denoise a dataset with the Non Local Means algorithm.\n\nFormerly: scil_run_nlmeans.py\n\npositional arguments:\n in_image Path of the image file to denoise.\n out_image Path to save the denoised image file.\n number_coils Number of receiver coils of the 
scanner.\n Use number_coils=1 in the case of a SENSE (GE, Philips) reconstruction and \n number_coils >= 1 for GRAPPA reconstruction (Siemens). number_coils=4 works well for the 1.5T\n in Sherbrooke. Use number_coils=0 if the noise is considered Gaussian distributed.\n\noptions:\n -h, --help show this help message and exit\n --mask Path to a binary mask. Only the data inside the mask will be used for computations\n --sigma float The standard deviation of the noise to use instead of computing it automatically.\n --log LOGFILE If supplied, name of the text file to store the logs.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "experience", + "perception", + "thinking", + "true", + "sense" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "algorithm", + "algorithm" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "order", + "work", + "instead" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "work", + "works" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "step", + "start" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "matter", + "question", + "case" + ], + [ + "considered", + "experience", + "large", + "long", + "result", + "variety", + "work", + "working", + "well" + ], + [ + "meaning", + "order", + "result", + "step", + "true", + "means" + ] + ], "keywords": [] }, { "name": "scil_dki_metrics", "docstring": "Script to compute the Diffusion Kurtosis Imaging (DKI) and Mean Signal DKI\n(MSDKI) metrics. DKI is a multi-shell diffusion model. The input DWI needs\nto be multi-shell, i.e. multi-bvalued.\n\nSince the diffusion kurtosis model involves the estimation of a large number\nof parameters and since the non-Gaussian components of the diffusion signal\nare more sensitive to artefacts, you should really denoise your DWI volume\nbefore using this DKI script (e.g. scil_denoising_nlmeans.py). 
Moreover, to\nremove biases due to fiber dispersion, fiber crossings and other mesoscopic\nproperties of the underlying tissue, MSDKI does a powder-average of DWI for all\ndirections, thus removing the orientational dependencies and creating an\nalternative mean kurtosis map.\n\nDKI is also known to be vulnerable to artefacted voxels induced by the\nlow radial diffusivities of aligned white matter (CC, CST voxels). Since it is\nvery hard to capture non-Gaussian information due to the low decays in radial\ndirection, its kurtosis estimates have very low robustness.\nNoisy kurtosis estimates tend to be negative and their absolute values can be\norders of magnitude higher than the typical kurtosis values. Consequently,\nthese negative kurtosis values will heavily propagate to the mean and radial\nkurtosis metrics. This is well-reported in [Rafael Henriques MSc thesis 2012,\nchapter 3]. Two ways to overcome this issue: i) compute the kurtosis values\nfrom powder-averaged MSDKI, and ii) perform 3D Gaussian smoothing. On\npowder-averaged signal decays, you don't have this low diffusivity issue and\nyour kurtosis estimates have much higher precision (additionally they are\nindependent to the fODF).\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section. If --not_all is set, only the metrics specified\nexplicitly by the flags will be output.\n\nThis script directly comes from the DIPY example gallery and references\ntherein.\n[1] examples_built/reconst_dki/#example-reconst-dki\n[2] examples_built/reconst_msdki/#example-reconst-msdki\n\nFormerly: scil_compute_kurtosis_metrics.py", "help": "usage: scil_dki_metrics.py [-h] [--mask MASK] [--tolerance tol]\n [--skip_b0_check] [--min_k MIN_K] [--max_k MAX_K]\n [--smooth SMOOTH] [--not_all] [--ak file]\n [--mk file] [--rk file] [--msk file]\n [--dki_fa file] [--dki_md file] [--dki_ad file]\n [--dki_rd file] [--dki_residual file] [--msd file]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec\n\nScript to compute the Diffusion Kurtosis Imaging (DKI) and Mean Signal DKI\n(MSDKI) metrics. DKI is a multi-shell diffusion model. The input DWI needs\nto be multi-shell, i.e. multi-bvalued.\n\nSince the diffusion kurtosis model involves the estimation of a large number\nof parameters and since the non-Gaussian components of the diffusion signal\nare more sensitive to artefacts, you should really denoise your DWI volume\nbefore using this DKI script (e.g. scil_denoising_nlmeans.py). Moreover, to\nremove biases due to fiber dispersion, fiber crossings and other mesoscopic\nproperties of the underlying tissue, MSDKI does a powder-average of DWI for all\ndirections, thus removing the orientational dependencies and creating an\nalternative mean kurtosis map.\n\nDKI is also known to be vulnerable to artefacted voxels induced by the\nlow radial diffusivities of aligned white matter (CC, CST voxels). Since it is\nvery hard to capture non-Gaussian information due to the low decays in radial\ndirection, its kurtosis estimates have very low robustness.\nNoisy kurtosis estimates tend to be negative and their absolute values can be\norders of magnitude higher than the typical kurtosis values. Consequently,\nthese negative kurtosis values will heavily propagate to the mean and radial\nkurtosis metrics. This is well-reported in [Rafael Henriques MSc thesis 2012,\nchapter 3].
Two ways to overcome this issue: i) compute the kurtosis values\nfrom powder-averaged MSDKI, and ii) perform 3D Gaussian smoothing. On\npowder-averaged signal decays, you don't have this low diffusivity issue and\nyour kurtosis estimates have much higher precision (additionally they are\nindependent to the fODF).\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section. If --not_all is set, only the metrics specified\nexplicitly by the flags will be output.\n\nThis script directly comes from the DIPY example gallery and references\ntherein.\n[1] examples_built/reconst_dki/#example-reconst-dki\n[2] examples_built/reconst_msdki/#example-reconst-msdki\n\nFormerly: scil_compute_kurtosis_metrics.py\n\npositional arguments:\n in_dwi Path of the input multi-shell DWI dataset.\n in_bval Path of the b-value file, in FSL format.\n in_bvec Path of the b-vector file, in FSL format.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computations and reconstruction.\n [Default: None]\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --min_k MIN_K Minimum kurtosis value in the output maps \n (ak, mk, rk). In theory, -3/7 is the min kurtosis \n limit for regions that consist of water confined \n to spherical pores (see DIPY example and \n documentation) [Default: 0.0].\n --max_k MAX_K Maximum kurtosis value in the output maps \n (ak, mk, rk). In theory, 10 is the max kurtosis\n limit for regions that consist of water confined\n to spherical pores (see DIPY example and \n documentation) [Default: 3.0].\n --smooth SMOOTH Smooth input DWI with a 3D Gaussian filter with \n full-width-half-max (fwhm). Kurtosis fitting is \n sensitive and outliers occur easily. According to\n tests on HCP, CB_Brain, Penthera3T, this smoothing\n is thus turned ON by default with fwhm=2.5. \n [Default: 2.5].\n --not_all If set, will only save the metrics explicitly \n specified using the other metrics flags. \n [Default: not set].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics files flags:\n --ak file Output filename for the axial kurtosis.\n --mk file Output filename for the mean kurtosis.\n --rk file Output filename for the radial kurtosis.\n --msk file Output filename for the mean signal kurtosis.\n --dki_fa file Output filename for the fractional anisotropy from DKI.\n --dki_md file Output filename for the mean diffusivity from DKI.\n --dki_ad file Output filename for the axial diffusivity from DKI.\n --dki_rd file Output filename for the radial diffusivity from DKI.\n\nQuality control files flags:\n --dki_residual file Output filename for the map of the residual of the tensor fit.\n Note. In previous versions, the resulting map was normalized. 
\n It is not anymore.\n --msd file Output filename for the mean signal diffusion (powder-average).\n", - "synonyms": [], + "synonyms": [ + [ + "parameter", + "vector" + ], + [ + "positive", + "negative" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "reported", + "reported" + ], + [ + "long", + "work", + "more" + ], + [ + "fibre", + "fiber" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "specific", + "specific" + ], + [ + "direction", + "directions" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "diffusion", + "diffusion" + ], + [ + "contrast", + "typical" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "exist", + "exists" + ], + [ + "long", + "than" + ], + [ + "imaging", + "imaging" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "considered", + "form", + "meaning", + "order", + "result", + "thus" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "attention", + "comes" + ], + [ + "maps", + "map" + ], + [ + "fundamental", + "underlying" + ], + [ + "long", + "full" + ], + [ + "work", + "also" + ], + [ + "considered", + "known" + ], + [ + "precision", + "precision" + ], + [ + "result", + "moreover" + ], + [ + "left", + "from" + ], + [ + "create", + "creating" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "create", + "ways" + ], + [ + "thinking", + "you" + ], + [ + "limiting", + "limit" + ], + [ + "long", + "a" + ], + [ + "future", + "will" + ], + [ + "parameters", + "specified" + ], + [ + "average", + "average" + ], + [ + "view", + "see" + ], + [ + "signal", + "signal" + ], + [ + "result", + "resulting" + ], + [ + "considered", + "subsequently", + "was" + ], + [ + "matter", + "question", + "does" + ], + [ + "comprised", + "comprising", + "consist" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "lack", + "quality" + ], + [ + "thinking", + "really" + ], + [ + "lack", + "result", + "due" + ], + [ + "brain", + "tissue" + ], + [ + "considered", + "are" + ], + [ + "attention", + "experience", + "long", + "result", + "work", + "much" + ], + [ + "areas", + "region", + "regions", + "regions" + ], + [ + "work", + "find" + ], + [ + "left", + "half" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "thinking", + "i" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "methods", + "using" + ], + [ + "variety", + "work", + "other" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "left", + "before" + ], + [ + "high", + "higher", + "level", + "low" + ], + [ + "matter", + "question", + "subject", + "issue" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "long", + "two" + ], + [ + "based", + "reported", + "according" + ], + [ + 
"total", + "number" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "work", + "and" + ], + [ + "clear", + "considered", + "highly", + "long", + "matter", + "true", + "work", + "very" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "direction", + "direction" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "high", + "higher", + "increase", + "level", + "higher" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "large", + "larger", + "large" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "axial", + "axial" + ], + [ + "binary", + "binary" + ], + [ + "future", + "current" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "order", + "allow" + ], + [ + "maps", + "maps" + ], + [ + "systems", + "components" + ], + [ + "considered", + "experience", + "large", + "long", + "result", + "variety", + "work", + "working", + "well" + ], + [ + "result", + "since" + ], + [ + "left", + "turned" + ] + ], "keywords": [] }, { "name": "scil_dti_convert_tensors", "docstring": "Conversion of tensors (the 6 values from the triangular matrix) between various\nsoftware standards. We cannot discover the input format type, user must know\nhow the tensors were created.", "help": "usage: scil_dti_convert_tensors.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file out_file in_format out_format\n\nConversion of tensors (the 6 values from the triangular matrix) between various\nsoftware standards. We cannot discover the input format type, user must know\nhow the tensors were created.\n\n Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639\n \n MRTRIX's order is : [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]\n Shape: [i, j , k, 6].\n Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html\n \n ANTS's order ('nifti format') is : [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].\n Shape: [i, j , k, 1, 6] (Careful, file is 5D).\n Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software\n \n FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide\n (Also used for the Fibernavigator)\n \n\npositional arguments:\n in_file Input tensors filename.\n out_file Output tensors filename.\n in_format Input format. Choices: ['fsl', 'nifti', 'mrtrix', 'dipy']\n out_format Output format. Choices: ['fsl', 'nifti', 'mrtrix', 'dipy']\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "application", + "database", + "user" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "matter", + "question", + "thinking", + "true", + "know" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "application", + "systems", + "software" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "specific", + "variety", + "various" + ], + [ + "thinking", + "i" + ], + [ + "diffusion", + "diffusion" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "matter", + "question", + "thinking", + "true", + "work", + "working", + "how" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "methods", + "using" + ], + [ + "variety", + "work", + "other" + ], + [ + "matrices", + "matrix" + ], + [ + "considered", + "is" + ], + [ + "create", + "created" + ], + [ + "shape", + "structure", + "shape" + ], + [ + "exist", + "cannot" + ] + ], "keywords": [] }, { "name": "scil_dti_metrics", "docstring": "Script to compute all of the Diffusion Tensor Imaging (DTI) metrics.\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section.\n\nIf --not_all is set, only the metrics specified explicitly by the flags\nwill be output. The available metrics are:\n\nfractional anisotropy (FA), geodesic anisotropy (GA), axial diffusivisty (AD),\nradial diffusivity (RD), mean diffusivity (MD), mode, red-green-blue colored\nFA (rgb), principal tensor e-vector and tensor coefficients (dxx, dxy, dxz,\ndyy, dyz, dzz).\n\nFor all the quality control metrics such as residual, physically implausible\nsignals, pulsation and misalignment artifacts, see\n[J-D Tournier, S. Mori, A. Leemans. Diffusion Tensor Imaging and Beyond.\nMRM 2011].\n\nFormerly: scil_compute_dti_metrics.py", "help": "usage: scil_dti_metrics.py [-h] [-f] [--mask MASK] [--method method_name]\n [--not_all] [--ad file] [--evecs file]\n [--evals file] [--fa file] [--ga file] [--md file]\n [--mode file] [--norm file] [--rgb file]\n [--rd file] [--tensor file]\n [--tensor_format {fsl,nifti,mrtrix,dipy}]\n [--non-physical file] [--pulsation string]\n [--residual file] [--b0_threshold thr]\n [--skip_b0_check] [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nScript to compute all of the Diffusion Tensor Imaging (DTI) metrics.\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section.\n\nIf --not_all is set, only the metrics specified explicitly by the flags\nwill be output. 
The available metrics are:\n\nfractional anisotropy (FA), geodesic anisotropy (GA), axial diffusivisty (AD),\nradial diffusivity (RD), mean diffusivity (MD), mode, red-green-blue colored\nFA (rgb), principal tensor e-vector and tensor coefficients (dxx, dxy, dxz,\ndyy, dyz, dzz).\n\nFor all the quality control metrics such as residual, physically implausible\nsignals, pulsation and misalignment artifacts, see\n[J-D Tournier, S. Mori, A. Leemans. Diffusion Tensor Imaging and Beyond.\nMRM 2011].\n\nFormerly: scil_compute_dti_metrics.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computations and reconstruction. (Default: None)\n --method method_name Tensor fit method.\n WLS for weighted least squares\n LS for ordinary least squares\n NLLS for non-linear least-squares\n restore for RESTORE robust tensor fitting. (Default: WLS)\n --not_all If set, will only save the metrics explicitly specified using the other metrics flags. (Default: not set).\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
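The fit methods listed under --method (WLS, LS, NLLS, restore) correspond to DIPY's tensor fit methods. A minimal sketch of an equivalent fit done directly with DIPY, assuming it is installed and using hypothetical file names:

    import nibabel as nib
    from dipy.core.gradients import gradient_table
    from dipy.io.gradients import read_bvals_bvecs
    from dipy.reconst.dti import TensorModel

    # Hypothetical file names; bvals/bvecs are in FSL format as above.
    dwi = nib.load('dwi.nii.gz')
    bvals, bvecs = read_bvals_bvecs('dwi.bval', 'dwi.bvec')
    gtab = gradient_table(bvals, bvecs)

    # Weighted least squares, matching the script's default --method WLS,
    # restricted to a binary mask as --mask does.
    mask = nib.load('mask.nii.gz').get_fdata().astype(bool)
    fit = TensorModel(gtab, fit_method='WLS').fit(dwi.get_fdata(), mask=mask)

    fa, md, rd, ad = fit.fa, fit.md, fit.rd, fit.ad   # a few of the metrics above
    evals, evecs = fit.evals, fit.evecs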
\n Default level is warning, default when using -v is info.\n\nMetrics files flags:\n --ad file Output filename for the axial diffusivity.\n --evecs file Output filename for the eigenvectors of the tensor.\n --evals file Output filename for the eigenvalues of the tensor.\n --fa file Output filename for the fractional anisotropy.\n --ga file Output filename for the geodesic anisotropy.\n --md file Output filename for the mean diffusivity.\n --mode file Output filename for the mode.\n --norm file Output filename for the tensor norm.\n --rgb file Output filename for the colored fractional anisotropy.\n --rd file Output filename for the radial diffusivity.\n --tensor file Output filename for the tensor coefficients.\n --tensor_format {fsl,nifti,mrtrix,dipy}\n Format used for the tensors saved in --tensor file.(default: fsl)\n \n Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639\n \n MRTRIX's order is : [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]\n Shape: [i, j , k, 6].\n Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html\n \n ANTS's order ('nifti format') is : [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].\n Shape: [i, j , k, 1, 6] (Careful, file is 5D).\n Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software\n \n FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide\n (Also used for the Fibernavigator)\n \n\nQuality control files flags:\n --non-physical file Output filename for the voxels with physically implausible signals \n where the mean of b=0 images is below one or more diffusion-weighted images.\n --pulsation string Standard deviation map across all diffusion-weighted images and across b=0 images if more than one is available.\n Shows pulsation and misalignment artifacts.\n --residual file Output filename for the map of the residual of the tensor fit.\n", - "synonyms": [], + "synonyms": [ + [ + "parameter", + "vector" + ], + [ + "step", + "continue" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "methods", + "method" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "long", + "work", + "more" + ], + [ + "blue", + "dark", + "green", + "red", + "white", + "blue" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "image", + "images" + ], + [ + "methods", + "use" + ], + [ + "work", + "find" + ], + [ + "order", + "set" + ], + [ + "specific", + "specific" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "application", + "systems", + "software" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "diffusion", + "diffusion" + ], + [ + 
"thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "blue", + "green", + "red", + "white", + "red" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "blue", + "colored" + ], + [ + "blue", + "green", + "red", + "white", + "green" + ], + [ + "long", + "with" + ], + [ + "parameters", + "specified" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "axial", + "axial" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "variety", + "work", + "other" + ], + [ + "view", + "see" + ], + [ + "exist", + "exists" + ], + [ + "long", + "than" + ], + [ + "signal", + "signals" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "imaging", + "imaging" + ], + [ + "areas", + "across" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "area", + "work", + "where" + ], + [ + "lack", + "minimal" + ], + [ + "weighted", + "weighted" + ], + [ + "order", + "allow" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "lack", + "quality" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "maps", + "map" + ], + [ + "principal", + "principal" + ], + [ + "shape", + "structure", + "shape" + ], + [ + "considered", + "specific", + "variety", + "such" + ] + ], "keywords": [] }, { "name": "scil_dwi_apply_bias_field", "docstring": "Apply bias field correction to DWI. This script doesn't compute the bias\nfield itself. It ONLY applies an existing bias field. Please use the ANTs\nN4BiasFieldCorrection executable to compute the bias field.\n\nFormerly: scil_apply_bias_field_on_dwi.py", "help": "usage: scil_dwi_apply_bias_field.py [-h] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bias_field out_name\n\nApply bias field correction to DWI. This script doesn't compute the bias\nfield itself. It ONLY applies an existing bias field. Please use the ANTs\nN4BiasFieldCorrection executable to compute the bias field.\n\nFormerly: scil_apply_bias_field_on_dwi.py\n\npositional arguments:\n in_dwi DWI Nifti image.\n in_bias_field Bias field Nifti image.\n out_name Corrected DWI Nifti image.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Apply bias field correction only in the region defined by the mask.\n If this is not given, the bias field is still only applied only in non-background data \n (i.e. where the dwi is not 0).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "applied", + "applied" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "applied", + "apply" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "thinking", + "i" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "methods", + "using" + ], + [ + "defined", + "function", + "defined" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "area", + "work", + "where" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "clear", + "long", + "work", + "still" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "area", + "areas", + "region", + "regions", + "region" + ] + ], "keywords": [] }, { "name": "scil_dwi_compute_snr", "docstring": "Script to compute signal to noise ratio (SNR) in a region of interest (ROI)\nof a DWI volume.\n\nIt will compute the SNR for all DWI volumes of the input image seperately.\nThe output will contain the SNR which is the ratio of\nmean(signal) / std(noise).\nThe mean of the signal is computed inside the mask.\nThe standard deviation of the noise is estimated inside the noise_mask\nor inside the same mask if a noise_map is provided.\nIf it's not supplied, it will be estimated using the data outside the brain,\ncomputed with Dipy medotsu\n\nIf verbose is True, the SNR for every DWI volume will be output.\n\nThis works best in a well-defined ROI such as the corpus callosum.\nIt is heavily dependent on the ROI and its quality.\n\nWe highly recommend using a noise_map if you can acquire one.\nSee refs [1, 2] that describe the noise map acquisition.\n[1] St-Jean, et al (2016). Non Local Spatial and Angular Matching...\n https://doi.org/10.1016/j.media.2016.02.010\n[2] Reymbaut, et al (2021). 
Magic DIAMOND...\n https://doi.org/10.1016/j.media.2021.101988\n\nFormerly: scil_snr_in_roi.py", "help": "usage: scil_dwi_compute_snr.py [-h]\n [--noise_mask NOISE_MASK | --noise_map NOISE_MAP]\n [--b0_thr B0_THR] [--out_basename OUT_BASENAME]\n [--split_shells] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_mask\n\nScript to compute signal to noise ratio (SNR) in a region of interest (ROI)\nof a DWI volume.\n\nIt will compute the SNR for all DWI volumes of the input image seperately.\nThe output will contain the SNR which is the ratio of\nmean(signal) / std(noise).\nThe mean of the signal is computed inside the mask.\nThe standard deviation of the noise is estimated inside the noise_mask\nor inside the same mask if a noise_map is provided.\nIf it's not supplied, it will be estimated using the data outside the brain,\ncomputed with Dipy medotsu\n\nIf verbose is True, the SNR for every DWI volume will be output.\n\nThis works best in a well-defined ROI such as the corpus callosum.\nIt is heavily dependent on the ROI and its quality.\n\nWe highly recommend using a noise_map if you can acquire one.\nSee refs [1, 2] that describe the noise map acquisition.\n[1] St-Jean, et al (2016). Non Local Spatial and Angular Matching...\n https://doi.org/10.1016/j.media.2016.02.010\n[2] Reymbaut, et al (2021). Magic DIAMOND...\n https://doi.org/10.1016/j.media.2021.101988\n\nFormerly: scil_snr_in_roi.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n in_mask Binary mask of the region used to estimate SNR.\n\noptions:\n -h, --help show this help message and exit\n --b0_thr B0_THR All b-values with values less than or equal to b0_thr are considered as b0s i.e. without diffusion weighting. [0.0]\n --out_basename OUT_BASENAME\n Path and prefix for the various saved file.\n --split_shells SNR will be split into shells.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
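The ratio stated above for scil_dwi_compute_snr, mean(signal) / std(noise), is straightforward to reproduce per DWI volume. A minimal sketch, assuming a noise mask is available and using hypothetical file names:

    import nibabel as nib

    # One SNR value per DWI volume, as the script reports with -v.
    data = nib.load('dwi.nii.gz').get_fdata()                  # [x, y, z, n]
    roi = nib.load('cc_mask.nii.gz').get_fdata().astype(bool)  # e.g. corpus callosum
    noise = nib.load('noise_mask.nii.gz').get_fdata().astype(bool)

    snr = [data[..., i][roi].mean() / data[..., i][noise].std()
           for i in range(data.shape[-1])]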
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMasks options:\n --noise_mask NOISE_MASK\n Binary mask used to estimate the noise from the DWI.\n --noise_map NOISE_MAP\n Noise map.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "volume", + "volumes", + "volume" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "work", + "and" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "considered", + "are" + ], + [ + "left", + "into" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "total", + "estimated" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "higher", + "interest" + ], + [ + "considered", + "highly", + "highly" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "diffusion", + "diffusion" + ], + [ + "thinking", + "i" + ], + [ + "specific", + "variety", + "various" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "defined", + "function", + "defined" + ], + [ + "long", + "than" + ], + [ + "highly", + "less" + ], + [ + "meaning", + "true", + "true" + ], + [ + "work", + "works" + ], + [ + "signal", + "signal" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "callosum", + "callosum" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "volume", + "volumes", + "volumes" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "corpus", + "corpus" + ], + [ + "lack", + "quality" + ], + [ + "area", + "areas", + "region", + "regions", + "region" + ], + [ + "maps", + "map" + ], + [ + "spatial", + "temporal", + "spatial" + ], + [ + "considered", + "experience", + "large", + "long", + "result", + "variety", + "work", + "working", + "well" + ], + [ + "considered", + "specific", + "variety", + "such" + ] + ], "keywords": [] }, { "name": "scil_dwi_concatenate", "docstring": "Concatenate DWI, bval and bvecs together. File must be specified in matching\norder. 
Default data type will be the same as the first input DWI.\n\nFormerly: scil_concatenate_dwi.py", "help": "usage: scil_dwi_concatenate.py [-h] [--in_dwis IN_DWIS [IN_DWIS ...]]\n [--in_bvals IN_BVALS [IN_BVALS ...]]\n [--in_bvecs IN_BVECS [IN_BVECS ...]]\n [--data_type DATA_TYPE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dwi out_bval out_bvec\n\nConcatenate DWI, bval and bvecs together. File must be specified in matching\norder. Default data type will be the same as the first input DWI.\n\nFormerly: scil_concatenate_dwi.py\n\npositional arguments:\n out_dwi The name of the output DWI file.\n out_bval The name of the output b-values file (.bval).\n out_bvec The name of the output b-vectors file (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n The DWI file (.nii) to concatenate.\n --in_bvals IN_BVALS [IN_BVALS ...]\n The b-values files in FSL format (.bval).\n --in_bvecs IN_BVECS [IN_BVECS ...]\n The b-vectors files in FSL format (.bvec).\n --data_type DATA_TYPE\n Data type of the output image. Use the format: uint8, int16, int/float32, int/float64.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "work", + "and" + ], + [ + "order", + "order" + ], + [ + "working", + "together" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "parameters", + "specified" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "methods", + "using" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ] + ], "keywords": [] }, { "name": "scil_dwi_convert_FDF", "docstring": "Converts a Varian FDF file or directory to a nifti file.\nIf the procpar contains diffusion information, it will be saved as bval and\nbvec in the same folder as the output file.\n\nex: scil_dwi_convert_FDF.py semsdw/b0_folder/ semsdw/dwi_folder/ dwi.nii.gz --bval dwi.bval --bvec dwi.bvec -f\n\nFormerly: scil_convert_fdf.py", "help": "usage: scil_dwi_convert_FDF.py [-h] [--bval BVAL] [--bvec BVEC]\n [--flip dimension [dimension ...]]\n [--swap dimension [dimension ...]]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_b0_path in_dwi_path out_path\n\nConverts a Varian FDF file or directory to a nifti file.\nIf the procpar contains diffusion information, it will be saved as bval and\nbvec in the same folder as the output file.\n\nex: scil_dwi_convert_FDF.py semsdw/b0_folder/ semsdw/dwi_folder/ dwi.nii.gz --bval dwi.bval --bvec dwi.bvec -f\n\nFormerly: scil_convert_fdf.py\n\npositional arguments:\n in_b0_path Path to the b0 FDF file or folder to convert.\n in_dwi_path Path to the DWI FDF file or folder to convert.\n out_path Path to the nifti file to write 
on disk.\n\noptions:\n -h, --help show this help message and exit\n --bval BVAL Path to the bval file to write on disk.\n --bvec BVEC Path to the bvec file to write on disk.\n --flip dimension [dimension ...]\n The axes you want to flip. eg: to flip the x and y axes use: x y. [None]\n --swap dimension [dimension ...]\n The axes you want to swap. eg: to swap the x and y axes use: x y. [None]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "diffusion", + "diffusion" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "methods", + "using" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ] + ], "keywords": [] }, { "name": "scil_dwi_detect_volume_outliers", "docstring": "This script simply finds the 3 closest angular neighbors of each direction\n(per shell) and compute the voxel-wise correlation.\nIf the angles or correlations to neighbors are below the shell average (by\nargs.std_scale x STD) it will flag the volume as a potential outlier.\n\nThis script supports multi-shells, but each shell is independant and detected\nusing the --b0_threshold parameter.\n\nThis script can be run before any processing to identify potential problem\nbefore launching pre-processing.", "help": "usage: scil_dwi_detect_volume_outliers.py [-h] [--std_scale STD_SCALE]\n [--b0_threshold thr]\n [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nThis script simply finds the 3 closest angular neighbors of each direction\n(per shell) and compute the voxel-wise correlation.\nIf the angles or correlations to neighbors are below the shell average (by\nargs.std_scale x STD) it will flag the volume as a potential outlier.\n\nThis script supports multi-shells, but each shell is independant and detected\nusing the --b0_threshold parameter.\n\nThis script can be run before any processing to identify potential problem\nbefore launching pre-processing.\n\npositional arguments:\n in_dwi The DWI file (.nii) to concatenate.\n in_bval The b-values files in FSL format (.bval).\n in_bvec The b-vectors files in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --std_scale STD_SCALE\n How many deviation from the mean are required to be considered an outlier. [2.0]\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. 
To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [], + "synonyms": [ + [ + "step", + "continue" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "order", + "required" + ], + [ + "average", + "per" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "parameter", + "parameters", + "parameter" + ], + [ + "methods", + "use" + ], + [ + "direction", + "direction" + ], + [ + "work", + "find" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "processing", + "processing" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "matter", + "question", + "thinking", + "true", + "work", + "working", + "how" + ], + [ + "long", + "with" + ], + [ + "average", + "average" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "supported", + "supports" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "methods", + "using" + ], + [ + "thinking", + "simply" + ], + [ + "exist", + "exists" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "question", + "problem" + ], + [ + "left", + "before" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "large", + "work", + "many" + ], + [ + "future", + "possibility", + "potential", + "potential" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "lack", + "minimal" + ], + [ + "order", + "allow" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ] + ], "keywords": [] }, { "name": 
"scil_dwi_extract_b0", "docstring": "Extract B0s from DWI, based on the bval and bvec information.\n\nThe default behavior is to save the first b0 of the series.\n\nFormerly: scil_extract_b0.py", "help": "usage: scil_dwi_extract_b0.py [-h]\n [--all | --mean | --cluster-mean | --cluster-first]\n [--block-size INT] [--single-image]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_b0\n\nExtract B0s from DWI, based on the bval and bvec information.\n\nThe default behavior is to save the first b0 of the series.\n\nFormerly: scil_extract_b0.py\n\npositional arguments:\n in_dwi DWI Nifti image.\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-values filename, in FSL format (.bvec).\n out_b0 Output b0 file(s).\n\noptions:\n -h, --help show this help message and exit\n --block-size INT, -s INT\n Load the data using this block size. Useful\n when the data is too large to be loaded in memory.\n --single-image If output b0 volume has multiple time points, only outputs a single \n image instead of a numbered series of images.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nOptions in the case of multiple b0s.:\n --all Extract all b0s. 
Index number will be appended to the output file.\n --mean Extract mean b0.\n --cluster-mean Extract mean of each continuous cluster of b0s.\n --cluster-first Extract first b0 of each continuous cluster of b0s.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "step", + "continue" + ], + [ + "memory", + "memory" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "too" + ], + [ + "image", + "images" + ], + [ + "methods", + "use" + ], + [ + "work", + "find" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "tool", + "useful" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "thinking", + "i" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "large", + "larger", + "large" + ], + [ + "long", + "with" + ], + [ + "long", + "result", + "work", + "working", + "time" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "methods", + "using" + ], + [ + "order", + "work", + "instead" + ], + [ + "exist", + "exists" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "individual", + "each" + ], + [ + "larger", + "size", + "size" + ], + [ + "lack", + "minimal" + ], + [ + "order", + "allow" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "matter", + "question", + "case" + ], + [ + "based", + "based" + ] + ], "keywords": [] }, { "name": "scil_dwi_extract_shell", "docstring": "Extracts the DWI volumes that are on specific b-value shells. Many shells\ncan be extracted at once by specifying multiple b-values. The extracted\nvolumes are in the same order as in the original file.\n\nIf the b-values of a shell are not all identical, use the --tolerance\nargument to adjust the accepted interval. For example, a b-value of 2000\nand a tolerance of 20 will extract all volumes with a b-values from 1980 to\n2020.\n\nFiles that are too large to be loaded in memory can still be processed by\nsetting the --block-size argument. 
A block size of X means that X DWI volumes\nare loaded at a time for processing.\n\nFormerly: scil_extract_dwi_shell.py", "help": "usage: scil_dwi_extract_shell.py [-h] [--out_indices OUT_INDICES]\n [--block-size INT] [--tolerance INT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_bvals_to_extract\n [in_bvals_to_extract ...] out_dwi out_bval\n out_bvec\n\nExtracts the DWI volumes that are on specific b-value shells. Many shells\ncan be extracted at once by specifying multiple b-values. The extracted\nvolumes are in the same order as in the original file.\n\nIf the b-values of a shell are not all identical, use the --tolerance\nargument to adjust the accepted interval. For example, a b-value of 2000\nand a tolerance of 20 will extract all volumes with a b-values from 1980 to\n2020.\n\nFiles that are too large to be loaded in memory can still be processed by\nsetting the --block-size argument. A block size of X means that X DWI volumes\nare loaded at a time for processing.\n\nFormerly: scil_extract_dwi_shell.py\n\npositional arguments:\n in_dwi The DW image file to split.\n in_bval The b-values file in FSL format (.bval).\n in_bvec The b-vectors file in FSL format (.bvec).\n in_bvals_to_extract The list of b-values to extract. For example 0 2000.\n out_dwi The name of the output DWI file.\n out_bval The name of the output b-value file (.bval).\n out_bvec The name of the output b-vector file (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --out_indices OUT_INDICES\n Optional filename for valid indices in input dwi volume\n --block-size INT, -s INT\n Loads the data using this block size. Useful\n when the data is too large to be loaded in memory.\n --tolerance INT, -t INT\n The tolerated gap between the b-values to extract\n and the actual b-values. [20]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
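The tolerance logic in scil_dwi_extract_shell amounts to a boolean selection on the b-values. A minimal sketch, assuming dwi_data (an [x, y, z, n] array), bvals and bvecs (3 x n, FSL convention) are already loaded:

    import numpy as np

    bvals = np.loadtxt('dwi.bval')       # hypothetical file name
    targets, tol = [0, 2000], 20         # per the example above: b=2000 keeps [1980, 2020]

    keep = np.zeros(len(bvals), dtype=bool)
    for b in targets:
        keep |= np.abs(bvals - b) <= tol

    # Boolean indexing preserves the original volume order, as the help states.
    dwi_shell = dwi_data[..., keep]
    bvals_shell, bvecs_shell = bvals[keep], bvecs[:, keep]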
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "parameter", + "vector" + ], + [ + "memory", + "memory" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "clear", + "long", + "too" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "question", + "argument" + ], + [ + "invalid", + "valid", + "valid" + ], + [ + "specific", + "specific" + ], + [ + "processing", + "processing" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "tool", + "useful" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "large", + "larger", + "large" + ], + [ + "long", + "with" + ], + [ + "long", + "result", + "work", + "working", + "time" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "methods", + "using" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "left", + "long", + "work", + "once" + ], + [ + "considered", + "is" + ], + [ + "large", + "work", + "many" + ], + [ + "specific", + "actual" + ], + [ + "larger", + "size", + "size" + ], + [ + "volume", + "volumes", + "volumes" + ], + [ + "clear", + "long", + "work", + "still" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "meaning", + "order", + "result", + "step", + "true", + "means" + ] + ], "keywords": [] }, { "name": "scil_dwi_powder_average", "docstring": "Script to compute powder average (mean diffusion weighted image) from set of\ndiffusion images.\n\nBy default will output an average image calculated from all images with\nnon-zero bvalue.\n\nSpecify --bvalue to output an image for a single shell\n\nScript currently does not take into account the diffusion gradient directions\nbeing averaged.\n\nFormerly: scil_compute_powder_average.py", "help": "usage: scil_dwi_powder_average.py [-h] [-f] [--mask file] [--b0_thr B0_THR]\n [--shells SHELLS [SHELLS ...]]\n [--shell_thr SHELL_THR]\n [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval out_avg\n\nScript to compute powder average (mean diffusion weighted image) from set of\ndiffusion images.\n\nBy default will output an average image calculated from all images with\nnon-zero bvalue.\n\nSpecify --bvalue to output an image for a single shell\n\nScript currently does not take into account the diffusion gradient directions\nbeing averaged.\n\nFormerly: scil_compute_powder_average.py\n\npositional arguments:\n in_dwi Path of the input 
diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n out_avg Path of the output file.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --mask file Path to a binary mask.\n Only data inside the mask will be used for powder avg. (Default: None)\n --b0_thr B0_THR Exclude b0 volumes from powder average with bvalue less than specified threshold.\n (Default: remove volumes with bvalue < 50\n --shells SHELLS [SHELLS ...]\n bvalue (shells) to include in powder average passed as a list \n (e.g. --shells 1000 2000). If not specified will include all volumes with a non-zero bvalue.\n --shell_thr SHELL_THR\n Include volumes with bvalue +- the specified threshold.\n (Default: [50]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [], + "synonyms": [ + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "considered", + "involved", + "being" + ], + [ + "left", + "into" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "image", + "images" + ], + [ + "order", + "set" + ], + [ + "direction", + "directions" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "diffusion", + "diffusion" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "clear", + "order", + "step", + "work", + "take" + ], + [ + "variety", + "include" + ], + [ + "long", + "with" + ], + [ + "average", + "average" + ], + [ + "parameters", + "specified" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "total", + "50" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "long", + "than" + ], + [ + "highly", + "less" + ], + [ + "pass", + "passed" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "matter", + "question", + "does" + ], + [ + "weighted", + "weighted" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "volume", + "volumes", + "volumes" + ], + [ + "work", + "all" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ] + ], "keywords": [] }, { "name": "scil_dwi_prepare_eddy_command", "docstring": "Prepare a typical command for eddy and create the necessary files. When using\nmultiple acquisitions and/or opposite phase directions, images, b-values and\nb-vectors should be merged together using scil_dwi_concatenate.py. 
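Returning to scil_dwi_powder_average above: the powder average is the plain mean over the diffusion-weighted volumes. A minimal sketch, assuming dwi_data and bvals are already loaded and ignoring the optional mask:

    import numpy as np

    # Powder average: mean of the non-b0 volumes only.
    dw = bvals > 50                          # default b0 exclusion threshold above
    powder_avg = dwi_data[..., dw].mean(axis=-1)

    # With --shells, the same mean restricted to one shell (+/- shell_thr):
    shell = np.abs(bvals - 1000) <= 50
    shell_avg = dwi_data[..., shell].mean(axis=-1)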
If using\ntopup prior to calling this script, images should be concatenated in the same\norder as the b0s used with prepare_topup.\n\nFormerly: scil_prepare_eddy_command.py", "help": "usage: scil_dwi_prepare_eddy_command.py [-h] [--n_reverse N_REVERSE]\n [--topup TOPUP]\n [--topup_params TOPUP_PARAMS]\n [--eddy_cmd {eddy_openmp,eddy_cuda,eddy_cuda8.0,eddy_cuda9.1,eddy_cuda10.2,eddy,eddy_cpu}]\n [--b0_thr B0_THR]\n [--encoding_direction {x,y,z}]\n [--readout READOUT]\n [--slice_drop_correction]\n [--lsr_resampling]\n [--out_directory OUT_DIRECTORY]\n [--out_prefix OUT_PREFIX]\n [--out_script] [--fix_seed]\n [--eddy_options EDDY_OPTIONS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bvals in_bvecs in_mask\n\nPrepare a typical command for eddy and create the necessary files. When using\nmultiple acquisitions and/or opposite phase directions, images, b-values and\nb-vectors should be merged together using scil_dwi_concatenate.py. If using\ntopup prior to calling this script, images should be concatenated in the same\norder as the b0s used with prepare_topup.\n\nFormerly: scil_prepare_eddy_command.py\n\npositional arguments:\n in_dwi Input DWI Nifti image. If using multiple acquisition and/or opposite phase directions, please merge in the same order as for prepare_topup using scil_dwi_concatenate.py.\n in_bvals Input b-values file in FSL format.\n in_bvecs Input b-vectors file in FSL format.\n in_mask Binary brain mask.\n\noptions:\n -h, --help show this help message and exit\n --n_reverse N_REVERSE\n Number of reverse phase volumes included in the DWI image [0].\n --topup TOPUP Topup output name. If given, apply topup during eddy.\n Should be the same as --out_prefix from scil_dwi_prepare_topup_command.py.\n --topup_params TOPUP_PARAMS\n Parameters file (typically named acqparams) used to run topup.\n --eddy_cmd {eddy_openmp,eddy_cuda,eddy_cuda8.0,eddy_cuda9.1,eddy_cuda10.2,eddy,eddy_cpu}\n Eddy command [eddy_openmp].\n --b0_thr B0_THR All b-values with values less than or equal to b0_thr are considered\n as b0s i.e. without diffusion weighting [20].\n --encoding_direction {x,y,z}\n Acquisition direction, default is AP-PA [y].\n --readout READOUT Total readout time from the DICOM metadata [0.062].\n --slice_drop_correction\n If set, will activate eddy's outlier correction,\n which includes slice drop correction.\n --lsr_resampling Perform least-square resampling, allowing eddy to combine forward and reverse phase acquisitions for better reconstruction. Only works if directions and b-values are identical in both phase direction.\n --out_directory OUT_DIRECTORY\n Output directory for eddy files [.].\n --out_prefix OUT_PREFIX\n Prefix of the eddy-corrected DWI [dwi_eddy_corrected].\n --out_script If set, will output a .sh script (eddy.sh).\n else, will output the lines to the terminal [False].\n --fix_seed If set, will use the fixed seed strategy for eddy.\n Enhances reproducibility.\n --eddy_options EDDY_OPTIONS\n Additional options you want to use to run eddy.\n Add these options using quotes (i.e. \"--ol_nstd=6 --mb=4\").\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
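For context on the acqparams file referenced by --topup_params: by standard FSL convention (the script's exact output may differ), topup and eddy expect one row per phase-encoding scheme, with the first three columns giving the phase-encoding vector and the fourth the total readout time. With the defaults shown above (--encoding_direction y, --readout 0.062) plus a reversed acquisition, the file would typically contain:

    0  1  0  0.062
    0 -1  0  0.062

The forward (AP) row comes first and the reversed (PA) row second, matching the order in which the b0s were merged for prepare_topup.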
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "working", + "together" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "image", + "images" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "direction", + "direction" + ], + [ + "order", + "set" + ], + [ + "thinking", + "you" + ], + [ + "direction", + "opposite" + ], + [ + "higher", + "increase", + "drop" + ], + [ + "direction", + "directions" + ], + [ + "long", + "a" + ], + [ + "experience", + "thinking", + "work", + "working", + "better" + ], + [ + "applied", + "apply" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "diffusion", + "diffusion" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "create" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "contrast", + "typical" + ], + [ + "held", + "in" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "long", + "result", + "work", + "working", + "time" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "long", + "than" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "highly", + "less" + ], + [ + "work", + "works" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "matter", + "thinking", + "else" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "step", + "forward" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "volume", + "volumes", + "volumes" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "false", + "false" + ], + [ + "increase", + "total", + "total" + ], + [ + "assigned", + "command" + ], + [ + "order", + "necessary" + ] + ], "keywords": [] }, { "name": "scil_dwi_prepare_topup_command", "docstring": "Prepare a typical command for topup and create the necessary files.\nThe reversed b0 must be in a different file.\n\nFormerly: scil_prepare_topup_command.py", "help": "usage: scil_dwi_prepare_topup_command.py [-h] [--config CONFIG] [--synb0]\n [--encoding_direction {x,y,z}]\n [--readout READOUT]\n [--out_b0s OUT_B0S]\n [--out_directory OUT_DIRECTORY]\n [--out_prefix OUT_PREFIX]\n [--out_params OUT_PARAMS]\n [--out_script]\n [--topup_options TOPUP_OPTIONS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_forward_b0 in_reverse_b0\n\nPrepare a typical command for topup and create the 
necessary files.\nThe reversed b0 must be in a different file.\n\nFormerly: scil_prepare_topup_command.py\n\npositional arguments:\n in_forward_b0 Input b0 Nifti image with forward phase encoding.\n in_reverse_b0 Input b0 Nifti image with reversed phase encoding.\n\noptions:\n -h, --help show this help message and exit\n --config CONFIG Topup config file [b02b0.cnf].\n --synb0 If set, will use SyNb0 custom acqparams file.\n --encoding_direction {x,y,z}\n Acquisition direction of the forward b0 image, default is AP [y].\n --readout READOUT Total readout time from the DICOM metadata [0.062].\n --out_b0s OUT_B0S Output fused b0 file [fused_b0s.nii.gz].\n --out_directory OUT_DIRECTORY\n Output directory for topup files [.].\n --out_prefix OUT_PREFIX\n Prefix of the topup results [topup_results].\n --out_params OUT_PARAMS\n Filename for the acquisition parameters file [acqparams.txt].\n --out_script If set, will output a .sh script (topup.sh).\n else, will output the lines to the terminal [False].\n --topup_options TOPUP_OPTIONS\n Additional options you want to use to run topup.\n Add these options using quotes (i.e. \"--fwhm=6 --miter=4\").\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "individual", + "specific", + "unique", + "variety", + "different" + ], + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "direction", + "direction" + ], + [ + "order", + "set" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "create" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "contrast", + "typical" + ], + [ + "held", + "in" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "long", + "with" + ], + [ + "long", + "result", + "work", + "working", + "time" + ], + [ + "methods", + "using" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "matter", + "thinking", + "else" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "step", + "forward" + ], + [ + "result", + "results" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "false", + "false" + ], + [ + "increase", + "total", + "total" + ], + [ + "assigned", + "command" + ], + [ + "order", + "necessary" + ] + ], "keywords": [] }, { "name": "scil_dwi_reorder_philips", "docstring": "Re-order gradient according to original table (Philips)\nThis script is not needed for version 5.6 and higher\n\nFormerly: scil_reorder_dwi_philips.py", "help": "usage: scil_dwi_reorder_philips.py [-h] [--json JSON]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_table\n out_basename\n\nRe-order gradient according to original table (Philips)\nThis script is not needed for version 5.6 and higher\n\nFormerly: 
scil_reorder_dwi_philips.py\n\npositional arguments:\n in_dwi Input dwi file.\n in_bval Input bval FSL format.\n in_bvec Input bvec FSL format.\n in_table Original philips table - first line is skipped.\n out_basename Basename output file.\n\noptions:\n -h, --help show this help message and exit\n --json JSON If you give a json file, it will check if you need to reorder your Philips dwi.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "work", + "and" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "thinking", + "you" + ], + [ + "high", + "higher", + "increase", + "level", + "higher" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "methods", + "using" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "clear", + "give" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "create", + "lack", + "step", + "work", + "working", + "need" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "based", + "reported", + "according" + ] + ], "keywords": [] }, { "name": "scil_dwi_split_by_indices", "docstring": "Splits the DWI image at certain indices along the last dimension (b-values).\nMany indices can be given at once by specifying multiple values. The splited\nvolumes are in the same order as in the original file. Also outputs the\ncorresponding .bval and .bvec files.\n\nThis script can be useful for splitting images at places where a b-value\nextraction does not work. For instance, if one wants to split the x first\nb-1500s from the rest of the b-1500s in an image, simply put x as an index.\n\nFormerly: scil_split_image.py", "help": "usage: scil_dwi_split_by_indices.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_basename\n split_indices [split_indices ...]\n\nSplits the DWI image at certain indices along the last dimension (b-values).\nMany indices can be given at once by specifying multiple values. The splited\nvolumes are in the same order as in the original file. Also outputs the\ncorresponding .bval and .bvec files.\n\nThis script can be useful for splitting images at places where a b-value\nextraction does not work. For instance, if one wants to split the x first\nb-1500s from the rest of the b-1500s in an image, simply put x as an index.\n\nFormerly: scil_split_image.py\n\npositional arguments:\n in_dwi The DW image file to split.\n in_bval The b-values file in FSL format (.bval).\n in_bvec The b-vectors file in FSL format (.bvec).\n out_basename The basename of the output files. Indices number will be appended to out_basename. For example, if split_indices were 3 10, the files would be saved as out_basename_0_2, out_basename_3_10, out_basename_11_20, where the size of the last dimension is 21 in this example.\n split_indices The list of indices where to split the image. For example 3 10. 
This would split the image in three parts, such as [:3], [3:10], [10:]. Indices must be in increasing order.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "work", + "working", + "work" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "image", + "images" + ], + [ + "long", + "a" + ], + [ + "tool", + "useful" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "last" + ], + [ + "held", + "in" + ], + [ + "areas", + "places" + ], + [ + "clear", + "left", + "work", + "put" + ], + [ + "methods", + "using" + ], + [ + "thinking", + "simply" + ], + [ + "contrast", + "specific", + "subject", + "instance" + ], + [ + "area", + "main", + "along" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "left", + "rest" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "left", + "long", + "work", + "once" + ], + [ + "clear", + "considered", + "individual", + "lack", + "matter", + "result", + "specific", + "subject", + "certain" + ], + [ + "large", + "work", + "many" + ], + [ + "area", + "work", + "where" + ], + [ + "larger", + "size", + "size" + ], + [ + "matter", + "question", + "does" + ], + [ + "volume", + "volumes", + "volumes" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "areas", + "parts" + ], + [ + "greater", + "higher", + "increase", + "lack", + "increasing" + ], + [ + "considered", + "specific", + "variety", + "such" + ] + ], "keywords": [] }, { "name": "scil_dwi_to_sh", "docstring": "Script to compute the SH coefficient directly on the raw DWI signal.\n\nFormerly: scil_compute_sh_from_signal.py", "help": "usage: scil_dwi_to_sh.py [-h] [--sh_order SH_ORDER]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--smooth SMOOTH] [--use_attenuation] [--mask MASK]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_sh\n\nScript to compute the SH coefficient directly on the raw DWI signal.\n\nFormerly: scil_compute_sh_from_signal.py\n\npositional arguments:\n in_dwi Path of the dwi 
volume.\n in_bval Path of the b-value file, in FSL format.\n in_bvec Path of the b-vector file, in FSL format.\n out_sh Name of the output SH file to save.\n\noptions:\n -h, --help show this help message and exit\n --sh_order SH_ORDER SH order to fit (int). [4]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --smooth SMOOTH Lambda-regularization coefficient in the SH fit (float). [0.006]\n --use_attenuation If set, will use signal attenuation before fitting the SH (i.e. divide by the b0).\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computations and reconstruction \n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "parameter", + "vector" + ], + [ + "step", + "continue" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "work", + "find" + ], + [ + "order", + "set" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "exist", + "exists" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "signal", + "signal" + ], + [ + "left", + "before" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "lack", + "minimal" + ], + [ + "order", + "allow" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_fodf_max_in_ventricles", "docstring": "Script to compute the maximum fODF in the ventricles. The ventricles are\nestimated from an MD and FA threshold.\n\nThis allows clipping the noise of fODF using an absolute threshold.\n\nFormerly: scil_compute_fodf_max_in_ventricles.py", "help": "usage: scil_fodf_max_in_ventricles.py [-h] [--fa_threshold FA_THRESHOLD]\n [--md_threshold MD_THRESHOLD]\n [--max_value_output file]\n [--mask_output file] [--small_dims]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n fODFs FA MD\n\nScript to compute the maximum fODF in the ventricles. 
The ventricles are\nestimated from an MD and FA threshold.\n\nThis allows clipping the noise of fODF using an absolute threshold.\n\nFormerly: scil_compute_fodf_max_in_ventricles.py\n\npositional arguments:\n fODFs Path of the fODF volume in spherical harmonics (SH).\n FA Path to the FA volume.\n MD Path to the mean diffusivity (MD) volume.\n\noptions:\n -h, --help show this help message and exit\n --fa_threshold FA_THRESHOLD\n Maximal threshold of FA (voxels under that threshold are considered \n for evaluation. [0.1]).\n --md_threshold MD_THRESHOLD\n Minimal threshold of MD in mm2/s (voxels above that threshold are \n considered for evaluation. [0.003]).\n --max_value_output file\n Output path for the text file containing the value. If not set, the \n file will not be saved.\n --mask_output file Output path for the ventricle mask. If not set, the mask \n will not be saved.\n --small_dims If set, takes the full range of data to search the max fodf amplitude \n in ventricles. Useful when the data has small dimensions.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Dell'Acqua, Flavio, et al. \"Can spherical deconvolution provide more\n information than fiber orientations? 
Hindrance modulated orientational\n anisotropy, a true-tract specific index to characterize white matter\n diffusion.\" Human brain mapping 34.10 (2013): 2464-2483.\n", - "synonyms": [], + "synonyms": [ + [ + "animal", + "human", + "human" + ], + [ + "tract", + "tracts", + "tract" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "large", + "larger", + "small" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "long", + "work", + "more" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "fibre", + "fiber" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "order", + "set" + ], + [ + "total", + "estimated" + ], + [ + "specific", + "specific" + ], + [ + "maps", + "mapping" + ], + [ + "long", + "a" + ], + [ + "tool", + "useful" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "diffusion", + "diffusion" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "application", + "allows" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "create", + "provide" + ], + [ + "methods", + "using" + ], + [ + "long", + "than" + ], + [ + "meaning", + "true", + "true" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "lack", + "minimal" + ], + [ + "level", + "above" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "long", + "full" + ] + ], "keywords": [] }, { "name": "scil_fodf_memsmt", "docstring": "Script to compute multi-encoding multi-shell multi-tissue (memsmt)\nConstrained Spherical Deconvolution ODFs.\n\nIn order to operate, the script only needs the data from one type of b-tensor\nencoding. However, giving only a spherical one will not produce good fODFs, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest to always use at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombinaison with other encodings. 
Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable.\n\nAll of `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\n>>> scil_fodf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_fodf.py", "help": "usage: scil_fodf_memsmt.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--sh_order int] [--mask MASK]\n [--tolerance tol] [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--not_all] [--wm_out_fODF file]\n [--gm_out_fODF file] [--csf_out_fODF file]\n [--vf file] [--vf_rgb file]\n in_wm_frf in_gm_frf in_csf_frf\n\nScript to compute multi-encoding multi-shell multi-tissue (memsmt)\nConstrained Spherical Deconvolution ODFs.\n\nIn order to operate, the script only needs the data from one type of b-tensor\nencoding. However, giving only a spherical one will not produce good fODFs, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest to always use at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombinaison with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable.\n\nAll of `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\n>>> scil_fodf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. 
Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_fodf.py\n\npositional arguments:\n in_wm_frf Text file of WM response function.\n in_gm_frf Text file of GM response function.\n in_csf_frf Text file of CSF response function.\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as dwi, bval and bvec inputs.\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags. 
(Default: False)\n\nFile flags:\n --wm_out_fODF file Output filename for the WM fODF coefficients.\n --gm_out_fODF file Output filename for the GM fODF coefficients.\n --csf_out_fODF file Output filename for the CSF fODF coefficients.\n --vf file Output filename for the volume fractions map.\n --vf_rgb file Output filename for the volume fractions map in rgb.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "true", + "always" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "brain", + "tissue" + ], + [ + "considered", + "are" + ], + [ + "clear", + "considered", + "highly", + "long", + "matter", + "true", + "work", + "very" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "papers", + "paper" + ], + [ + "work", + "working", + "work" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "work", + "find" + ], + [ + "order", + "set" + ], + [ + "clear", + "matter", + "true", + "seems" + ], + [ + "long", + "have" + ], + [ + "specific", + "specific" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "diffusion", + "diffusion" + ], + [ + "imaging", + "mri" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "action", + "response" + ], + [ + "defined", + "function", + "functional", + "functions", + "function" + ], + [ + "create", + "produce" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "parameters", + "specified" + ], + [ + "clear", + "long", + "matter", + "result", + "work", + "so" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "clear", + "experience", + "thinking", + "true", + "good" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "variety", + "work", + "other" + ], + [ + "future", + "current" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "exist", + "exists" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "methods", + "process", + "processes", + 
"processes" + ], + [ + "supported", + "supported" + ], + [ + "variance", + "variance" + ], + [ + "shape", + "shapes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "considered", + "form", + "meaning", + "order", + "result", + "thus" + ], + [ + "order", + "allow" + ], + [ + "step", + "start" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "step", + "true", + "work", + "yet" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "false", + "false" + ], + [ + "based", + "based" + ], + [ + "maps", + "map" + ], + [ + "clear", + "matter", + "question", + "thinking", + "sure" + ], + [ + "clear", + "considered", + "result", + "however" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "shape", + "structure", + "shape" + ], + [ + "indicating", + "suggest" + ], + [ + "considered", + "key", + "work", + "important" + ] + ], "keywords": [] }, { "name": "scil_fodf_metrics", "docstring": "Script to compute the maximum Apparent Fiber Density (AFD), the fiber ODFs\norientations, values and indices (peaks, peak_values, peak_indices), the Number\nof Fiber Orientations (NuFO) maps from fiber ODFs and the RGB map.\n\nAFD_max map is the maximal fODF amplitude for each voxel.\n\nNuFO is the the number of maxima of the fODF with an ABSOLUTE amplitude above\nthe threshold set using --at, AND an amplitude above the RELATIVE threshold\nset using --rt.\n\nThe --at argument should be set to a value which is 1.5 times the maximal\nvalue of the fODF in the ventricules. This can be obtained with the\nscil_fodf_max_in_ventricles.py script.\n\nIf the --abs_peaks_and_values argument is set, the peaks are all normalized\nand the peak_values are equal to the actual fODF amplitude of the peaks. By\ndefault, the script max-normalizes the peak_values for each voxel and\nmultiplies the peaks by peak_values.\n\nBy default, will output all possible files, using default names. Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Raffelt et al. 
NeuroImage 2012] and [Dell'Acqua et al HBM 2013] for the\ndefinitions.\n\nFormerly: scil_compute_fodf_metrics.py", "help": "usage: scil_fodf_metrics.py [-h] [--sphere string] [--mask] [--at A_THRESHOLD]\n [--rt R_THRESHOLD] [--abs_peaks_and_values]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR] [-f]\n [--not_all] [--afd_max file] [--afd_total file]\n [--afd_sum file] [--nufo file] [--rgb file]\n [--peaks file] [--peak_values file]\n [--peak_indices file]\n in_fODF\n\nScript to compute the maximum Apparent Fiber Density (AFD), the fiber ODFs\norientations, values and indices (peaks, peak_values, peak_indices), the Number\nof Fiber Orientations (NuFO) maps from fiber ODFs and the RGB map.\n\nAFD_max map is the maximal fODF amplitude for each voxel.\n\nNuFO is the number of maxima of the fODF with an ABSOLUTE amplitude above\nthe threshold set using --at, AND an amplitude above the RELATIVE threshold\nset using --rt.\n\nThe --at argument should be set to a value which is 1.5 times the maximal\nvalue of the fODF in the ventricles. This can be obtained with the\nscil_fodf_max_in_ventricles.py script.\n\nIf the --abs_peaks_and_values argument is set, the peaks are all normalized\nand the peak_values are equal to the actual fODF amplitude of the peaks. By\ndefault, the script max-normalizes the peak_values for each voxel and\nmultiplies the peaks by peak_values.\n\nBy default, will output all possible files, using default names. Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Raffelt et al. NeuroImage 2012] and [Dell'Acqua et al HBM 2013] for the\ndefinitions.\n\nFormerly: scil_compute_fodf_metrics.py\n\npositional arguments:\n in_fODF Path of the fODF volume in spherical harmonics (SH).\n\noptions:\n -h, --help show this help message and exit\n --sphere string Discrete sphere to use in the processing [repulsion724].\n --mask Path to a binary mask. Only the data inside the mask\n will be used for computations and reconstruction [None].\n --at A_THRESHOLD Absolute threshold on fODF amplitude. This value should be set to\n approximately 1.5 to 2 times the maximum fODF amplitude in isotropic voxels\n (i.e. ventricles).\n Use scil_fodf_max_in_ventricles.py to find the maximal value.\n See [Dell'Acqua et al HBM 2013] [0.0].\n --rt R_THRESHOLD Relative threshold on fODF amplitude in percentage [0.1].\n --abs_peaks_and_values\n If set, the peak_values are not max-normalized for each voxel, \n but rather they keep the actual fODF amplitude of the peaks. \n Also, the peaks are given as unit directions instead of being proportional to peak_values. [False]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags [False].\n\nFile flags:\n --afd_max file Output filename for the AFD_max map.\n --afd_total file Output filename for the AFD_total map(SH coeff = 0).\n --afd_sum file Output filename for the sum of all peak contributions\n (sum of fODF lobes on the sphere).\n --nufo file Output filename for the NuFO map.\n --rgb file Output filename for the RGB map.\n --peaks file Output filename for the extracted peaks.\n --peak_values file Output filename for the extracted peaks values.\n --peak_indices file Output filename for the generated peaks indices on the sphere.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "lobe", + "lobes", + "lobes" + ], + [ + "considered", + "are" + ], + [ + "considered", + "involved", + "being" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "clear", + "considered", + "create", + "form", + "manner", + "matter", + "result", + "subject", + "thinking", + "true", + "view", + "work", + "rather" + ], + [ + "methods", + "use" + ], + [ + "work", + "find" + ], + [ + "question", + "argument" + ], + [ + "order", + "set" + ], + [ + "specific", + "specific" + ], + [ + "processing", + "processing" + ], + [ + "direction", + "directions" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "average", + "percentage" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "parameters", + "specified" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "order", + "work", + "instead" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "specific", + "actual" + ], + [ + "individual", + "each" + ], + [ + "level", + "above" + ], + [ + "step", + "start" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", 
+ "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "false", + "false" + ], + [ + "maps", + "maps" + ], + [ + "maps", + "map" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_fodf_msmt", "docstring": "Script to compute Multishell Multi-tissue Constrained Spherical Deconvolution\nODFs.\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical\ndeconvolution for improved analysis of multi-shell diffusion\nMRI data. Neuroimage (2014)\n\nFormerly: scil_compute_msmt_fodf.py", "help": "usage: scil_fodf_msmt.py [-h] [--sh_order int] [--mask] [--tolerance tol]\n [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [--not_all] [--wm_out_fODF file]\n [--gm_out_fODF file] [--csf_out_fODF file]\n [--vf file] [--vf_rgb file]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_wm_frf in_gm_frf in_csf_frf\n\nScript to compute Multishell Multi-tissue Constrained Spherical Deconvolution\nODFs.\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical\ndeconvolution for improved analysis of multi-shell diffusion\nMRI data. Neuroimage (2014)\n\nFormerly: scil_compute_msmt_fodf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bval file, in FSL format.\n in_bvec Path of the bvec file, in FSL format.\n in_wm_frf Text file of WM response function.\n in_gm_frf Text file of GM response function.\n in_csf_frf Text file of CSF response function.\n\noptions:\n -h, --help show this help message and exit\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. 
\n Default: [1]\n --not_all If set, only saves the files specified using the file flags. (Default: False)\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nFile flags:\n --wm_out_fODF file Output filename for the WM fODF coefficients.\n --gm_out_fODF file Output filename for the GM fODF coefficients.\n --csf_out_fODF file Output filename for the CSF fODF coefficients.\n --vf file Output filename for the volume fractions map.\n --vf_rgb file Output filename for the volume fractions map in rgb.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "work", + "and" + ], + [ + "brain", + "tissue" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "work", + "find" + ], + [ + "order", + "set" + ], + [ + "specific", + "specific" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "diffusion", + "diffusion" + ], + [ + "imaging", + "mri" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "action", + "response" + ], + [ + "defined", + "function", + "functional", + "functions", + "function" + ], + [ + "long", + "with" + ], + [ + "parameters", + "specified" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "future", + "current" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "exist", + "exists" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "order", + "allow" + ], + [ + "step", + "start" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "false", + "false" + ], + [ + "based", + "based" + ], + [ + "maps", + "map" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_fodf_ssst", "docstring": "Script to compute Constrained Spherical Deconvolution (CSD) fiber ODFs.\n\nSee [Tournier et al. 
NeuroImage 2007]\n\nFormerly: scil_compute_ssst_fodf.py", "help": "usage: scil_fodf_ssst.py [-h] [--sh_order int] [--mask] [--b0_threshold thr]\n [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec frf_file out_fODF\n\nScript to compute Constrained Spherical Deconvolution (CSD) fiber ODFs.\n\nSee [Tournier et al. NeuroImage 2007]\n\nFormerly: scil_compute_ssst_fodf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n frf_file Path of the FRF file\n out_fODF Output path for the fiber ODF coefficients.\n\noptions:\n -h, --help show this help message and exit\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask Path to a binary mask. Only the data inside the mask will be used \n for computations and reconstruction.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "step", + "continue" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "work", + "find" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "diffusion", + "diffusion" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "exist", + "exists" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "lack", + "minimal" + ], + [ + "order", + "allow" + ], + [ + "step", + "start" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_fodf_to_bingham", "docstring": "Script for fitting a Bingham distribution to each fODF lobe, as described\nin [1].\n\nThe Bingham fit is saved, with each Bingham distribution described by 7\ncoefficients (for example, for a maximum number of lobes of 5, the number\nof coefficients is 7 x 5 = 35 -- less than the number of coefficients for\nSH of maximum order 8).\n\nUsing 12 threads, the execution takes approximately 30 minutes for a brain with\n1mm isotropic resolution.\n\nFormerly: scil_fit_bingham_to_fodf.py", "help": "usage: scil_fodf_to_bingham.py [-h] [--max_lobes MAX_LOBES] [--at AT]\n [--rt RT] [--min_sep_angle MIN_SEP_ANGLE]\n [--max_fit_angle MAX_FIT_ANGLE] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR]\n [-f]\n in_sh out_bingham\n\nScript for fitting a Bingham distribution to each fODF lobe, as described\nin [1].\n\nThe Bingham fit is saved, with each Bingham distribution described by 
7\ncoefficients (for example, for a maximum number of lobes of 5, the number\nof coefficients is 7 x 5 = 35 -- less than the number of coefficients for\nSH of maximum order 8).\n\nUsing 12 threads, the execution takes approximately 30 minutes for a brain with\n1mm isotropic resolution.\n\nFormerly: scil_fit_bingham_to_fodf.py\n\npositional arguments:\n in_sh Input SH image.\n out_bingham Output Bingham functions image.\n\noptions:\n -h, --help show this help message and exit\n --max_lobes MAX_LOBES\n Maximum number of lobes per voxel to extract. [5]\n --at AT Absolute threshold for peaks extraction. [0.0]\n --rt RT Relative threshold for peaks extraction. [0.1]\n --min_sep_angle MIN_SEP_ANGLE\n Minimum separation angle between two peaks. [25.0]\n --max_fit_angle MAX_FIT_ANGLE\n Maximum distance in degrees around a peak direction for fitting the Bingham function. [15.0]\n --mask MASK Optional mask file. Only SH inside the mask are fitted.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -f Force overwriting of the output files.\n\n[1] T. W. Riffert, J. Schreiber, A. Anwander, and T. R. Kn\u00f6sche, \u201cBeyond\n fractional anisotropy: Extraction of bundle-specific structural metrics\n from crossing fiber models,\u201d NeuroImage, vol. 100, pp. 176-191, Oct. 2014,\n doi: 10.1016/j.neuroimage.2014.06.015.\n\n[2] J. Schreiber, T. Riffert, A. Anwander, and T. R. Kn\u00f6sche, \u201cPlausibility\n Tracking: A method to evaluate anatomical connectivity and microstructural\n properties along fiber pathways,\u201d NeuroImage, vol. 90, pp. 163-178, Apr.\n 2014, doi: 10.1016/j.neuroimage.2014.01.002.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "methods", + "method" + ], + [ + "average", + "per" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "total", + "90" + ], + [ + "work", + "and" + ], + [ + "lobe", + "lobes", + "lobes" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "fibre", + "fiber" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "direction", + "direction" + ], + [ + "specific", + "specific" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "anatomical", + "anatomical" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "defined", + "function", + "functional", + "functions", + "function" + ], + [ + "pathway", + "pathways", + "pathways" + ], + [ + "long", + "with" + ], + [ + "structural", + "structural" + ], + [ + "methods", + "using" + ], + [ + "area", + "main", + "along" + ], + [ + "long", + "than" + ], + [ + "highly", + "less" + ], + [ + "degree", + "degrees" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", 
+ "image" + ], + [ + "considered", + "is" + ], + [ + "individual", + "each" + ], + [ + "long", + "two" + ], + [ + "step", + "start" + ], + [ + "connectivity", + "connectivity" + ], + [ + "total", + "100" + ], + [ + "lobe", + "lobes", + "occipital", + "parietal", + "lobe" + ], + [ + "function", + "functions", + "functions" + ], + [ + "bundles", + "bundle" + ], + [ + "data", + "tracking", + "tracking" + ], + [ + "examine", + "evaluate" + ] + ], "keywords": [] }, { "name": "scil_freewater_maps", "docstring": "Compute Free Water maps [1] using AMICO.\nThis script supports both single and multi-shell data.\n\nFormerly: scil_compute_freewater.py", "help": "usage: scil_freewater_maps.py [-h] [--mask MASK] [--out_dir OUT_DIR]\n [--b_thr B_THR] [--para_diff PARA_DIFF]\n [--iso_diff ISO_DIFF]\n [--perp_diff_min PERP_DIFF_MIN]\n [--perp_diff_max PERP_DIFF_MAX]\n [--lambda1 LAMBDA1] [--lambda2 LAMBDA2]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--mouse] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec\n\nCompute Free Water maps [1] using AMICO.\nThis script supports both single and multi-shell data.\n\nFormerly: scil_compute_freewater.py\n\npositional arguments:\n in_dwi DWI file.\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-vectors filename, in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Brain mask filename.\n --out_dir OUT_DIR Output directory for the Free Water results. [results]\n --b_thr B_THR Limit value to consider that a b-value is on an\n existing shell. Above this limit, the b-value is\n placed on a new shell. This includes b0s values.\n --mouse If set, use mouse fitting profile.\n --processes NBR Number of sub-processes to start. Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n\nModel options:\n --para_diff PARA_DIFF\n Axial diffusivity (AD) in the CC. [0.0015]\n --iso_diff ISO_DIFF Mean diffusivity (MD) in ventricles. [0.003]\n --perp_diff_min PERP_DIFF_MIN\n Radial diffusivity (RD) minimum. [0.0001]\n --perp_diff_max PERP_DIFF_MAX\n Radial diffusivity (RD) maximum. [0.0007]\n --lambda1 LAMBDA1 First regularization parameter. [0.0]\n --lambda2 LAMBDA2 Second regularization parameter. [0.25]\n\nKernels options:\n --save_kernels DIRECTORY\n Output directory for the COMMIT kernels.\n --load_kernels DIRECTORY\n Input directory where the COMMIT kernels are located.\n --compute_only Compute kernels only, --save_kernels must be used.\n\nReference:\n [1] Pasternak 0, Sochen N, Gur Y, Intrator N, Assaf Y.\n Free water elimination and mapping from diffusion mri.\n Magn Reson Med. 
62 (3) (2009) 717-730.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "parameter", + "parameters", + "parameter" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "limiting", + "limit" + ], + [ + "maps", + "mapping" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "area", + "located" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "diffusion", + "diffusion" + ], + [ + "imaging", + "mri" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "supported", + "supports" + ], + [ + "axial", + "axial" + ], + [ + "methods", + "using" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "area", + "work", + "where" + ], + [ + "result", + "results" + ], + [ + "level", + "above" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "step", + "start" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "maps", + "maps" + ], + [ + "considered", + "possibility", + "question", + "step", + "consider" + ] + ], "keywords": [] }, { "name": "scil_freewater_priors", "docstring": "Synonym for scil_NODDI_priors.py", "help": "usage: scil_freewater_priors.py [-h]\n [--fa_min_single_fiber FA_MIN_SINGLE_FIBER]\n [--fa_max_ventricles FA_MAX_VENTRICLES]\n [--md_min_ventricles MD_MIN_VENTRICLES]\n [--roi_radius ROI_RADIUS]\n [--roi_center pos pos pos]\n [--out_txt_1fiber_para FILE]\n [--out_txt_1fiber_perp FILE]\n [--out_mask_1fiber FILE]\n [--out_txt_ventricles FILE]\n [--out_mask_ventricles FILE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_FA in_AD in_RD in_MD\n\nCompute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py\n\npositional arguments:\n in_FA Path to the FA volume.\n in_AD Path to the axial diffusivity (AD) volume.\n in_RD Path to the radial diffusivity (RD) volume.\n in_MD Path to the mean diffusivity (MD) volume.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics options:\n --fa_min_single_fiber FA_MIN_SINGLE_FIBER\n Minimal threshold of FA (voxels above that threshold are considered in \n the single fiber mask). [0.7]\n --fa_max_ventricles FA_MAX_VENTRICLES\n Maximal threshold of FA (voxels under that threshold are considered in \n the ventricles). [0.1]\n --md_min_ventricles MD_MIN_VENTRICLES\n Minimal threshold of MD in mm2/s (voxels above that threshold are considered \n in the ventricles). 
[0.003]\n\nRegions options:\n --roi_radius ROI_RADIUS\n Radius of the region used to estimate the priors. The roi will be a cube spanning \n from ROI_CENTER in each direction. [20]\n --roi_center pos pos pos\n Center of the roi of size roi_radius used to estimate the priors; a 3-value coordinate. \n If not set, uses the center of the 3D volume.\n\nOutputs:\n --out_txt_1fiber_para FILE\n Output path for the text file containing the single fiber average value of AD.\n If not set, the file will not be saved.\n --out_txt_1fiber_perp FILE\n Output path for the text file containing the single fiber average value of RD.\n If not set, the file will not be saved.\n --out_mask_1fiber FILE\n Output path for single fiber mask. If not set, the mask will not be saved.\n --out_txt_ventricles FILE\n Output path for the text file containing the ventricles average value of MD.\n If not set, the file will not be saved.\n --out_mask_ventricles FILE\n Output path for the ventricule mask.\n If not set, the mask will not be saved.\n\nReference:\n [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n NODDI: practical in vivo neurite orientation dispersion and density\n imaging of the human brain. NeuroImage. 2012 Jul 16;61:1000-16.\n", - "synonyms": [], + "synonyms": [ + [ + "animal", + "human", + "human" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "orientation", + "orientation" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "fibre", + "fiber" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "areas", + "region", + "regions", + "regions" + ], + [ + "direction", + "direction" + ], + [ + "order", + "set" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "average", + "average" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "axial", + "axial" + ], + [ + "methods", + "using" + ], + [ + "vivo", + "vivo" + ], + [ + "imaging", + "imaging" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "individual", + "each" + ], + [ + "larger", + "size", + "size" + ], + [ + "lack", + "minimal" + ], + [ + "level", + "above" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "area", + "areas", + "region", + "regions", + "region" + ] + ], "keywords": [] }, { "name": "scil_frf_mean", "docstring": "Compute the mean Fiber Response Function from a set of individually\ncomputed Response Functions.\n\nThe FRF files are obtained from scil_frf_ssst.py, scil_frf_msmt.py in the\ncase of multi-shell data or scil_frf_memsmt.py in the case of multi-encoding\nmulti-shell data.\n\nFormerly: scil_compute_mean_frf.py", "help": "usage: scil_frf_mean.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n list [list ...] 
file\n\nCompute the mean Fiber Response Function from a set of individually\ncomputed Response Functions.\n\nThe FRF files are obtained from scil_frf_ssst.py, scil_frf_msmt.py in the\ncase of multi-shell data or scil_frf_memsmt.py in the case of multi-encoding\nmulti-shell data.\n\nFormerly: scil_compute_mean_frf.py\n\npositional arguments:\n list List of FRF filepaths.\n file Path of the output mean FRF file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "long", + "a" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "work", + "and" + ], + [ + "held", + "on" + ], + [ + "methods", + "using" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "considered", + "are" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "function", + "functions", + "functions" + ], + [ + "matter", + "question", + "case" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "action", + "response" + ], + [ + "left", + "result", + "when" + ], + [ + "held", + "in" + ], + [ + "considered", + "is" + ], + [ + "defined", + "function", + "functional", + "functions", + "function" + ], + [ + "order", + "set" + ] + ], "keywords": [] }, { "name": "scil_frf_memsmt", "docstring": "Script to estimate response functions for multi-encoding multi-shell\nmulti-tissue (memsmt) constrained spherical deconvolution. In order to operate,\nthe script only needs the data from one type of b-tensor encoding. However,\ngiving only a spherical one will not produce good fiber response functions, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest to always use at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombinaison with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable. Moreover, all of `--in_dwis`, `--in_bvals`, `--in_bvecs` and\n`--in_bdeltas` must have the same number of arguments. Be sure to keep the\nsame order of encodings throughout all these inputs and to set `--in_bdeltas`\naccordingly (IMPORTANT).\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n\nIn the wm, we compute the response function in each voxels where\nthe FA is superior at threshold_fa_wm.\n\nIn the gm (or csf), we compute the response function in each voxels where\nthe FA is below at threshold_fa_gm (or threshold_fa_csf) and where\nthe MD is below threshold_md_gm (or threshold_md_csf).\n\n>>> scil_frf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. 
Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_frf.py", "help": "usage: scil_frf_memsmt.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--mask MASK]\n [--mask_wm MASK_WM] [--mask_gm MASK_GM]\n [--mask_csf MASK_CSF] [--fa_thr_wm FA_THR_WM]\n [--fa_thr_gm FA_THR_GM] [--fa_thr_csf FA_THR_CSF]\n [--md_thr_gm MD_THR_GM] [--md_thr_csf MD_THR_CSF]\n [--min_nvox MIN_NVOX] [--tolerance tol]\n [--skip_b0_check] [--dti_bval_limit DTI_BVAL_LIMIT]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--wm_frf_mask file] [--gm_frf_mask file]\n [--csf_frf_mask file] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n out_wm_frf out_gm_frf out_csf_frf\n\nScript to estimate response functions for multi-encoding multi-shell\nmulti-tissue (memsmt) constrained spherical deconvolution. In order to operate,\nthe script only needs the data from one type of b-tensor encoding. However,\ngiving only a spherical one will not produce good fiber response functions, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest to always use at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombinaison with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable. Moreover, all of `--in_dwis`, `--in_bvals`, `--in_bvecs` and\n`--in_bdeltas` must have the same number of arguments. Be sure to keep the\nsame order of encodings throughout all these inputs and to set `--in_bdeltas`\naccordingly (IMPORTANT).\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n\nIn the wm, we compute the response function in each voxels where\nthe FA is superior at threshold_fa_wm.\n\nIn the gm (or csf), we compute the response function in each voxels where\nthe FA is below at threshold_fa_gm (or threshold_fa_csf) and where\nthe MD is below threshold_md_gm (or threshold_md_csf).\n\n>>> scil_frf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. 
Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_frf.py\n\npositional arguments:\n out_wm_frf Path to the output WM frf file, in .txt format.\n out_gm_frf Path to the output GM frf file, in .txt format.\n out_csf_frf Path to the output CSF frf file, in .txt format.\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as \n dwi, bval and bvec inputs.\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for\n computations and reconstruction. Useful if no tissue masks are available.\n --mask_wm MASK_WM Path to the input WM mask file, used to improve the final WM frf mask.\n --mask_gm MASK_GM Path to the input GM mask file, used to improve the final GM frf mask.\n --mask_csf MASK_CSF Path to the input CSF mask file, used to improve the final CSF frf mask.\n --fa_thr_wm FA_THR_WM\n If supplied, use this threshold to select single WM fiber voxels from \n the FA inside the WM mask defined by mask_wm. \n Each voxel above this threshold will be selected. [0.7]\n --fa_thr_gm FA_THR_GM\n If supplied, use this threshold to select GM voxels from the FA inside \n the GM mask defined by mask_gm. \n Each voxel below this threshold will be selected. [0.2]\n --fa_thr_csf FA_THR_CSF\n If supplied, use this threshold to select CSF voxels from the FA inside \n the CSF mask defined by mask_csf. \n Each voxel below this threshold will be selected. [0.1]\n --md_thr_gm MD_THR_GM\n If supplied, use this threshold to select GM voxels from the MD inside \n the GM mask defined by mask_gm. \n Each voxel below this threshold will be selected. [0.0007]\n --md_thr_csf MD_THR_CSF\n If supplied, use this threshold to select CSF voxels from the MD inside \n the CSF mask defined by mask_csf. \n Each voxel below this threshold will be selected. [0.003]\n --min_nvox MIN_NVOX Minimal number of voxels needed for each tissue masks in order to \n proceed to frf estimation. [100]\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --dti_bval_limit DTI_BVAL_LIMIT\n The highest b-value taken for the DTI model. [1200]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to estimate the \n response functions. The roi will be a cuboid spanning from the middle of \n the volume in each direction with the different radii. The type is either \n an int (e.g. --roi_radii 10) or an array-like (3,) (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the cuboid roi using roi_radii. \n [center of the 3D volume] (e.g. 
--roi_center 66 79 79)\n --wm_frf_mask file Path to the output WM frf mask file, the voxels used to compute the WM frf.\n --gm_frf_mask file Path to the output GM frf mask file, the voxels used to compute the GM frf.\n --csf_frf_mask file Path to the output CSF frf mask file, the voxels used to compute the CSF frf.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "individual", + "specific", + "unique", + "variety", + "different" + ], + [ + "total", + "number" + ], + [ + "true", + "always" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "individuals", + "those" + ], + [ + "work", + "and" + ], + [ + "brain", + "tissue" + ], + [ + "considered", + "are" + ], + [ + "clear", + "considered", + "highly", + "long", + "matter", + "true", + "work", + "very" + ], + [ + "order", + "order" + ], + [ + "result", + "moreover" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "work", + "working", + "work" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "work", + "find" + ], + [ + "direction", + "direction" + ], + [ + "order", + "set" + ], + [ + "clear", + "matter", + "true", + "seems" + ], + [ + "long", + "have" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "highest", + "level", + "highest" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "tool", + "useful" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "inferior", + "superior", + "superior" + ], + [ + "diffusion", + "diffusion" + ], + [ + "imaging", + "mri" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "action", + "response" + ], + [ + "held", + "in" + ], + [ + "defined", + "function", + "functional", + "functions", + "function" + ], + [ + "create", + "produce" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "clear", + "long", + "matter", + "result", + "work", + "so" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "clear", + "experience", + "thinking", + "true", + "good" + ], + [ + "binary", 
+ "binary" + ], + [ + "methods", + "using" + ], + [ + "variety", + "work", + "other" + ], + [ + "clear", + "held", + "work", + "taken" + ], + [ + "future", + "current" + ], + [ + "exist", + "exists" + ], + [ + "defined", + "function", + "defined" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "supported", + "supported" + ], + [ + "variance", + "variance" + ], + [ + "shape", + "shapes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "middle", + "middle" + ], + [ + "area", + "work", + "where" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "blue", + "dark", + "green", + "grey", + "white", + "gray" + ], + [ + "lack", + "minimal" + ], + [ + "level", + "above" + ], + [ + "considered", + "form", + "meaning", + "order", + "result", + "thus" + ], + [ + "order", + "allow" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "step", + "true", + "work", + "yet" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "total", + "100" + ], + [ + "function", + "functions", + "functions" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "based", + "based" + ], + [ + "clear", + "matter", + "question", + "thinking", + "sure" + ], + [ + "clear", + "considered", + "result", + "however" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "shape", + "structure", + "shape" + ], + [ + "indicating", + "suggest" + ], + [ + "considered", + "key", + "work", + "important" + ] + ], "keywords": [] }, { "name": "scil_frf_msmt", "docstring": "Compute response functions for multi-shell multi-tissue (MSMT) constrained\nspherical deconvolution from DWI data.\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n - In the wm, we compute the response function in each voxel where the FA is\n superior at threshold_fa_wm.\n - In the gm (or csf), we compute the response function in each voxel where\n the FA is below at threshold_fa_gm (or threshold_fa_csf) and where the MD\n is below threshold_md_gm (or threshold_md_csf).\n\nWe output one response function file for each tissue, containing the response\nfunction for each b-value (arranged by lines). These are saved as the diagonal\nof the axis-symmetric diffusion tensor (3 e-values) and a mean b0 value.\nFor example, a typical wm_frf is [15e-4, 4e-4, 4e-4, 700], where the tensor\ne-values are (15,4,4)x10^-4 mm^2/s and the mean b0 is 700.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical deconvolution\nfor improved analysis of multi-shell diffusion MRI data. 
Neuroimage (2014)\n\nFormerly: scil_compute_msmt_frf.py", "help": "usage: scil_frf_msmt.py [-h] [--mask MASK] [--mask_wm MASK_WM]\n [--mask_gm MASK_GM] [--mask_csf MASK_CSF]\n [--fa_thr_wm FA_THR_WM] [--fa_thr_gm FA_THR_GM]\n [--fa_thr_csf FA_THR_CSF] [--md_thr_gm MD_THR_GM]\n [--md_thr_csf MD_THR_CSF] [--min_nvox MIN_NVOX]\n [--tolerance TOLERANCE] [--skip_b0_check]\n [--dti_bval_limit DTI_BVAL_LIMIT]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--wm_frf_mask file] [--gm_frf_mask file]\n [--csf_frf_mask file] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_wm_frf out_gm_frf\n out_csf_frf\n\nCompute response functions for multi-shell multi-tissue (MSMT) constrained\nspherical deconvolution from DWI data.\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n - In the wm, we compute the response function in each voxel where the FA is\n superior at threshold_fa_wm.\n - In the gm (or csf), we compute the response function in each voxel where\n the FA is below at threshold_fa_gm (or threshold_fa_csf) and where the MD\n is below threshold_md_gm (or threshold_md_csf).\n\nWe output one response function file for each tissue, containing the response\nfunction for each b-value (arranged by lines). These are saved as the diagonal\nof the axis-symmetric diffusion tensor (3 e-values) and a mean b0 value.\nFor example, a typical wm_frf is [15e-4, 4e-4, 4e-4, 700], where the tensor\ne-values are (15,4,4)x10^-4 mm^2/s and the mean b0 is 700.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical deconvolution\nfor improved analysis of multi-shell diffusion MRI data. Neuroimage (2014)\n\nFormerly: scil_compute_msmt_frf.py\n\npositional arguments:\n in_dwi Path to the input diffusion volume.\n in_bval Path to the bval file, in FSL format.\n in_bvec Path to the bvec file, in FSL format.\n out_wm_frf Path to the output WM frf file, in .txt format.\n out_gm_frf Path to the output GM frf file, in .txt format.\n out_csf_frf Path to the output CSF frf file, in .txt format.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to a binary mask. Only the data inside the mask\n will be used for computations and reconstruction.\n Useful if no tissue masks are available.\n --mask_wm MASK_WM Path to the input WM mask file, used to improve the\n final WM frf mask.\n --mask_gm MASK_GM Path to the input GM mask file, used to improve the\n final GM frf mask.\n --mask_csf MASK_CSF Path to the input CSF mask file, used to improve the\n final CSF frf mask.\n --fa_thr_wm FA_THR_WM\n If supplied, use this threshold to select single WM\n fiber voxels from the FA inside the WM mask defined by\n mask_wm. Each voxel above this threshold will be\n selected. 
[0.7]\n --fa_thr_gm FA_THR_GM\n If supplied, use this threshold to select GM voxels\n from the FA inside the GM mask defined by mask_gm.\n Each voxel below this threshold will be selected.\n [0.2]\n --fa_thr_csf FA_THR_CSF\n If supplied, use this threshold to select CSF voxels\n from the FA inside the CSF mask defined by mask_csf.\n Each voxel below this threshold will be selected.\n [0.1]\n --md_thr_gm MD_THR_GM\n If supplied, use this threshold to select GM voxels\n from the MD inside the GM mask defined by mask_gm.\n Each voxel below this threshold will be selected.\n [0.0007]\n --md_thr_csf MD_THR_CSF\n If supplied, use this threshold to select CSF voxels\n from the MD inside the CSF mask defined by mask_csf.\n Each voxel below this threshold will be selected.\n [0.003]\n --min_nvox MIN_NVOX Minimal number of voxels needed for each tissue masks\n in order to proceed to frf estimation. [100]\n --tolerance TOLERANCE\n The tolerated gap between the b-values to extract and\n the current b-value. [20]\n --skip_b0_check By default, we supervise that at least one b0 exists\n in your data (i.e. b-values below the default\n --tolerance). Use this option to allow continuing even\n if the minimum b-value is suspiciously high. Use with\n care, and only if you understand your data.\n --dti_bval_limit DTI_BVAL_LIMIT\n The highest b-value taken for the DTI model. [1200]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to\n estimate the response functions. The roi will be a\n cuboid spanning from the middle of the volume in each\n direction with the different radii. The type is either\n an int (e.g. --roi_radii 10) or an array-like (3,)\n (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the cuboid roi\n using roi_radii. [center of the 3D volume] (e.g.\n --roi_center 66 79 79)\n --wm_frf_mask file Path to the output WM frf mask file, the voxels used\n to compute the WM frf.\n --gm_frf_mask file Path to the output GM frf mask file, the voxels used\n to compute the GM frf.\n --csf_frf_mask file Path to the output CSF frf mask file, the voxels used\n to compute the CSF frf.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. 
Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "individual", + "specific", + "unique", + "variety", + "different" + ], + [ + "total", + "number" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "individuals", + "those" + ], + [ + "work", + "and" + ], + [ + "brain", + "tissue" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "methods", + "use" + ], + [ + "direction", + "direction" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "highest", + "level", + "highest" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "tool", + "useful" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "inferior", + "superior", + "superior" + ], + [ + "diffusion", + "diffusion" + ], + [ + "imaging", + "mri" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "contrast", + "typical" + ], + [ + "action", + "response" + ], + [ + "held", + "in" + ], + [ + "defined", + "function", + "functional", + "functions", + "function" + ], + [ + "long", + "with" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "clear", + "held", + "work", + "taken" + ], + [ + "future", + "current" + ], + [ + "exist", + "exists" + ], + [ + "defined", + "function", + "defined" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "middle", + "middle" + ], + [ + "area", + "work", + "where" + ], + [ + "individual", + "each" + ], + [ + "blue", + "dark", + "green", + "grey", + "white", + "gray" + ], + [ + "lack", + "minimal" + ], + [ + "level", + "above" + ], + [ + "order", + "allow" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "total", + "100" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "function", + "functions", + "functions" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "based", + "based" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], 
"keywords": [] }, { "name": "scil_frf_set_diffusivities", "docstring": "Replace the fiber response function in the FRF file.\nUse this script when you want to use a fixed response function\nand keep the mean b0.\n\nThe FRF file is obtained from scil_frf_ssst.py or scil_frf_msmt.py in the case\nof multi-shell data.\n\nFormerly: scil_set_response_function.py", "help": "usage: scil_frf_set_diffusivities.py [-h] [--no_factor]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n input new_frf output\n\nReplace the fiber response function in the FRF file.\nUse this script when you want to use a fixed response function\nand keep the mean b0.\n\nThe FRF file is obtained from scil_frf_ssst.py or scil_frf_msmt.py in the case\nof multi-shell data.\n\nFormerly: scil_set_response_function.py\n\npositional arguments:\n input Path of the FRF file.\n new_frf New response function given as a tuple. We will replace the \n response function in frf_file with this fiber response \n function x 10**-4 (e.g. 15,4,4). \n If multi-shell, write the first shell,then the second shell, \n and the third, etc. (e.g. 15,4,4,13,5,5,12,5,5).\n output Path of the new FRF file.\n\noptions:\n -h, --help show this help message and exit\n --no_factor If supplied, the fiber response function is\n evaluated without the x 10**-4 factor. [False].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "work", + "and" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "future", + "will" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "action", + "response" + ], + [ + "held", + "in" + ], + [ + "defined", + "function", + "functional", + "functions", + "function" + ], + [ + "long", + "with" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "methods", + "using" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "examined", + "evaluated" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "false", + "false" + ], + [ + "matter", + "question", + "case" + ] + ], "keywords": [] }, { "name": "scil_frf_ssst", "docstring": "Compute a single Fiber Response Function from a DWI.\n\nA DTI fit is made, and voxels containing a single fiber population are\nfound using a threshold on the FA.\n\nFormerly: scil_compute_ssst_frf.py", "help": "usage: scil_frf_ssst.py [-h] [--mask MASK] [--mask_wm MASK_WM]\n [--fa_thresh FA_THRESH]\n [--min_fa_thresh MIN_FA_THRESH] [--min_nvox MIN_NVOX]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec 
frf_file\n\nCompute a single Fiber Response Function from a DWI.\n\nA DTI fit is made, and voxels containing a single fiber population are\nfound using a threshold on the FA.\n\nFormerly: scil_compute_ssst_frf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n frf_file Path to the output FRF file, in .txt format, saved by Numpy.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to a binary mask. Only the data inside the mask will be used \n for computations and reconstruction. Useful if no white matter mask \n is available.\n --mask_wm MASK_WM Path to a binary white matter mask. Only the data inside this mask \n and above the threshold defined by --fa_thresh will be used to estimate the \n fiber response function.\n --fa_thresh FA_THRESH\n If supplied, use this threshold as the initial threshold to select \n single fiber voxels. [0.7]\n --min_fa_thresh MIN_FA_THRESH\n If supplied, this is the minimal value that will be tried when looking \n for single fiber voxels. [0.5]\n --min_nvox MIN_NVOX Minimal number of voxels needing to be identified as single fiber voxels \n in the automatic estimation. [300]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to estimate the \n response functions. The roi will be a cuboid spanning from the middle of \n the volume in each direction with the different radii. The type is either \n an int (e.g. --roi_radii 10) or an array-like (3,) (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the roi of size roi_radius. [center of the 3D volume]\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences: [1] Tournier et al. 
NeuroImage 2007\n", - "synonyms": [], + "synonyms": [ + [ + "individual", + "specific", + "unique", + "variety", + "different" + ], + [ + "total", + "number" + ], + [ + "step", + "continue" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "individuals", + "those" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "fibre", + "fiber" + ], + [ + "unknown", + "identified" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "direction", + "direction" + ], + [ + "work", + "find" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "tool", + "useful" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "diffusion", + "diffusion" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "action", + "response" + ], + [ + "held", + "in" + ], + [ + "defined", + "function", + "functional", + "functions", + "function" + ], + [ + "long", + "with" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "population", + "population" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "exist", + "exists" + ], + [ + "defined", + "function", + "defined" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "thinking", + "working", + "looking" + ], + [ + "considered", + "is" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "middle", + "middle" + ], + [ + "individual", + "each" + ], + [ + "larger", + "size", + "size" + ], + [ + "lack", + "minimal" + ], + [ + "level", + "above" + ], + [ + "order", + "allow" + ], + [ + "clear", + "work", + "made" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "function", + "functions", + "functions" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_get_version", "docstring": "Give you information about your current scilpy installation.\nThis is useful for non-developers to give you the information\nneeded to reproduce your results, or to help debugging.\n\nIf you are experiencing a bug, please run this script and\nsend the output to the scilpy developers.", "help": "usage: scil_get_version.py [-h] [--show_dependencies]\n [-v [{DEBUG,INFO,WARNING}]]\n\nGive you information about your current scilpy installation.\nThis 
is useful for non-developers to give you the information\nneeded to reproduce your results, or to help debugging.\n\nIf you are experiencing a bug, please run this script and\nsend the output to the scilpy developers.\n\noptions:\n -h, --help show this help message and exit\n --show_dependencies Show the dependencies of scilpy.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [], + "synonyms": [ + [ + "thinking", + "you" + ], + [ + "result", + "results" + ], + [ + "long", + "a" + ], + [ + "tool", + "useful" + ], + [ + "work", + "and" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "methods", + "using" + ], + [ + "future", + "current" + ], + [ + "considered", + "are" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "is" + ], + [ + "clear", + "give" + ] + ], "keywords": [] }, { "name": "scil_gradients_apply_transform", "docstring": "Transform bvecs using an affine/rigid transformation.\n\nFormerly: scil_apply_transform_to_bvecs.py.", "help": "usage: scil_gradients_apply_transform.py [-h] [--inverse]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bvecs in_transfo out_bvecs\n\nTransform bvecs using an affine/rigid transformation.\n\nFormerly: scil_apply_transform_to_bvecs.py.\n\npositional arguments:\n in_bvecs Path of the bvec file, in FSL format\n in_transfo Path of the file containing the 4x4 \n transformation, matrix (.txt, .npy or .mat).\n out_bvecs Output filename of the transformed bvecs.\n\noptions:\n -h, --help show this help message and exit\n --inverse Apply the inverse transformation.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "work", + "and" + ], + [ + "applied", + "apply" + ], + [ + "methods", + "using" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "matrices", + "matrix" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "left", + "result", + "when" + ], + [ + "held", + "in" + ], + [ + "considered", + "is" + ] + ], "keywords": [] }, { "name": "scil_gradients_convert", "docstring": "Script to convert gradient tables between FSL and MRtrix formats.\n\nFormerly: scil_convert_gradients_mrtrix_to_fsl.py or\nscil_convert_gradients_fsl_to_mrtrix.py", "help": "usage: scil_gradients_convert.py [-h] (--input_fsl | --input_mrtrix) [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n GRADIENT_FILE(S) [GRADIENT_FILE(S) ...]\n output\n\nScript to convert gradient tables between FSL and MRtrix formats.\n\nFormerly: scil_convert_gradients_mrtrix_to_fsl.py or\nscil_convert_gradients_fsl_to_mrtrix.py\n\npositional arguments:\n GRADIENT_FILE(S) Path(s) to the gradient file(s). 
Either FSL (.bval, .bvec) or MRtrix (.b).\n output Basename of output without extension. Extension(s) will be added automatically (.b for MRtrix, .bval/.bvec for FSL).\n\noptions:\n -h, --help show this help message and exit\n --input_fsl FSL format.\n --input_mrtrix MRtrix format.\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [], + "synonyms": [ ...~15 machine-generated word-association pairs... ], "keywords": [] }, { "name": "scil_gradients_generate_sampling", "docstring": "Generate multi-shell gradient sampling with various processing options. Helps\naccelerate gradients, optimize duty cycle and avoid artefacts.\n\nMulti-shell gradient sampling is generated as in [1]. The bvecs are then\nflipped to maximize spread for eddy current correction, b0s are interleaved at\nequal spacing and the non-b0 samples are finally shuffled to minimize the total\ndiffusion gradient amplitude over a few TR.\n\nFormerly: scil_generate_gradient_sampling.py", "help": "usage: scil_gradients_generate_sampling.py [-h] [--eddy] [--duty]\n [--no_b0_start NO_B0_START | --b0_every B0_EVERY]\n [--b0_end] [--b0_value B0_VALUE]\n [--b0_philips]\n (--bvals bvals [bvals ...] | --b_lin_max B_LIN_MAX | --q_lin_max Q_LIN_MAX)\n (--fsl | --mrtrix)\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n nb_samples_per_shell\n [nb_samples_per_shell ...]\n out_basename\n\nGenerate multi-shell gradient sampling with various processing options. Helps\naccelerate gradients, optimize duty cycle and avoid artefacts.\n\nMulti-shell gradient sampling is generated as in [1]. The bvecs are then\nflipped to maximize spread for eddy current correction, b0s are interleaved at\nequal spacing and the non-b0 samples are finally shuffled to minimize the total\ndiffusion gradient amplitude over a few TR.\n\nFormerly: scil_generate_gradient_sampling.py\n\npositional arguments:\n nb_samples_per_shell Number of samples on each non b0 shell. \n If multishell, provide a number per shell.\n out_basename Gradient sampling output basename (don't include extension).\n Please add options --fsl and/or --mrtrix below.\n\noptions:\n -h, --help show this help message and exit\n --eddy If set, we apply eddy optimization.\n B-vectors are flipped to be well spread without symmetry.\n --duty If set, we apply duty cycle optimization. \n B-vectors are shuffled to reduce consecutive colinearity in the samples. [False]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nb0 acquisitions:\n Default if you add no option is to have a b0 at the start.\n\n --no_b0_start NO_B0_START\n If set, do not add a b0 at the beginning. \n --b0_every B0_EVERY Interleave a b0 every n=b0_every values. Starts after the first b0 \n (cannot be used with --no_b0_start). Must be an integer >= 1.\n --b0_end If set, adds a b0 as last sample.\n --b0_value B0_VALUE b-value of the b0s. [0.0]\n --b0_philips If set, replaces values of b0s bvecs by existing bvecs for Philips handling.\n\nNon-b0 acquisitions:\n --bvals bvals [bvals ...]\n bval of each non-b0 shell.\n --b_lin_max B_LIN_MAX\n b-max for linear bval distribution in *b*.\n --q_lin_max Q_LIN_MAX\n b-max for linear bval distribution in *q*; \n the square root of b-values will be linearly distributed.\n\nSave as:\n --fsl Save in FSL format (.bvec/.bval).\n --mrtrix Save in MRtrix format (.b).\n\nReferences: [1] Emmanuel Caruyer, Christophe Lenglet, Guillermo Sapiro,\nRachid Deriche. Design of multishell gradient sampling with uniform coverage\nin diffusion MRI. Magnetic Resonance in Medicine, Wiley, 2013, 69 (6),\npp. 1534-1540.\n", - "synonyms": [], + "synonyms": [ ...~50 machine-generated word-association pairs... ], "keywords": [] },
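For reference, a minimal invocation sketch for scil_gradients_generate_sampling.py (shell sizes and basename are hypothetical): a two-shell scheme with 32 and 64 samples at b=1000 and b=2000, eddy-optimized, saved in FSL format:
>>> scil_gradients_generate_sampling.py 32 64 my_scheme --bvals 1000 2000 --eddy --fsl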
"docstring": "Flip (ex, x --> -x) or swap (ex, x <-> y) chosen axes of the gradient sampling\nmatrix. Result will be saved in the same format as input gradient sampling\nfile.\n\nFormerly: scil_flip_gradients.py or scil_swap_gradient_axis.py", "help": "usage: scil_gradients_modify_axes.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_gradient_sampling_file\n out_gradient_sampling_file\n {1,2,3,-1,-2,-3} {1,2,3,-1,-2,-3}\n {1,2,3,-1,-2,-3}\n\nFlip (ex, x --> -x) or swap (ex, x <-> y) chosen axes of the gradient sampling\nmatrix. Result will be saved in the same format as input gradient sampling\nfile.\n\nFormerly: scil_flip_gradients.py or scil_swap_gradient_axis.py\n\npositional arguments:\n in_gradient_sampling_file\n Path to gradient sampling file. (.bvec or .b)\n out_gradient_sampling_file\n Where to save the flipped gradient sampling file.Extension (.bvec or .b) must be the same as in_gradient_sampling_file\n {1,2,3,-1,-2,-3} The final order of the axes, compared to original order: x=1 y=2 z=3.\n Ex: to only flip y: 1 -2 3.\n Ex: to only swap x and y: 2 1 3.\n Ex: to first flip x, then permute all three axes: 3 -1 2.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "lack", + "loss", + "result", + "result" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "methods", + "using" + ], + [ + "matrices", + "matrix" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "represent", + "chosen" + ], + [ + "area", + "work", + "where" + ], + [ + "average", + "compared" + ], + [ + "work", + "all" + ] + ], "keywords": [] }, { "name": "scil_gradients_round_bvals", "docstring": "Select b-values on specific b-value shells.\n\nWith the --tolerance argument, this is useful for sampling schemes where\nb-values of a shell are not all identical. Adjust the tolerance to vary the\naccepted interval around the targetted b-value.\n\nFor example, a b-value of 2000 and a tolerance of 20 will select all b-values\nbetween [1980, 2020] and round them to the value of 2000.\n\n>> scil_gradients_round_bvals.py bvals 0 1000 2000 newbvals --tolerance 20\n\nFormerly: scil_resample_bvals.py", "help": "usage: scil_gradients_round_bvals.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bval shells [shells ...] out_bval\n tolerance\n\nSelect b-values on specific b-value shells.\n\nWith the --tolerance argument, this is useful for sampling schemes where\nb-values of a shell are not all identical. 
{ "name": "scil_gradients_round_bvals", "docstring": "Select b-values on specific b-value shells.\n\nWith the --tolerance argument, this is useful for sampling schemes where\nb-values of a shell are not all identical. Adjust the tolerance to vary the\naccepted interval around the targeted b-value.\n\nFor example, a b-value of 2000 and a tolerance of 20 will select all b-values\nbetween [1980, 2020] and round them to the value of 2000.\n\n>> scil_gradients_round_bvals.py bvals 0 1000 2000 newbvals --tolerance 20\n\nFormerly: scil_resample_bvals.py", "help": "usage: scil_gradients_round_bvals.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bval shells [shells ...] out_bval\n tolerance\n\nSelect b-values on specific b-value shells.\n\nWith the --tolerance argument, this is useful for sampling schemes where\nb-values of a shell are not all identical. Adjust the tolerance to vary the\naccepted interval around the targeted b-value.\n\nFor example, a b-value of 2000 and a tolerance of 20 will select all b-values\nbetween [1980, 2020] and round them to the value of 2000.\n\n>> scil_gradients_round_bvals.py bvals 0 1000 2000 newbvals --tolerance 20\n\nFormerly: scil_resample_bvals.py\n\npositional arguments:\n in_bval The b-values in FSL format.\n shells The list of expected shells. For example 0 1000 2000.\n All b-values in the b_val file should correspond to one given shell (up to the tolerance).\n out_bval The name of the output b-values.\n tolerance The tolerated gap between the b-values to extract and the \n actual b-values. Expecting an integer value. Comparison is \n strict: a b-value of 1010 with a tolerance of 10 is NOT \n included in shell 1000. Suggestion: 20.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ ...~30 machine-generated word-association pairs... ], "keywords": [] }, { "name": "scil_gradients_validate_correct", "docstring": "Detect sign flips and/or axes swaps in the gradients table from a fiber\ncoherence index [1]. The script takes as input the principal direction(s)\nat each voxel, the b-vectors and the fractional anisotropy map and outputs\na corrected b-vectors file.\n\nA typical pipeline could be:\n>>> scil_dti_metrics.py dwi.nii.gz bval bvec --not_all --fa fa.nii.gz\n --evecs peaks.nii.gz\n>>> scil_gradients_validate_correct.py bvec peaks_v1.nii.gz fa.nii.gz bvec_corr\n\nNote that peaks_v1.nii.gz is the file containing the direction associated\nto the highest eigenvalue at each voxel.\n\nIt is also possible to use a file containing multiple principal directions per\nvoxel, given that they are sorted by decreasing amplitude. In that case, the\nfirst direction (with the highest amplitude) will be chosen for validation.\nOnly 4D data is supported, so the directions must be stored in a single\ndimension. 
For example, peaks.nii.gz from scil_fodf_metrics.py could be used.\n\nFormerly: scil_validate_and_correct_bvecs.py", "help": "usage: scil_gradients_validate_correct.py [-h] [--mask MASK]\n [--fa_threshold FA_THRESHOLD]\n [--column_wise]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bvec in_peaks in_FA out_bvec\n\nDetect sign flips and/or axes swaps in the gradients table from a fiber\ncoherence index [1]. The script takes as input the principal direction(s)\nat each voxel, the b-vectors and the fractional anisotropy map and outputs\na corrected b-vectors file.\n\nA typical pipeline could be:\n>>> scil_dti_metrics.py dwi.nii.gz bval bvec --not_all --fa fa.nii.gz\n --evecs peaks.nii.gz\n>>> scil_gradients_validate_correct.py bvec peaks_v1.nii.gz fa.nii.gz bvec_corr\n\nNote that peaks_v1.nii.gz is the file containing the direction associated\nto the highest eigenvalue at each voxel.\n\nIt is also possible to use a file containing multiple principal directions per\nvoxel, given that they are sorted by decreasing amplitude. In that case, the\nfirst direction (with the highest amplitude) will be chosen for validation.\nOnly 4D data is supported, so the directions must be stored in a single\ndimension. For example, peaks.nii.gz from scil_fodf_metrics.py could be used.\n\nFormerly: scil_validate_and_correct_bvecs.py\n\npositional arguments:\n in_bvec Path to bvec file.\n in_peaks Path to peaks file.\n in_FA Path to the fractional anisotropy file.\n out_bvec Path to corrected bvec file (FSL format).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to an optional mask. If set, FA and Peaks will only be used inside the mask.\n --fa_threshold FA_THRESHOLD\n FA threshold. Only voxels with FA higher than fa_threshold will be considered. [0.2]\n --column_wise Specify if input peaks are column-wise (..., 3, N) instead of row-wise (..., N, 3).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Schilling KG, Yeh FC, Nath V, Hansen C, Williams O, Resnick S, Anderson AW,\nLandman BA. A fiber coherence index for quality control of B-table orientation\nin diffusion MRI scans. Magn Reson Imaging. 
2019 May;58:82-89.\ndoi: 10.1016/j.mri.2019.01.018.\n", - "synonyms": [], + "synonyms": [ ...~60 machine-generated word-association pairs... ], "keywords": [] },
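For reference, a minimal invocation sketch for scil_gradients_validate_correct.py restricted to a mask (file names and threshold are hypothetical; see the pipeline example in the help above for the unmasked call):
>>> scil_gradients_validate_correct.py dwi.bvec peaks_v1.nii.gz fa.nii.gz bvec_corr --mask wm_mask.nii.gz --fa_threshold 0.3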
scil_validate_and_correct_eddy_gradients.py\n\npositional arguments:\n in_bvec In bvec file.\n in_bval In bval file.\n nb_dirs Number of directions per DWI.\n out_bvec Out bvec file.\n out_bval Out bval file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "direction", + "directions" + ], + [ + "long", + "a" + ], + [ + "clear", + "left", + "out" + ], + [ + "average", + "per" + ], + [ + "work", + "and" + ], + [ + "held", + "on" + ], + [ + "methods", + "using" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "left", + "result", + "when" + ], + [ + "held", + "in" + ], + [ + "considered", + "is" + ], + [ + "long", + "full" + ], + [ + "long", + "with" + ] + ], "keywords": [] }, { "name": "scil_header_print_info", "docstring": "Print the raw header from the provided file or only the specified keys.\nSupport trk, nii and mgz files.\n\nFormerly: scil_print_header.py", "help": "usage: scil_header_print_info.py [-h] [--keys KEYS [KEYS ...]]\n [--print_affine] [-v [{DEBUG,INFO,WARNING}]]\n in_file\n\nPrint the raw header from the provided file or only the specified keys.\nSupport trk, nii and mgz files.\n\nFormerly: scil_print_header.py\n\npositional arguments:\n in_file Input file (trk, nii and mgz).\n\noptions:\n -h, --help show this help message and exit\n --keys KEYS [KEYS ...]\n Print only the specified keys.\n --print_affine Print nibabel's affine.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n", - "synonyms": [], + "synonyms": [ + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "methods", + "using" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "is" + ], + [ + "parameters", + "specified" + ] + ], "keywords": [] }, { "name": "scil_header_validate_compatibility", "docstring": "Will compare all input files against the first one for the compatibility\nof their spatial attributes.\n\nSpatial attributes are: affine, dimensions, voxel sizes and voxel order.\n\nFormerly: scil_verify_space_attributes_compatibility.py", "help": "usage: scil_header_validate_compatibility.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n in_files [in_files ...]\n\nWill compare all input files against the first one for the compatibility\nof their spatial attributes.\n\nSpatial attributes are: affine, dimensions, voxel sizes and voxel order.\n\nFormerly: scil_verify_space_attributes_compatibility.py\n\npositional arguments:\n in_files List of file to compare (trk, tck and nii/nii.gz).\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "voxel", + "voxel" + ], + [ + "work", + "and" + ], + [ + "held", + "on" + ], + [ + "methods", + "using" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "considered", + "are" + ], + [ + "work", + "all" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "order", + "order" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "future", + "will" + ], + [ + "action", + "against" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "size", + "sizes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "left", + "result", + "when" + ], + [ + "spatial", + "temporal", + "spatial" + ], + [ + "considered", + "is" + ] + ], "keywords": [] }, { "name": "scil_json_convert_entries_to_xlsx", "docstring": "Convert a final aggregated json file to an Excel spreadsheet.\nTypically used during the tractometry pipeline.\n\nFormerly: scil_convert_json_to_xlsx.py", "help": "usage: scil_json_convert_entries_to_xlsx.py [-h] [--no_sort_subs]\n [--no_sort_bundles]\n [--ignore_bundles FILE]\n [--stats_over_population]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_json out_xlsx\n\nConvert a final aggregated json file to an Excel spreadsheet.\nTypically used during the tractometry pipeline.\n\nFormerly: scil_convert_json_to_xlsx.py\n\npositional arguments:\n in_json File containing the json stats (.json).\n out_xlsx Output Excel file for the stats (.xlsx).\n\noptions:\n -h, --help show this help message and exit\n 
--no_sort_subs If set, subjects won't be sorted alphabetically.\n --no_sort_bundles If set, bundles won't be sorted alphabetically.\n --ignore_bundles FILE\n Path to a text file containing a list of bundles to ignore (.txt).\n One bundle, corresponding to keys in the json, per line.\n --stats_over_population\n If set, consider the input stats to be over an entire population and not subject-based.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "bundles", + "bundles" + ], + [ + "average", + "per" + ], + [ + "work", + "and" + ], + [ + "subject", + "subjects", + "subjects" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "order", + "set" + ], + [ + "long", + "over" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "population", + "population" + ], + [ + "methods", + "using" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "based", + "based" + ], + [ + "bundles", + "bundle" + ], + [ + "matter", + "question", + "subject", + "subjects", + "subject" + ], + [ + "considered", + "possibility", + "question", + "step", + "consider" + ] + ], "keywords": [] }, { "name": "scil_json_harmonize_entries", "docstring": "This script will harmonize a json file by adding missing keys and values\nthat differ between the different layers of the dictionary.\n\nThis is used only (for now) in the Aggregate_All_* portion of tractometry-flow,\nto counter the problem of missing bundles/metrics/lesions between subjects.\n\nThe most common use case is when specific subjects have missing bundles,\nwhich will cause a pandas array to be incomplete, and thus crash. Finding out\nthe union of all bundles/metrics/lesions will allow to create a complete json\n(but with NaN for missing values).\n\nFormerly: scil_harmonize_json.py", "help": "usage: scil_json_harmonize_entries.py [-h] [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file out_file\n\n This script will harmonize a json file by adding missing keys and values\nthat differ between the different layers of the dictionary.\n\nThis is used only (for now) in the Aggregate_All_* portion of tractometry-flow,\nto counter the problem of missing bundles/metrics/lesions between subjects.\n\nThe most common use case is when specific subjects have missing bundles,\nwhich will cause a pandas array to be incomplete, and thus crash. 
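The failure mode and the fix are easy to reproduce in isolation. A minimal sketch, assuming plain nested dicts and hypothetical bundle names rather than scilpy's actual implementation:

    import numpy as np
    import pandas as pd

    # Two subjects; CST_L is missing for sub-02.
    stats = {"sub-01": {"AF_L": {"mean_fa": 0.52}, "CST_L": {"mean_fa": 0.61}},
             "sub-02": {"AF_L": {"mean_fa": 0.49}}}

    # Union of all bundle keys across subjects, then fill the gaps with NaN
    # so the resulting table stays rectangular.
    all_bundles = sorted(set().union(*(s.keys() for s in stats.values())))
    harmonized = {sub: {b: s.get(b, {"mean_fa": np.nan}) for b in all_bundles}
                  for sub, s in stats.items()}
    df = pd.DataFrame({sub: {b: v["mean_fa"] for b, v in bundles.items()}
                       for sub, bundles in harmonized.items()})
    print(df)  # sub-02 / CST_L is NaN instead of breaking downstream code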
Finding out\nthe union of all bundles/metrics/lesions will allow to create a complete json\n(but with NaN for missing values).\n\nFormerly: scil_harmonize_json.py\n\npositional arguments:\n in_file Input file (json).\n out_file Output file (json).\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "individual", + "specific", + "unique", + "variety", + "different" + ], + [ + "considered", + "highly", + "long", + "work", + "most" + ], + [ + "bundles", + "bundles" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "long", + "work", + "working", + "now" + ], + [ + "subject", + "subjects", + "subjects" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "long", + "have" + ], + [ + "specific", + "specific" + ], + [ + "long", + "a" + ], + [ + "clear", + "left", + "out" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "forms", + "specific", + "common" + ], + [ + "create", + "create" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "clear", + "adding" + ], + [ + "methods", + "using" + ], + [ + "question", + "problem" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "considered", + "form", + "meaning", + "order", + "result", + "thus" + ], + [ + "order", + "allow" + ], + [ + "result", + "cause" + ], + [ + "work", + "all" + ], + [ + "matter", + "question", + "case" + ], + [ + "possibility", + "finding" + ] + ], "keywords": [] }, { "name": "scil_json_merge_entries", "docstring": "Merge multiple json files into a single one.\nTypically used during the tractometry pipeline.\n\nWithout option it will simply merge all entries at the top level, the top\nlevel must not have any conflicting keys.\n\n--keep_separate option will add a parent for each file, its basename will\nbecome the key.\n\n--no_list option will merge all entries at the top level, if there is a\nconflict the lowest level will be extended with the new values (if list) or\nadded (if value)\n\n--add_parent_key option will add a parent key before merging all entries.\n\n--remove_parent_key option will remove the parent key before merging all\nentries.\n\n--recursive option will merge all entries (scalar) at the lowest layers as a\nlist.\n\n--average_last_layer option will average all entries (scalar) at the lowest\nlayers, but instead of creating a list it creates a mean/std level.\n\nFormerly: scil_merge_json.py", "help": "usage: scil_json_merge_entries.py [-h] [--keep_separate] [--no_list]\n 
[--add_parent_key ADD_PARENT_KEY]\n [--remove_parent_key] [--recursive]\n [--average_last_layer] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_json [in_json ...] out_json\n\n Merge multiple json files into a single one.\nTypically used during the tractometry pipeline.\n\nWithout option it will simply merge all entries at the top level, the top\nlevel must not have any conflicting keys.\n\n--keep_separate option will add a parent for each file, its basename will\nbecome the key.\n\n--no_list option will merge all entries at the top level, if there is a\nconflict the lowest level will be extended with the new values (if list) or\nadded (if value)\n\n--add_parent_key option will add a parent key before merging all entries.\n\n--remove_parent_key option will remove the parent key before merging all\nentries.\n\n--recursive option will merge all entries (scalar) at the lowest layers as a\nlist.\n\n--average_last_layer option will average all entries (scalar) at the lowest\nlayers, but instead of creating a list it creates a mean/std level.\n\nFormerly: scil_merge_json.py\n\npositional arguments:\n in_json List of json files to merge (.json).\n out_json Output json file (.json).\n\noptions:\n -h, --help show this help message and exit\n --keep_separate Merge entries as separate keys based on filename.\n --no_list Merge entries knowing there is no conflict.\n --add_parent_key ADD_PARENT_KEY\n Merge all entries under a single parent.\n --remove_parent_key Merge ignoring parent key (e.g for population).\n --recursive Merge all entries at the lowest layers.\n --average_last_layer Average all entries at the lowest layers.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "average", + "higher", + "highest", + "lowest" + ], + [ + "left", + "into" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "create", + "creates" + ], + [ + "create", + "creating" + ], + [ + "long", + "have" + ], + [ + "long", + "a" + ], + [ + "key", + "main", + "key" + ], + [ + "thinking", + "knowing" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "long", + "with" + ], + [ + "average", + "average" + ], + [ + "population", + "population" + ], + [ + "methods", + "using" + ], + [ + "thinking", + "simply" + ], + [ + "order", + "work", + "instead" + ], + [ + "left", + "before" + ], + [ + "create", + "order", + 
"work", + "to" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "clear", + "long", + "question", + "result", + "work", + "there" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "based", + "based" + ], + [ + "considered", + "become" + ] + ], "keywords": [] }, { "name": "scil_labels_combine", "docstring": "Script to combine labels from multiple volumes. If there is overlap, it will\noverwrite them based on the input order.\n\n >>> scil_labels_combine.py out_labels.nii.gz\n --volume_ids animal_labels.nii 20\n --volume_ids DKT_labels.nii.gz 44 53\n --out_labels_indices 20 44 53\n >>> scil_labels_combine.py slf_labels.nii.gz\n --volume_ids a2009s_aseg.nii.gz all\n --volume_ids clean/s1__DKT.nii.gz 1028 2028\n\nFormerly: scil_combine_labels.py.", "help": "usage: scil_labels_combine.py [-h] --volume_ids VOLUME_IDS [VOLUME_IDS ...]\n [--out_labels_ids OUT_LABELS_IDS [OUT_LABELS_IDS ...]\n | --unique | --group_in_m]\n [--background BACKGROUND] [--merge_groups]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n output\n\nScript to combine labels from multiple volumes. If there is overlap, it will\noverwrite them based on the input order.\n\n >>> scil_labels_combine.py out_labels.nii.gz\n --volume_ids animal_labels.nii 20\n --volume_ids DKT_labels.nii.gz 44 53\n --out_labels_indices 20 44 53\n >>> scil_labels_combine.py slf_labels.nii.gz\n --volume_ids a2009s_aseg.nii.gz all\n --volume_ids clean/s1__DKT.nii.gz 1028 2028\n\nFormerly: scil_combine_labels.py.\n\npositional arguments:\n output Combined labels volume output.\n\noptions:\n -h, --help show this help message and exit\n --volume_ids VOLUME_IDS [VOLUME_IDS ...]\n List of volumes directly followed by their labels:\n --volume_ids atlasA id1a id2a \n --volume_ids atlasB id1b id2b ... \n \"all\" can be used instead of id numbers.\n --out_labels_ids OUT_LABELS_IDS [OUT_LABELS_IDS ...]\n List of labels indices for output images.\n --unique If set, output id with unique labels, excluding first background value.\n --group_in_m Add (x * 10 000) to each volume labels, where x is the input volume order number.\n --background BACKGROUND\n Background id, excluded from output [0],\n the value is used as output background value.\n --merge_groups Each group from the --volume_ids option will be merged as a single labels.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. 
OHBM 2019.\n Surface integration for connectome analysis in age prediction.\n \n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "work", + "and" + ], + [ + "connectome", + "connectome" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "image", + "images" + ], + [ + "order", + "set" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "future", + "will" + ], + [ + "result", + "followed" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "methods", + "using" + ], + [ + "order", + "work", + "instead" + ], + [ + "unique", + "variety", + "unique" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "area", + "work", + "where" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "based", + "group" + ], + [ + "volume", + "volumes", + "volumes" + ], + [ + "total", + "combined" + ], + [ + "clear", + "long", + "question", + "result", + "work", + "there" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "based", + "based" + ] + ], "keywords": [] }, { "name": "scil_labels_dilate", "docstring": "Dilate regions (with or without masking) from a labeled volume:\n- \"label_to_dilate\" are regions that will dilate over\n \"label_to_fill\" if close enough to it (\"distance\").\n- \"label_to_dilate\", by default (None) will be all\n non-\"label_to_fill\" and non-\"label_not_to_dilate\".\n- \"label_not_to_dilate\" will not be changed, but will not dilate.\n- \"mask\" is where the dilation is allowed (constrained)\n in addition to \"background_label\" (logical AND)\n\n>>> scil_labels_dilate.py wmparc_t1.nii.gz wmparc_dil.nii.gz \\\n --label_to_fill 0 5001 5002 \\\n --label_not_to_dilate 4 43 10 11 12 49 50 51\n\nFormerly: scil_dilate_labels.py", "help": "usage: scil_labels_dilate.py [-h] [--distance DISTANCE]\n [--labels_to_dilate LABELS_TO_DILATE [LABELS_TO_DILATE ...]]\n [--labels_to_fill LABELS_TO_FILL [LABELS_TO_FILL ...]]\n [--labels_not_to_dilate LABELS_NOT_TO_DILATE [LABELS_NOT_TO_DILATE ...]]\n [--mask MASK] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file out_file\n\nDilate regions (with or without masking) from a labeled volume:\n- \"label_to_dilate\" are regions that will dilate over\n \"label_to_fill\" if close enough to it (\"distance\").\n- \"label_to_dilate\", by default (None) will be all\n non-\"label_to_fill\" and non-\"label_not_to_dilate\".\n- \"label_not_to_dilate\" will not be changed, but will not dilate.\n- \"mask\" is where the dilation is allowed (constrained)\n in addition to \"background_label\" (logical AND)\n\n>>> scil_labels_dilate.py wmparc_t1.nii.gz wmparc_dil.nii.gz \\\n --label_to_fill 0 5001 5002 \\\n --label_not_to_dilate 4 43 10 11 12 49 50 51\n\nFormerly: scil_dilate_labels.py\n\npositional arguments:\n in_file Path of the volume (nii or nii.gz).\n out_file 
Output filename of the dilated labels.\n\noptions:\n -h, --help show this help message and exit\n --distance DISTANCE Maximal distance to dilate (in mm) [2.0].\n --labels_to_dilate LABELS_TO_DILATE [LABELS_TO_DILATE ...]\n Label list to dilate. By default it dilates all \n labels not in labels_to_fill nor in labels_not_to_dilate.\n --labels_to_fill LABELS_TO_FILL [LABELS_TO_FILL ...]\n Background id / labels to be filled [[0]],\n the first one is given as output background value.\n --labels_not_to_dilate LABELS_NOT_TO_DILATE [LABELS_NOT_TO_DILATE ...]\n Label list not to dilate.\n --mask MASK Only dilate values inside the mask.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. OHBM 2019.\n Surface integration for connectome analysis in age prediction.\n \n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "connectome", + "connectome" + ], + [ + "considered", + "are" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "areas", + "region", + "regions", + "regions" + ], + [ + "long", + "over" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "create", + "enough" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "total", + "50" + ], + [ + "methods", + "using" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "area", + "work", + "where" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "order", + "allowed" + ], + [ + "step", + "start" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "large", + "larger", + "variety", + "work", + "addition" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "matter", + "nor" + ] + ], "keywords": [] }, { "name": "scil_labels_remove", "docstring": "Script to remove specific labels from an atlas volume.\n\n >>> scil_labels_remove.py DKT_labels.nii out_labels.nii.gz -i 5001 5002\n\nFormerly: scil_remove_labels.py", "help": "usage: scil_labels_remove.py [-h] -i INDICES [INDICES ...]\n [--background BACKGROUND]\n [-v 
[{DEBUG,INFO,WARNING}]] [-f]\n in_labels out_labels\n\nScript to remove specific labels from an atlas volume.\n\n >>> scil_labels_remove.py DKT_labels.nii out_labels.nii.gz -i 5001 5002\n\nFormerly: scil_remove_labels.py\n\npositional arguments:\n in_labels Input labels volume.\n out_labels Output labels volume.\n\noptions:\n -h, --help show this help message and exit\n -i INDICES [INDICES ...], --indices INDICES [INDICES ...]\n List of labels indices to remove.\n --background BACKGROUND\n Integer used for removed labels [0].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. OHBM 2019.\n Surface integration for connectome analysis in age prediction.\n \n", - "synonyms": [], + "synonyms": [ + [ + "volume", + "volumes", + "volume" + ], + [ + "work", + "and" + ], + [ + "connectome", + "connectome" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "specific", + "specific" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "thinking", + "i" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "atlas", + "atlas" + ], + [ + "methods", + "using" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ] + ], "keywords": [] }, { "name": "scil_labels_split_volume_by_ids", "docstring": "Split a label image into multiple images where the name of the output images\nis the id of the label (ex. 35.nii.gz, 36.nii.gz, ...). If the --range option\nis not provided, all labels of the image are extracted. The label 0 is\nconsidered as the background and is ignored.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_ids.py", "help": "usage: scil_labels_split_volume_by_ids.py [-h] [--out_dir OUT_DIR]\n [--out_prefix OUT_PREFIX]\n [-r min max min max]\n [--background BACKGROUND]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_labels\n\nSplit a label image into multiple images where the name of the output images\nis the id of the label (ex. 35.nii.gz, 36.nii.gz, ...). If the --range option\nis not provided, all labels of the image are extracted. The label 0 is\nconsidered as the background and is ignored.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_ids.py\n\npositional arguments:\n in_labels Path of the input label file, in a format supported by Nibabel.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Put all ouptput images in a specific directory.\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n -r min max min max, --range min max min max\n Specifies a subset of labels to split, formatted as min max. Ex: -r 3 5 will give files _3, _4, _5.\n --background BACKGROUND\n Background value. Will not be saved as a separate label. Default: 0.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "left", + "into" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "image", + "images" + ], + [ + "meaning", + "name" + ], + [ + "specific", + "specific" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "clear", + "left", + "work", + "put" + ], + [ + "methods", + "using" + ], + [ + "supported", + "supported" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "clear", + "give" + ], + [ + "area", + "work", + "where" + ], + [ + "individual", + "each" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "considered", + "key", + "work", + "important" + ] + ], "keywords": [] }, { "name": "scil_labels_split_volume_from_lut", "docstring": "Split a label image into multiple images where the name of the output images\nis taken from a lookup table (ex: left-lateral-occipital.nii.gz,\nright-thalamus.nii.gz, ...). Only the labels included in the lookup table\nare extracted.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_labels.py", "help": "usage: scil_labels_split_volume_from_lut.py [-h] [--out_dir OUT_DIR]\n [--out_prefix OUT_PREFIX]\n (--scilpy_lut {freesurfer_subcortical,dk_aggregate_structures,freesurfer_desikan_killiany} | --custom_lut CUSTOM_LUT)\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_label\n\nSplit a label image into multiple images where the name of the output images\nis taken from a lookup table (ex: left-lateral-occipital.nii.gz,\nright-thalamus.nii.gz, ...). Only the labels included in the lookup table\nare extracted.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_labels.py\n\npositional arguments:\n in_label Path of the input label file, in a format supported by Nibabel.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Put all ouptput images in a specific directory.\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n --scilpy_lut {freesurfer_subcortical,dk_aggregate_structures,freesurfer_desikan_killiany}\n Lookup table, in the file scilpy/data/LUT, used to name the output files.\n --custom_lut CUSTOM_LUT\n Path of the lookup table file, used to name the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "left", + "into" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "image", + "images" + ], + [ + "meaning", + "name" + ], + [ + "left", + "left" + ], + [ + "specific", + "specific" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "lobe", + "occipital", + "parietal", + "occipital" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "clear", + "left", + "work", + "put" + ], + [ + "methods", + "using" + ], + [ + "clear", + "held", + "work", + "taken" + ], + [ + "lateral", + "posterior", + "lateral" + ], + [ + "left", + "right" + ], + [ + "supported", + "supported" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "cortex", + "thalamus", + "thalamus" + ], + [ + "considered", + "is" + ], + [ + "area", + "work", + "where" + ], + [ + "individual", + "each" + ], + [ + "work", + "all" + ], + [ + "considered", + "key", + "work", + "important" + ] + ], "keywords": [] }, { "name": "scil_lesions_info", "docstring": "This script will output information about lesion load in bundle(s).\nThe input can either be streamlines, a binary bundle map, or a bundle voxel\nlabel map.\n\nTo be considered a valid lesion, the lesion volume must be at least\nmin_lesion_vol mm3. This avoids the detection of thousands of single-voxel\nlesions if an automatic lesion segmentation tool is used.\n\nFormerly: scil_analyse_lesions_load.py", "help": "usage: scil_lesions_info.py [-h]\n [--bundle BUNDLE | --bundle_mask BUNDLE_MASK | --bundle_labels_map BUNDLE_LABELS_MAP]\n [--min_lesion_vol MIN_LESION_VOL]\n [--out_lesion_atlas FILE]\n [--out_lesion_stats FILE]\n [--out_streamlines_stats FILE] [--indent INDENT]\n [--sort_keys] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_lesion out_json\n\nThis script will output information about lesion load in bundle(s).\nThe input can either be streamlines, a binary bundle map, or a bundle voxel\nlabel map.\n\nTo be considered a valid lesion, the lesion volume must be at least\nmin_lesion_vol mm3. 
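The mm3 threshold translates into a voxel count through the voxel volume. A minimal sketch with nibabel, assuming a single-lesion binary mask and a hypothetical file name:

    import nibabel as nib
    import numpy as np

    img = nib.load("lesion_mask.nii.gz")              # hypothetical input mask
    voxel_vol = np.prod(img.header.get_zooms()[:3])   # voxel volume in mm3
    lesion_vol = np.count_nonzero(img.get_fdata()) * voxel_vol
    print(lesion_vol, "mm3")  # below min_lesion_vol, the lesion is discarded

With the default threshold of 7 mm3, a 1 mm isotropic mask therefore needs at least 7 lesion voxels.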
This avoids the detection of thousands of single-voxel\nlesions if an automatic lesion segmentation tool is used.\n\nFormerly: scil_analyse_lesions_load.py\n\npositional arguments:\n in_lesion Binary mask of the lesion(s) (.nii.gz).\n out_json Output file for lesion information (.json).\n\noptions:\n -h, --help show this help message and exit\n --bundle BUNDLE Path of the bundle file (.trk).\n --bundle_mask BUNDLE_MASK\n Path of the bundle binary mask (.nii.gz).\n --bundle_labels_map BUNDLE_LABELS_MAP\n Path of the bundle labels map (.nii.gz).\n --min_lesion_vol MIN_LESION_VOL\n Minimum lesion volume in mm3 [7].\n --out_lesion_atlas FILE\n Save the labeled lesion(s) map (.nii.gz).\n --out_lesion_stats FILE\n Save the lesion-wise volume measure (.json).\n --out_streamlines_stats FILE\n Save the lesion-wise streamline count (.json).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "work", + "and" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "invalid", + "valid", + "valid" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "possibility", + "avoid" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "tool", + "tool" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "streamline", + "streamline" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "maps", + "map" + ], + [ + "bundles", + "bundle" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_mti_adjust_B1_header", "docstring": "Correct the B1 map header problem by applying the scaling (slope) and setting\nthe slope to 1.", "help": "usage: scil_mti_adjust_B1_header.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_B1_map out_B1_map in_B1_json\n\nCorrect the B1 map header problem by applying the scaling (slope) and setting\nthe slope to 1.\n\npositional arguments:\n in_B1_map Path to input B1 map file.\n out_B1_map Path to output B1 map file.\n in_B1_json Json file of the B1 map.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "work", + "and" + ], + [ + "held", + "on" + ], + [ + "methods", + "using" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "question", + "problem" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "maps", + "map" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "is" + ], + [ + "applied", + "applying" + ] + ], "keywords": [] }, { "name": "scil_mti_maps_MT", "docstring": "This script computes two myelin indices maps from the Magnetization Transfer\n(MT) images.\nMagnetization Transfer is a contrast mechanism in tissue resulting from the\nproton exchange between non-aqueous protons (from macromolecules and their\nclosely associated water molecules, the \"bound\" pool) and protons in the free\nwater pool called aqueous protons. This exchange attenuates the MRI signal,\nintroducing microstructure-dependent contrast. MT's effect reflects the\nrelative density of macromolecules such as proteins and lipids; it has been\nassociated with myelin content in white matter of the brain.\n\nDifferent contrasts can be obtained with an off-resonance pulse saturating the\nprotons of non-aqueous molecules at a chosen irradiation frequency. The MT maps are\nobtained using three or four contrasts: a single positive frequency image\nand/or a single negative frequency image, and two unsaturated contrasts as\nreference. These two references should be acquired with predominant PD\n(proton density) and T1 weighting at different excitation flip angles\n(a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all input must have a matching json file with the same filename\n - all contrasts must have the same number of echoes and be coregistered\n between them before running the script.\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\n\nThe output consists of a MT_native_maps folder containing the 2 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n The MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n The MT saturation is a pseudo-quantitative map representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\n\nThe final maps from MT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 1 or 2 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n\n>>> scil_mti_maps_MT.py path/to/output/directory\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_positive path/to/echo*pos.nii.gz --in_negative path/to/echo*neg.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.", "help": "usage: scil_mti_maps_MT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n [--in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]]\n [--extended] [--filtering]\n [--in_jsons PD_json T1_json | --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time]\n [--in_B1_map IN_B1_MAP]\n [--B1_correction_method {empiric,model_based}]\n [--B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]]\n [--B1_nominal B1_NOMINAL]\n [--B1_smooth_dims B1_SMOOTH_DIMS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dir\n\nThis script computes two myelin indices maps from the Magnetization Transfer\n(MT) images.\nMagnetization Transfer is a contrast mechanism in tissue resulting from the\nproton exchange between non-aqueous protons (from macromolecules and their\nclosely associated water molecules, the \"bound\" pool) and protons in the free\nwater pool called aqueous protons. This exchange attenuates the MRI signal,\nintroducing microstructure-dependent contrast. MT's effect reflects the\nrelative density of macromolecules such as proteins and lipids; it has been\nassociated with myelin content in white matter of the brain.\n\nDifferent contrasts can be obtained with an off-resonance pulse saturating the\nprotons of non-aqueous molecules at a chosen irradiation frequency. The MT maps are\nobtained using three or four contrasts: a single positive frequency image\nand/or a single negative frequency image, and two unsaturated contrasts as\nreference. 
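As a rough illustration, the MT ratio is conventionally the relative signal drop caused by the saturation pulse. A simplified sketch using the Complementary_maps file names listed above (the script's exact computation, e.g. how echoes and contrasts are combined, may differ):

    import nibabel as nib
    import numpy as np

    ref = nib.load("mtoff_PD.nii.gz").get_fdata()   # unsaturated reference
    sat = nib.load("positive.nii.gz").get_fdata()   # MT-weighted image
    with np.errstate(divide="ignore", invalid="ignore"):
        mtr = 100.0 * (ref - sat) / ref             # MTR, in percent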
These two references should be acquired with predominant PD\n(proton density) and T1 weighting at different excitation flip angles\n(a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all input must have a matching json file with the same filename\n - all contrasts must have the same number of echoes and be coregistered\n between them before running the script.\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\nThe output consists of a MT_native_maps folder containing the 2 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n The MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n The MT saturation is a pseudo-quantitative map representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\nThe final maps from MT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 1 or 2 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n>>> scil_mti_maps_MT.py path/to/output/directory\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_positive path/to/echo*pos.nii.gz --in_negative path/to/echo*neg.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.\n\npositional arguments:\n out_dir Path to output folder.\n\noptions:\n -h, --help show this help message and exit\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n --mask MASK Path to the binary brain mask.\n --extended If set, outputs the folder Complementary_maps.\n --filtering Gaussian filtering to remove Gibbs ringing. Not recommended.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nContrast maps:\n Path to echoes corresponding to contrast images. 
All contrasts must have \n the same number of echoes and be coregistered between them. Use * to include all echoes. \n The in_mtoff_pd input and at least one of in_positive or in_negative are required.\n\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n Path to all echoes corresponding to the positive frequency \n saturation pulse.\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n Path to all echoes corresponding to the negative frequency \n saturation pulse.\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n Path to all echoes corresponding to the predominant PD \n (proton density) weighting images with no saturation pulse.\n --in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]\n Path to all echoes corresponding to the predominant T1 \n weighting images with no saturation pulse. This one is optional, \n since it is only needed for the calculation of MTsat. \n Acquisition parameters should also be set with this image.\n\nAcquisition parameters:\n Acquisition parameters required for MTsat and ihMTsat calculation. \n These are the excitation flip angles (a_PD, a_T1), in DEGREES, and \n repetition times (TR_PD, TR_T1) of the PD and T1 images, in SECONDS. \n Can be given through json files (--in_jsons) or directly (--in_acq_parameters).\n\n --in_jsons PD_json T1_json\n Path to MToff PD json file and MToff T1 json file, in that order. \n The acquisition parameters will be extracted from these files. \n Must come from a Philips acquisition; otherwise, use in_acq_parameters.\n --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time\n Acquisition parameters in that order: flip angle of mtoff_PD, \n flip angle of mtoff_T1, repetition time of mtoff_PD, \n repetition time of mtoff_T1\n\nB1 correction:\n --in_B1_map IN_B1_MAP\n Path to the B1 map coregistered to the MT contrasts.\n --B1_correction_method {empiric,model_based}\n Choice of B1 correction method. Choose between empiric and model-based. \n Note that the model-based method requires a B1 fitvalues file. \n Both methods will only correct the saturation measures. [empiric]\n --B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]\n Path to B1 fitvalues files obtained externally. Should be one .mat \n file per input MT-on image, given in this specific order: \n positive frequency saturation, negative frequency saturation.\n --B1_nominal B1_NOMINAL\n Nominal value for the B1 map. For Philips, should be 100. [100]\n --B1_smooth_dims B1_SMOOTH_DIMS\n Dimension of the square window used for B1 smoothing, in number of voxels. 
[5]\n", - "synonyms": [], + "synonyms": [ + [ + "individual", + "specific", + "unique", + "variety", + "different" + ], + [ + "positive", + "negative" + ], + [ + "methods", + "method" + ], + [ + "image", + "images" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "specific", + "specific" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "variety", + "include" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "maps", + "map" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "comprised", + "comprising", + "consists" + ], + [ + "average", + "per" + ], + [ + "work", + "also" + ], + [ + "result", + "following" + ], + [ + "left", + "from" + ], + [ + "positive", + "positive" + ], + [ + "thinking", + "you" + ], + [ + "step", + "steps" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "step", + "work", + "come" + ], + [ + "imaging", + "mri" + ], + [ + "future", + "will" + ], + [ + "create", + "create" + ], + [ + "considered", + "involved", + "work", + "been" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "signal", + "signal" + ], + [ + "result", + "resulting" + ], + [ + "analysis", + "methodology", + "methods", + "processes", + "methods" + ], + [ + "contrast", + "contrasts" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "result", + "effect" + ], + [ + "considered", + "specific", + "variety", + "such" + ], + [ + "left", + "off" + ], + [ + "order", + "required" + ], + [ + "brain", + "tissue" + ], + [ + "considered", + "are" + ], + [ + "action", + "clear", + "future", + "result", + "step", + "change" + ], + [ + "represent", + "representing" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "methods", + "using" + ], + [ + "left", + "after" + ], + [ + "nuclei", + "protons" + ], + [ + "increase", + "total", + "amount" + ], + [ + "degree", + "degrees" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "left", + "before" + ], + [ + "individual", + "each" + ], + [ + "long", + "two" + ], + [ + "based", + "based" + ], + [ + "dominant", + "predominant" + ], + [ + "contrast", + "contrast" + ], + [ + "total", + "number" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "order", + "order" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ 
+ "long", + "result", + "work", + "working", + "time" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "binary", + "binary" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "weighted", + "weighted" + ], + [ + "work", + "working", + "done" + ], + [ + "total", + "100" + ], + [ + "maps", + "maps" + ], + [ + "result", + "since" + ] + ], "keywords": [] }, { "name": "scil_mti_maps_ihMT", "docstring": "This script computes four myelin indices maps from the Magnetization Transfer\n(MT) and inhomogeneous Magnetization Transfer (ihMT) images. Magnetization\nTransfer is a contrast mechanism in tissue resulting from the proton exchange\nbetween non-aqueous protons (from macromolecules and their closely associated\nwater molecules, the \"bound\" pool) and protons in the free water pool called\naqueous protons. This exchange attenuates the MRI signal, introducing\nmicrostructure-dependent contrast. MT's effect reflects the relative density\nof macromolecules such as proteins and lipids, it has been associated with\nmyelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse prior to image\nacquisition (a prepulse), saturating the protons on non-aqueous molecules,\nby applying different frequency irradiation. The two MT maps and two ihMT maps\nare obtained using six contrasts: single positive frequency image, single\nnegative frequency image, dual alternating positive/negative frequency image,\ndual alternating negative/positive frequency image (saturated images);\nand two unsaturated contrasts as reference. 
These two references should be\nacquired with predominant PD (proton density) and T1 weighting at different\nexcitation flip angles (a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all contrasts must have a same number of echoes and coregistered\n between them before running the script\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\n\nThe output consists of a ihMT_native_maps folder containing the 4 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n - ihMTR.nii.gz : inhomogeneous Magnetization Transfer Ratio map\n The (ih)MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n - ihMTsat.nii.gz : inhomogeneous Magnetization Transfer saturation map\n The (ih)MT saturation is a pseudo-quantitative maps representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - altnp.nii.gz : dual alternating negative and positive frequency image\n - altpn.nii.gz : dual alternating positive and negative frequency image\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_d.nii.gz : MTsat computed from the mean dual frequency images\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\n\nThe final maps from ihMT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 3 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation, dual frequency saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n\n>>> scil_mti_maps_ihMT.py path/to/output/directory\n --in_altnp path/to/echo*altnp.nii.gz --in_altpn path/to/echo*altpn.nii.gz\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_negative path/to/echo*neg.nii.gz --in_positive path/to/echo*pos.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.", "help": "usage: scil_mti_maps_ihMT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]\n --in_altnp IN_ALTNP [IN_ALTNP ...] --in_altpn\n IN_ALTPN [IN_ALTPN ...] --in_negative IN_NEGATIVE\n [IN_NEGATIVE ...] --in_positive IN_POSITIVE\n [IN_POSITIVE ...] 
--in_mtoff_pd IN_MTOFF_PD\n [IN_MTOFF_PD ...]\n [--in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]]\n [--extended] [--filtering]\n [--in_jsons PD_json T1_json | --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time]\n [--in_B1_map IN_B1_MAP]\n [--B1_correction_method {empiric,model_based}]\n [--B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]]\n [--B1_nominal B1_NOMINAL]\n [--B1_smooth_dims B1_SMOOTH_DIMS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dir\n\nThis script computes four myelin indices maps from the Magnetization Transfer\n(MT) and inhomogeneous Magnetization Transfer (ihMT) images. Magnetization\nTransfer is a contrast mechanism in tissue resulting from the proton exchange\nbetween non-aqueous protons (from macromolecules and their closely associated\nwater molecules, the \"bound\" pool) and protons in the free water pool called\naqueous protons. This exchange attenuates the MRI signal, introducing\nmicrostructure-dependent contrast. MT's effect reflects the relative density\nof macromolecules such as proteins and lipids; it has been associated with\nmyelin content in white matter of the brain.\n\nDifferent contrasts can be obtained by applying an off-resonance pulse prior\nto image acquisition (a prepulse), saturating the protons of non-aqueous\nmolecules at different irradiation frequencies. The two MT maps and two ihMT maps\nare obtained using six contrasts: single positive frequency image, single\nnegative frequency image, dual alternating positive/negative frequency image,\ndual alternating negative/positive frequency image (saturated images);\nand two unsaturated contrasts as reference. These two references should be\nacquired with predominant PD (proton density) and T1 weighting at different\nexcitation flip angles (a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all contrasts must have the same number of echoes and be coregistered\n between them before running the script\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\nThe output consists of an ihMT_native_maps folder containing the 4 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n - ihMTR.nii.gz : inhomogeneous Magnetization Transfer Ratio map\n The (ih)MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n - ihMTsat.nii.gz : inhomogeneous Magnetization Transfer saturation map\n The (ih)MT saturation is a pseudo-quantitative map representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - altnp.nii.gz : dual alternating negative and positive frequency image\n - altpn.nii.gz : dual alternating positive and negative frequency image\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_d.nii.gz : MTsat computed from the mean dual frequency images\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - 
R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\nThe final maps from ihMT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 3 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation, dual frequency saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n>>> scil_mti_maps_ihMT.py path/to/output/directory\n --in_altnp path/to/echo*altnp.nii.gz --in_altpn path/to/echo*altpn.nii.gz\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_negative path/to/echo*neg.nii.gz --in_positive path/to/echo*pos.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.\n\npositional arguments:\n out_dir Path to output folder.\n\noptions:\n -h, --help show this help message and exit\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n --mask MASK Path to the binary brain mask.\n --extended If set, outputs the folder Complementary_maps.\n --filtering Gaussian filtering to remove Gibbs ringing. Not recommended.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nContrast maps:\n Path to echoes corresponding to contrast images. All contrasts must have \n the same number of echoes and be coregistered between them. Use * to include all echoes.\n\n --in_altnp IN_ALTNP [IN_ALTNP ...]\n Path to all echoes corresponding to the alternation of \n negative and positive frequency saturation pulse.\n --in_altpn IN_ALTPN [IN_ALTPN ...]\n Path to all echoes corresponding to the alternation of \n positive and negative frequency saturation pulse.\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n Path to all echoes corresponding to the negative frequency \n saturation pulse.\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n Path to all echoes corresponding to the positive frequency \n saturation pulse.\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n Path to all echoes corresponding to the predominant PD \n (proton density) weighting images with no saturation pulse.\n --in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]\n Path to all echoes corresponding to the predominant T1 \n weighting images with no saturation pulse. This one is optional, \n since it is only needed for the calculation of MTsat and ihMTsat. \n Acquisition parameters should also be set with this image.\n\nAcquisition parameters:\n Acquisition parameters required for MTsat and ihMTsat calculation. \n These are the excitation flip angles (a_PD, a_T1), in DEGREES, and \n repetition times (TR_PD, TR_T1) of the PD and T1 images, in SECONDS. \n Can be given through json files (--in_jsons) or directly (--in_acq_parameters).\n\n --in_jsons PD_json T1_json\n Path to MToff PD json file and MToff T1 json file, in that order. 
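For orientation, the ratio maps listed above reduce to voxel-wise arithmetic on the mean saturated and unsaturated contrasts. A minimal sketch of one common formulation from the ihMT literature; this is not the exact scil_mti_maps_ihMT.py implementation, which also handles echo combination, masking and B1 correction:

import numpy as np

def ihmt_ratios(positive, negative, altpn, altnp, mtoff_pd):
    # Mean of the two single-frequency saturated images.
    single = 0.5 * (positive + negative)
    with np.errstate(divide='ignore', invalid='ignore'):
        # MTR: signal drop caused by the saturation pulse, in percent
        # of the unsaturated PD-weighted reference.
        mtr = 100.0 * (mtoff_pd - single) / mtoff_pd
        # ihMTR: extra attenuation of dual- vs single-frequency
        # saturation (common formulation; conventions vary).
        ihmtr = 100.0 * (positive + negative - altpn - altnp) / mtoff_pd
    return np.nan_to_num(mtr), np.nan_to_num(ihmtr)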
\n The acquisition parameters will be extracted from these files. \n Must come from a Philips acquisition; otherwise, use --in_acq_parameters.\n --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time\n Acquisition parameters in that order: flip angle of mtoff_PD, \n flip angle of mtoff_T1, repetition time of mtoff_PD, \n repetition time of mtoff_T1\n\nB1 correction:\n --in_B1_map IN_B1_MAP\n Path to the B1 map, coregistered to the MT contrasts.\n --B1_correction_method {empiric,model_based}\n Choice of B1 correction method. Choose between empiric and model-based. \n Note that the model-based method requires a B1 fitvalues file. \n Both methods will only correct the saturation measures. [empiric]\n --B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]\n Path to B1 fitvalues files obtained externally. Should be one .mat \n file per input MT-on image, given in this specific order: \n positive frequency saturation, negative frequency saturation, \n dual frequency saturation.\n --B1_nominal B1_NOMINAL\n Nominal value for the B1 map. For Philips, should be 100. [100]\n --B1_smooth_dims B1_SMOOTH_DIMS\n Dimension of the square window used for B1 smoothing, in number of voxels. [5]\n", - "synonyms": [], + "synonyms": [ + [ + "individual", + "specific", + "unique", + "variety", + "different" + ], + [ + "positive", + "negative" + ], + [ + "methods", + "method" + ], + [ + "image", + "images" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "specific", + "specific" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "variety", + "include" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "maps", + "map" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "comprised", + "comprising", + "consists" + ], + [ + "average", + "per" + ], + [ + "work", + "also" + ], + [ + "result", + "following" + ], + [ + "left", + "from" + ], + [ + "positive", + "positive" + ], + [ + "thinking", + "you" + ], + [ + "step", + "steps" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "step", + "work", + "come" + ], + [ + "imaging", + "mri" + ], + [ + "future", + "will" + ], + [ + "create", + "create" + ], + [ + "considered", + "involved", + "work", + "been" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "signal", + "signal" + ], + [ + "result", + "resulting" + ], + [ + "applied", + "applying" + ], + [ + "analysis", + "methodology", + "methods", + "processes", + "methods" + ], + [ + "contrast", + "contrasts" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "result", + "effect" + ], + [ + "considered", + "specific", + "variety", + "such" + ], + [ + "left", + "off" + ], + [ + "order", + "required" + ], + [ + "brain", + "tissue" + ], + [ + "considered", + "are" + ], + [ + "action", + "clear", + "future", + "result", + "step", + "change" + ], + [ + "represent", + "representing" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + 
], + [ + "considered", + "role", + "work", + "as" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "methods", + "using" + ], + [ + "left", + "after" + ], + [ + "nuclei", + "protons" + ], + [ + "increase", + "total", + "amount" + ], + [ + "degree", + "degrees" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "left", + "before" + ], + [ + "individual", + "each" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "long", + "two" + ], + [ + "based", + "based" + ], + [ + "dominant", + "predominant" + ], + [ + "contrast", + "contrast" + ], + [ + "total", + "number" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "order", + "order" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "result", + "work", + "working", + "time" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "binary", + "binary" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "weighted", + "weighted" + ], + [ + "work", + "working", + "done" + ], + [ + "total", + "100" + ], + [ + "maps", + "maps" + ], + [ + "result", + "since" + ] + ], "keywords": [] }, { "name": "scil_plot_stats_per_point", "docstring": "Plot all mean/std per point for a subject or population json file from\ntractometry-flow.\nWARNING: For population, the displayed STDs only show the variation\nof the means. They do not account for intra-subject STDs.\n\nFormerly: scil_plot_mean_std_per_point.py", "help": "usage: scil_plot_stats_per_point.py [-h] [--stats_over_population]\n [--nb_pts NB_PTS] [--display_means]\n [--fill_color FILL_COLOR | --dict_colors DICT_COLORS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_json out_dir\n\nPlot all mean/std per point for a subject or population json file from\ntractometry-flow.\nWARNING: For population, the displayed STDs only show the variation\nof the means. They do not account for intra-subject STDs.\n\nFormerly: scil_plot_mean_std_per_point.py\n\npositional arguments:\n in_json JSON file containing the mean/std per point. For example, can be created using scil_bundle_mean_std.py.\n out_dir Output directory.\n\noptions:\n -h, --help show this help message and exit\n --stats_over_population\n If set, consider the input stats to be over an entire population and not subject-based.\n --nb_pts NB_PTS Force the number of divisions for the bundles.\n Avoid unequal plots across datasets, replace missing data with zeros.\n --display_means Display the subjects means as semi-transparent lines.\n Poor results when the number of subjects is high.\n --fill_color FILL_COLOR\n Hexadecimal RGB color filling the region between mean +/- std. 
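The mean +/- std band described here is essentially a fill-between plot. A minimal sketch of the same idea with matplotlib, assuming a hypothetical json layout of the kind produced by scil_bundle_mean_std.py (file name and keys are illustrative):

import json
import numpy as np
import matplotlib.pyplot as plt

# Hypothetical layout: {"AF_L": {"FA": {"means": [...], "stds": [...]}}}.
with open('AF_L__FA.json') as f:
    stats = json.load(f)['AF_L']['FA']

means = np.asarray(stats['means'])
stds = np.asarray(stats['stds'])
x = np.arange(len(means))

plt.plot(x, means, color='#0072B2')           # mean profile
plt.fill_between(x, means - stds, means + stds,
                 color='#0072B2', alpha=0.3)  # mean +/- std band
plt.xlabel('Point along the bundle')
plt.ylabel('FA')
plt.savefig('AF_L__FA.png')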
The hexadecimal RGB color should be formatted as 0xRRGGBB.\n --dict_colors DICT_COLORS\n Dictionary mapping basename to color. Same convention as --color.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "bundles", + "bundles" + ], + [ + "average", + "per" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "difference", + "point" + ], + [ + "subject", + "subjects", + "subjects" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "order", + "set" + ], + [ + "long", + "over" + ], + [ + "maps", + "mapping" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "possibility", + "avoid" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "indicating", + "showing" + ], + [ + "population", + "population" + ], + [ + "methods", + "using" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "areas", + "across" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "matter", + "question", + "does" + ], + [ + "result", + "results" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "based", + "based" + ], + [ + "area", + "areas", + "region", + "regions", + "region" + ], + [ + "create", + "created" + ], + [ + "matter", + "question", + "subject", + "subjects", + "subject" + ], + [ + "meaning", + "order", + "result", + "step", + "true", + "means" + ], + [ + "considered", + "possibility", + "question", + "step", + "consider" + ] + ], "keywords": [] }, { "name": "scil_qball_metrics", "docstring": "Script to compute the Constant Solid Angle (CSA) or Analytical Q-ball model,\nthe generalized fractional anisotropy (GFA) and the peaks of the model.\n\nBy default, will output all possible files, using default names. 
Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Descoteaux et al MRM 2007, Aganj et al MRM 2009] for details and\n[Cote et al MEDIA 2013] for quantitative comparisons.\n\nFormerly: scil_compute_qball_metrics.py", "help": "usage: scil_qball_metrics.py [-h] [-f] [--sh_order SH_ORDER] [--mask MASK]\n [--use_qball] [--not_all] [--gfa GFA]\n [--peaks PEAKS] [--peak_indices PEAK_INDICES]\n [--sh SH] [--nufo NUFO] [--a_power A_POWER]\n [--b0_threshold thr] [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nScript to compute the Constant Solid Angle (CSA) or Analytical Q-ball model,\nthe generalized fractional anisotropy (GFA) and the peaks of the model.\n\nBy default, will output all possible files, using default names. Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Descoteaux et al MRM 2007, Aganj et al MRM 2009] for details and\n[Cote et al MEDIA 2013] for quantitative comparisons.\n\nFormerly: scil_compute_qball_metrics.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --sh_order SH_ORDER Spherical harmonics order. Must be a positive even number [4].\n --mask MASK Path to a binary mask. Only data inside the mask will be used for computations and reconstruction [None].\n --use_qball If set, qball will be used as the odf reconstruction model instead of CSA.\n --not_all If set, will only save the files specified using the following flags.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
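The GFA this script outputs has a compact closed form: the standard deviation of the ODF over the sphere divided by its RMS (Tuch, MRM 2004). A sketch, assuming the ODF has already been sampled on sphere vertices (not scilpy's exact code path):

import numpy as np

def gfa(odf, axis=-1):
    """GFA = sqrt(n * sum((psi - mean)^2) / ((n - 1) * sum(psi^2)))."""
    n = odf.shape[axis]
    mean = odf.mean(axis=axis, keepdims=True)
    num = n * np.sum((odf - mean) ** 2, axis=axis)
    den = (n - 1) * np.sum(odf ** 2, axis=axis)
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.nan_to_num(np.sqrt(num / den))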
\n Default level is warning, default when using -v is info.\n\nFile flags:\n --gfa GFA Output filename for the generalized fractional anisotropy [gfa.nii.gz].\n --peaks PEAKS Output filename for the extracted peaks [peaks.nii.gz].\n --peak_indices PEAK_INDICES\n Output filename for the generated peaks indices on the sphere [peaks_indices.nii.gz].\n --sh SH Output filename for the spherical harmonics coefficients [sh.nii.gz].\n --nufo NUFO Output filename for the NUFO map [nufo.nii.gz].\n --a_power A_POWER Output filename for the anisotropic power map[anisotropic_power.nii.gz].\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "step", + "continue" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "work", + "and" + ], + [ + "result", + "following" + ], + [ + "considered", + "are" + ], + [ + "methodology", + "analytical" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "positive", + "positive" + ], + [ + "work", + "find" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "specific", + "specific" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "diffusion", + "diffusion" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "parameters", + "specified" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "order", + "work", + "instead" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "exist", + "exists" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "lack", + "minimal" + ], + [ + "order", + "allow" + ], + [ + "step", + "start" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "maps", + "map" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_rgb_convert", "docstring": "Converts a RGB 
image encoded as a 4D image to a RGB image encoded as\na 3D image, or vice versa.\n\nTypically, most software tools used in the SCIL (including MI-Brain) use\nthe former, while Trackvis uses the latter.\n\nInput\n-Case 1: 4D image where the 4th dimension contains 3 values.\n-Case 2: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value.\n\nOutput\n-Case 1: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value (uint8).\n-Case 2: 4D image where the 4th dimension contains 3 values (uint8).\n\nFormerly: scil_convert_rgb.py", "help": "usage: scil_rgb_convert.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image\n\nConverts a RGB image encoded as a 4D image to a RGB image encoded as\na 3D image, or vice versa.\n\nTypically, most software tools used in the SCIL (including MI-Brain) use\nthe former, while Trackvis uses the latter.\n\nInput\n-Case 1: 4D image where the 4th dimension contains 3 values.\n-Case 2: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value.\n\nOutput\n-Case 1: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value (uint8).\n-Case 2: 4D image where the 4th dimension contains 3 values (uint8).\n\nFormerly: scil_convert_rgb.py\n\npositional arguments:\n in_image name of input RGB image.\n Either 4D or 3D image.\n out_image name of output RGB image.\n Either 3D or 4D image.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "considered", + "highly", + "long", + "work", + "most" + ], + [ + "work", + "and" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "long", + "a" + ], + [ + "application", + "systems", + "software" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "considered", + "form", + "latter" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "methods", + "using" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "area", + "work", + "where" + ], + [ + "individual", + "each" + ], + [ + "methods", + "tool", + "tools" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "matter", + "question", + "case" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_sh_convert", "docstring": "Convert a SH file between the two of the following basis choices:\n'descoteaux07', 'descoteaux07_legacy', 'tournier07' or 'tournier07_legacy'.\nUsing the sh_basis argument, both the input and the output SH bases must be\ngiven, in the order. 
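The two RGB encodings described for scil_rgb_convert above map naturally onto numpy: a plain 4D array versus a 3D array with a structured tuple-per-voxel dtype. A minimal sketch of the repacking, independent of the script's actual nibabel handling:

import numpy as np

# Trackvis-style RGB voxel: a tuple of 3 uint8 values.
rgb_dtype = np.dtype([('R', 'uint8'), ('G', 'uint8'), ('B', 'uint8')])

def fourd_to_threed(data_4d):
    """(X, Y, Z, 3) array -> (X, Y, Z) array of uint8 RGB tuples."""
    out = np.empty(data_4d.shape[:3], dtype=rgb_dtype)
    out['R'] = data_4d[..., 0].astype(np.uint8)
    out['G'] = data_4d[..., 1].astype(np.uint8)
    out['B'] = data_4d[..., 2].astype(np.uint8)
    return out

def threed_to_fourd(data_3d):
    """(X, Y, Z) RGB-tuple array -> (X, Y, Z, 3) uint8 array."""
    return np.stack([data_3d['R'], data_3d['G'], data_3d['B']], axis=-1)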
For more information about the bases, see\nhttps://docs.dipy.org/stable/theory/sh_basis.html.\n\nFormerly: scil_convert_sh_basis.py", "help": "usage: scil_sh_convert.py [-h] [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_sh out_sh\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n\nConvert a SH file between the two of the following basis choices:\n'descoteaux07', 'descoteaux07_legacy', 'tournier07' or 'tournier07_legacy'.\nUsing the sh_basis argument, both the input and the output SH bases must be\ngiven, in the order. For more information about the bases, see\nhttps://docs.dipy.org/stable/theory/sh_basis.html.\n\nFormerly: scil_convert_sh_basis.py\n\npositional arguments:\n in_sh Input SH filename. (nii or nii.gz)\n out_sh Output SH filename. (nii or nii.gz)\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Both the input and output bases are required, in that order.\n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy', 'tournier07']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n\noptions:\n -h, --help show this help message and exit\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "order", + "required" + ], + [ + "work", + "and" + ], + [ + "result", + "following" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "long", + "work", + "more" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "question", + "argument" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "long", + "two" + ], + [ + "step", + "start" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_sh_fusion", "docstring": "Merge a list of Spherical Harmonics files.\n\nThis 
merges the coefficients of multiple Spherical Harmonics files by taking,\nfor each coefficient, the one with the largest magnitude.\n\nCan be used to merge fODFs computed from different shells into 1, while\nconserving the most relevant information.\n\nBased on [1] and [2].\n\nFormerly: scil_merge_sh.py", "help": "usage: scil_sh_fusion.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_shs [in_shs ...] out_sh\n\nMerge a list of Spherical Harmonics files.\n\nThis merges the coefficients of multiple Spherical Harmonics files by taking,\nfor each coefficient, the one with the largest magnitude.\n\nCan be used to merge fODFs computed from different shells into 1, while\nconserving the most relevant information.\n\nBased on [1] and [2].\n\nFormerly: scil_merge_sh.py\n\npositional arguments:\n in_shs List of SH files.\n out_sh output SH file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReference:\n[1] Garyfallidis, E., Zucchelli, M., Houde, J-C., Descoteaux, M.\n How to perform best ODF reconstruction from the Human Connectome\n Project sampling scheme?\n ISMRM 2014.\n\n[2] Khachaturian, M. H., Wisco, J. J., & Tuch, D. S. (2007). Boosting the\n sampling efficiency of q\u2010ball imaging using multiple wavevector fusion.\n Magnetic Resonance in Medicine: An Official Journal of the International\n Society for Magnetic Resonance in Medicine, 57(2), 289-296.\n", - "synonyms": [], + "synonyms": [ + [ + "individual", + "specific", + "unique", + "variety", + "different" + ], + [ + "animal", + "human", + "human" + ], + [ + "considered", + "highly", + "long", + "work", + "most" + ], + [ + "work", + "and" + ], + [ + "connectome", + "connectome" + ], + [ + "left", + "into" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "project", + "project" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "matter", + "question", + "thinking", + "true", + "work", + "working", + "how" + ], + [ + "long", + "with" + ], + [ + "specific", + "relevant" + ], + [ + "methods", + "using" + ], + [ + "imaging", + "imaging" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "individual", + "each" + ], + [ + "global", + "international" + ], + [ + "large", + "largest" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "based", + "based" + ] + ], "keywords": [] }, { "name": "scil_sh_to_aodf", "docstring": "Script to estimate asymmetric ODFs (aODFs) from a spherical harmonics image.\n\nTwo methods are available:\n * Unified filtering [1] combines four asymmetric filtering methods into\n a single equation and relies on a combination of four gaussian filters.\n * Cosine filtering [2] is a simpler implementation using cosine distance\n for assigning weights to neighbours.\n\nUnified filtering can be accelerated using OpenCL with the option --use_opencl.\nMake sure you have pyopencl 
installed before using this option. By default, the\nOpenCL program will run on the cpu. To use a gpu instead, also specify the\noption --device gpu.", "help": "usage: scil_sh_to_aodf.py [-h] [--out_sym OUT_SYM]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--method {unified,cosine}]\n [--sigma_spatial SIGMA_SPATIAL]\n [--sigma_align SIGMA_ALIGN]\n [--sigma_range SIGMA_RANGE]\n [--sigma_angle SIGMA_ANGLE] [--disable_spatial]\n [--disable_align] [--disable_range]\n [--include_center] [--win_hwidth WIN_HWIDTH]\n [--sharpness SHARPNESS] [--device {cpu,gpu}]\n [--use_opencl] [--patch_size PATCH_SIZE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_sh\n\nScript to estimate asymmetric ODFs (aODFs) from a spherical harmonics image.\n\nTwo methods are available:\n * Unified filtering [1] combines four asymmetric filtering methods into\n a single equation and relies on a combination of four gaussian filters.\n * Cosine filtering [2] is a simpler implementation using cosine distance\n for assigning weights to neighbours.\n\nUnified filtering can be accelerated using OpenCL with the option --use_opencl.\nMake sure you have pyopencl installed before using this option. By default, the\nOpenCL program will run on the cpu. To use a gpu instead, also specify the\noption --device gpu.\n\npositional arguments:\n in_sh Path to the input file.\n out_sh File name for averaged signal.\n\noptions:\n -h, --help show this help message and exit\n --out_sym OUT_SYM Name of optional symmetric output. [None]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere used for the SH to SF projection. [repulsion200]\n --method {unified,cosine}\n Method for estimating asymmetric ODFs [unified].\n One of:\n 'unified': Unified filtering [1].\n 'cosine' : Cosine-based filtering [2].\n --device {cpu,gpu} Device to use for execution. [cpu]\n --use_opencl Accelerate code using OpenCL (requires pyopencl\n and a working OpenCL implementation).\n --patch_size PATCH_SIZE\n OpenCL patch size. [40]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nShared filter arguments:\n --sigma_spatial SIGMA_SPATIAL\n Standard deviation for spatial distance. [1.0]\n\nUnified filter arguments:\n --sigma_align SIGMA_ALIGN\n Standard deviation for alignment filter. [0.8]\n --sigma_range SIGMA_RANGE\n Standard deviation for range filter\n *relative to SF range of image*. 
[0.2]\n --sigma_angle SIGMA_ANGLE\n Standard deviation for angular filter\n (disabled by default).\n --disable_spatial Disable spatial filtering.\n --disable_align Disable alignment filtering.\n --disable_range Disable range filtering.\n --include_center Include center voxel in neighbourhood.\n --win_hwidth WIN_HWIDTH\n Filtering window half-width. Defaults to 3*sigma_spatial.\n\nCosine filter arguments:\n --sharpness SHARPNESS\n Specify sharpness factor to use for\n weighted average. [1.0]\n\n[1] Poirier and Descoteaux, 2024, \"A Unified Filtering Method for Estimating\n Asymmetric Orientation Distribution Functions\", Neuroimage, vol. 287,\n https://doi.org/10.1016/j.neuroimage.2024.120516\n\n[2] Poirier et al, 2021, \"Investigating the Occurrence of Asymmetric Patterns\n in White Matter Fiber Orientation Distribution Functions\", ISMRM 2021\n (abstract 0865)\n", - "synonyms": [], + "synonyms": [ + [ + "methods", + "method" + ], + [ + "orientation", + "orientation" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "form", + "combination" + ], + [ + "considered", + "are" + ], + [ + "involved", + "work", + "working", + "working" + ], + [ + "left", + "into" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "fibre", + "fiber" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "examining", + "involved", + "investigating" + ], + [ + "occurrence", + "occurrence" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "long", + "have" + ], + [ + "left", + "half" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "total", + "40" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "create", + "future", + "step", + "work", + "make" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "variety", + "include" + ], + [ + "long", + "with" + ], + [ + "average", + "average" + ], + [ + "methods", + "using" + ], + [ + "order", + "work", + "instead" + ], + [ + "signal", + "signal" + ], + [ + "left", + "before" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "projection", + "projection" + ], + [ + "considered", + "is" + ], + [ + "project", + "program" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "analysis", + "methodology", + "methods", + "processes", + "methods" + ], + [ + "larger", + "size", + "size" + ], + [ + "weighted", + "weighted" + ], + [ + "long", + "two" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "function", + "functions", + "functions" + ], + [ + "based", + "based" + ], + [ + "clear", + "matter", + "question", + "thinking", + "sure" + ], + [ + "spatial", + "temporal", + "spatial" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_sh_to_rish", "docstring": "Compute the RISH (Rotationally Invariant Spherical Harmonics) features of an SH\nsignal [1].\n\nEach RISH 
feature map is the total energy of its associated order.\nMathematically, it is the sum of the squared SH coefficients of the SH order.\n\nThis script supports both symmetrical and asymmetrical SH images as input, of\nany SH order.\n\nEach RISH feature will be saved as a separate file.\n\n[1] Mirzaalian, Hengameh, et al. \"Harmonizing diffusion MRI data across\nmultiple sites and scanners.\" MICCAI 2015.\nhttps://scholar.harvard.edu/files/hengameh/files/miccai2015.pdf\n\nFormerly: scil_compute_rish_from_sh.py", "help": "usage: scil_sh_to_rish.py [-h] [--full_basis] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_prefix\n\nCompute the RISH (Rotationally Invariant Spherical Harmonics) features of an SH\nsignal [1].\n\nEach RISH feature map is the total energy of its associated order.\nMathematically, it is the sum of the squared SH coefficients of the SH order.\n\nThis script supports both symmetrical and asymmetrical SH images as input, of\nany SH order.\n\nEach RISH feature will be saved as a separate file.\n\n[1] Mirzaalian, Hengameh, et al. \"Harmonizing diffusion MRI data across\nmultiple sites and scanners.\" MICCAI 2015.\nhttps://scholar.harvard.edu/files/hengameh/files/miccai2015.pdf\n\nFormerly: scil_compute_rish_from_sh.py\n\npositional arguments:\n in_sh Path of the sh image. They can be formatted in any sh basis, but we \n expect it to be a symmetrical one. Else, provide --full_basis.\n out_prefix Prefix of the output RISH files to save. Suffixes will be \n based on the sh orders.\n\noptions:\n -h, --help show this help message and exit\n --full_basis Input SH image uses a full SH basis (asymmetrical).\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computation.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
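Concretely, each RISH feature is the energy of one SH order's coefficient block. A sketch under the assumption of a symmetric (even-order) basis laid out order by order, which is the common descoteaux07-style layout; not scilpy's exact code:

import numpy as np

def rish_features(sh_coeffs):
    """sh_coeffs: (..., n_coeffs) symmetric SH image -> list of 3D maps."""
    n_coeffs = sh_coeffs.shape[-1]
    features, start, order = [], 0, 0
    while start < n_coeffs:
        width = 2 * order + 1                     # number of m values at this order
        block = sh_coeffs[..., start:start + width]
        features.append(np.sum(block ** 2, axis=-1))  # sum of squared coefficients
        start += width
        order += 2                                # symmetric basis: even orders only
    return features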
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "image", + "images" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "diffusion", + "diffusion" + ], + [ + "imaging", + "mri" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "supported", + "supports" + ], + [ + "create", + "provide" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "signal", + "signal" + ], + [ + "areas", + "across" + ], + [ + "matter", + "thinking", + "else" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "based", + "based" + ], + [ + "maps", + "map" + ], + [ + "unique", + "features" + ], + [ + "increase", + "total", + "total" + ], + [ + "long", + "full" + ] + ], "keywords": [] }, { "name": "scil_sh_to_sf", "docstring": "Script to sample SF values from a Spherical Harmonics signal. Outputs a Nifti\nfile with the SF values and an associated .bvec file with the chosen\ndirections.\n\nIf converting from SH to a DWI-like SF volume, --in_bval and --in_b0 need\nto be provided to concatenate the b0 image to the SF, and to generate the new\nbvals file. Otherwise, no .bval file will be created.\n\nFormerly: scil_compute_sf_from_sh.py", "help": "usage: scil_sh_to_sf.py [-h]\n (--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724} | --in_bvec IN_BVEC)\n [--dtype {float32,float64}] [--in_bval IN_BVAL]\n [--in_b0 IN_B0] [--out_bval OUT_BVAL]\n [--out_bvec OUT_BVEC] [--b0_scaling]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--full_basis] [--b0_threshold thr] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_sf\n\nScript to sample SF values from a Spherical Harmonics signal. Outputs a Nifti\nfile with the SF values and an associated .bvec file with the chosen\ndirections.\n\nIf converting from SH to a DWI-like SF volume, --in_bval and --in_b0 need\nto be provided to concatenate the b0 image to the SF, and to generate the new\nbvals file. 
Otherwise, no .bval file will be created.\n\nFormerly: scil_compute_sf_from_sh.py\n\npositional arguments:\n in_sh Path of the SH volume.\n out_sf Name of the output SF file to save (bvals/bvecs will be automatically named when necessary).\n\noptions:\n -h, --help show this help message and exit\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere used for the SH to SF projection. \n --in_bvec IN_BVEC Directions used for the SH to SF projection. \n If given, --in_bval must also be provided.\n --dtype {float32,float64}\n Datatype to use for SF computation and output array. [float32]\n --in_bval IN_BVAL b-value file, in FSL format, used to assign a b-value to the \n output SF and generate a `.bval` file.\n - If used, --out_bval is required.\n - The output bval will contain one b-value per point in the SF \n output (i.e. one per point on the --sphere or one per --in_bvec.)\n - The values of the output bval will all be set to the same b-value:\n the average of your in_bval. (Any b0 found in this file, i.e. \n b-values under --b0_threshold, will be removed beforehand.)\n - To add b0s to both the SF volume and the --out_bval file, use --in_b0.\n --in_b0 IN_B0 b0 volume to concatenate to the final SF volume.\n --out_bval OUT_BVAL Optional output bval file.\n --out_bvec OUT_BVEC Optional output bvec file.\n --b0_scaling Scale resulting SF by the b0 image (--in_b0 must be given).\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --full_basis If true, use a full basis for the input SH coefficients.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n Default if not set is 20.\n This value is used with option --in_bval only: any b0 found in the in_bval will be removed.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
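The core projection is available in DIPY; a minimal sketch of the equivalent call, where the file names and sh_order value are illustrative (the keyword has been renamed sh_order_max in recent DIPY releases, and the script itself adds the bval/bvec/b0 handling described above):

import nibabel as nib
from dipy.data import get_sphere
from dipy.reconst.shm import sh_to_sf

sh_img = nib.load('sh.nii.gz')            # illustrative input name
sphere = get_sphere('repulsion724')       # one of the --sphere choices
# Sample the SH signal along the sphere directions -> (X, Y, Z, n_dirs).
sf = sh_to_sf(sh_img.get_fdata(), sphere, sh_order=8,
              basis_type='descoteaux07')
nib.save(nib.Nifti1Image(sf.astype('float32'), sh_img.affine), 'sf.nii.gz')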
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "create", + "generate" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "order", + "required" + ], + [ + "average", + "per" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "considered", + "are" + ], + [ + "difference", + "point" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "direction", + "directions" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "long", + "with" + ], + [ + "average", + "average" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "methods", + "using" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "meaning", + "true", + "true" + ], + [ + "signal", + "signal" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "projection", + "projection" + ], + [ + "image", + "image" + ], + [ + "result", + "resulting" + ], + [ + "considered", + "is" + ], + [ + "represent", + "chosen" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "create", + "lack", + "step", + "work", + "working", + "need" + ], + [ + "step", + "start" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "create", + "created" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "long", + "full" + ], + [ + "order", + "necessary" + ] + ], "keywords": [] }, { "name": "scil_stats_group_comparison", "docstring": "Run group comparison statistics on metrics from tractometry\n1) Separate the sample given a particular variable (group_by) into groups\n\n2) Does Shapiro-Wilk test of normality for every sample\nhttps://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test\n\n3) Does Levene or Bartlett (depending on normality) test of variance\nhomogeneity Levene:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm\nBartlett:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm\n\n4) Test 
the group difference for every measure with the correct test depending\n on the sample (Student, Welch, Mann-Whitney U, ANOVA, Kruskal-Wallis)\nStudent :\nhttps://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test\nWelch :\nhttps://en.wikipedia.org/wiki/Welch%27s_t-test\nMann-Whitney U :\nhttps://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test\nANOVA :\nhttp://www.biostathandbook.com/onewayanova.html\nKruskal-Wallis :\nhttps://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variance\n\n\n5) If the group difference test is positive and the number of groups is greater than\n 2, test the group difference two by two.\n\n6) Generate the result for all metrics and bundles\n\nFormerly: scil_group_comparison.py", "help": "usage: scil_stats_group_comparison.py [-h] [--out_dir OUT_DIR]\n [--out_json OUT_JSON]\n [--bundles BUNDLES [BUNDLES ...]]\n [--metrics METRICS [METRICS ...]]\n [--values VALUES [VALUES ...]]\n [--alpha_error ALPHA_ERROR]\n [--generate_graph] [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n IN_JSON IN_PARTICIPANTS GROUP_BY\n\nRun group comparison statistics on metrics from tractometry\n1) Separate the sample given a particular variable (group_by) into groups\n\n2) Does Shapiro-Wilk test of normality for every sample\nhttps://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test\n\n3) Does Levene or Bartlett (depending on normality) test of variance\nhomogeneity Levene:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm\nBartlett:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm\n\n4) Test the group difference for every measure with the correct test depending\n on the sample (Student, Welch, Mann-Whitney U, ANOVA, Kruskal-Wallis)\nStudent :\nhttps://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test\nWelch :\nhttps://en.wikipedia.org/wiki/Welch%27s_t-test\nMann-Whitney U :\nhttps://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test\nANOVA :\nhttp://www.biostathandbook.com/onewayanova.html\nKruskal-Wallis :\nhttps://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variance\n\n5) If the group difference test is positive and the number of groups is greater than\n 2, test the group difference two by two.\n\n6) Generate the result for all metrics and bundles\n\nFormerly: scil_group_comparison.py\n\npositional arguments:\n IN_JSON Input JSON file from tractometry nextflow pipeline or equivalent.\n IN_PARTICIPANTS Input tsv participants file. See doc in https://scilpy.readthedocs.io/en/latest/documentation/construct_participants_tsv_file.html.\n GROUP_BY Variable that will be used to compare groups together.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Name of the output folder path. [stats]\n --out_json OUT_JSON The name of the result json output file; otherwise it will be printed.\n --bundles BUNDLES [BUNDLES ...], -b BUNDLES [BUNDLES ...]\n Bundle(s) in which you want to do stats. [all]\n --metrics METRICS [METRICS ...], -m METRICS [METRICS ...]\n Metric(s) on which you want to do stats. [all]\n --values VALUES [VALUES ...], --va VALUES [VALUES ...]\n Value(s) on which you want to do stats (mean, std). [all]\n --alpha_error ALPHA_ERROR, -a ALPHA_ERROR\n Type 1 error for all the tests. [0.05]\n --generate_graph, --gg\n Generate a simple plot of every metric across groups.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
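Steps 2 through 5 amount to a small decision tree over scipy.stats tests. A sketch of that logic under stated assumptions (the script's own alpha handling, bookkeeping and post-hoc pairwise step differ in detail):

from scipy import stats

def group_difference(samples, alpha=0.05):
    """samples: list of 1D arrays, one per group; returns a test result."""
    # Step 2: Shapiro-Wilk normality check on every group.
    normal = all(stats.shapiro(s)[1] > alpha for s in samples)
    # Step 3: variance homogeneity, Bartlett if normal, Levene otherwise.
    if normal:
        equal_var = stats.bartlett(*samples)[1] > alpha
    else:
        equal_var = stats.levene(*samples)[1] > alpha
    # Step 4: pick the appropriate group-difference test.
    if len(samples) == 2:
        if normal:
            # Student's t-test if variances are equal, Welch's otherwise.
            return stats.ttest_ind(*samples, equal_var=equal_var)
        return stats.mannwhitneyu(*samples)
    return stats.f_oneway(*samples) if normal else stats.kruskal(*samples)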
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "bundles", + "bundles" + ], + [ + "create", + "generate" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "work", + "and" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "left", + "into" + ], + [ + "working", + "together" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "lack", + "loss", + "result", + "result" + ], + [ + "meaning", + "name" + ], + [ + "positive", + "positive" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "considered", + "knowledge", + "question", + "specific", + "subject", + "unique", + "particular" + ], + [ + "long", + "with" + ], + [ + "contrast", + "comparison" + ], + [ + "difference", + "difference" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "long", + "than" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "variance", + "variance" + ], + [ + "areas", + "across" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "participants", + "participants" + ], + [ + "based", + "group" + ], + [ + "variable", + "variable" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "matter", + "question", + "does" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "long", + "two" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "total", + "80" + ], + [ + "error", + "error" + ], + [ + "bundles", + "bundle" + ], + [ + "greater", + "greater" + ] + ], "keywords": [] }, { "name": "scil_surface_apply_transform", "docstring": "Script to apply a transform to a surface (FreeSurfer or VTK supported),\nusing output from ANTs registration tools (i.e. affine.txt, warp.nii.gz).\n\nExample usage from T1 to b0 using ANTs transforms:\n> ConvertTransformFile 3 output0GenericAffine.mat vtk_transfo.txt --hm\n> scil_surface_apply_transform.py lh_white_lps.vtk affine.txt lh_white_b0.vtk\\\n --ants_warp warp.nii.gz\n\nImportant: The input surface needs to be in *T1 world LPS* coordinates\n(aligned over the T1 in MI-Brain).\n\nThe script will use the linear affine first and then the warp image.\nThe resulting surface will be in *b0 world LPS* coordinates\n(aligned over the b0 in MI-Brain).\n\nFormerly: scil_apply_transform_to_surface.py.", "help": "usage: scil_surface_apply_transform.py [-h] [--ants_warp ANTS_WARP]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface ants_affine out_surface\n\nScript to apply a transform to a surface (FreeSurfer or VTK supported),\nusing output from ANTs registration tools (i.e. 
affine.txt, warp.nii.gz).\n\nExample usage from T1 to b0 using ANTs transforms:\n> ConvertTransformFile 3 output0GenericAffine.mat vtk_transfo.txt --hm\n> scil_surface_apply_transform.py lh_white_lps.vtk affine.txt lh_white_b0.vtk\\\n --ants_warp warp.nii.gz\n\nImportant: The input surface needs to be in *T1 world LPS* coordinates\n(aligned over the T1 in MI-Brain).\n\nThe script will use the linear affine first and then the warp image.\nThe resulting surface will be in *b0 world LPS* coordinates\n(aligned over the b0 in MI-Brain).\n\nFormerly: scil_apply_transform_to_surface.py.\n\npositional arguments:\n in_surface Input surface (.vtk).\n ants_affine Affine transform from ANTs (.txt or .mat).\n out_surface Output surface (.vtk).\n\noptions:\n -h, --help show this help message and exit\n --ants_warp ANTS_WARP\n Warp image from ANTs (Nifti image).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). NeuroImage.\n", - "synonyms": [], + "synonyms": [ + [ + "work", + "and" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "long", + "over" + ], + [ + "long", + "a" + ], + [ + "applied", + "apply" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "methods", + "using" + ], + [ + "supported", + "supported" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "result", + "resulting" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "methods", + "tool", + "tools" + ], + [ + "considered", + "key", + "work", + "important" + ] + ], "keywords": [] }, { "name": "scil_surface_convert", "docstring": "Script to convert surface formats\n\nSupported formats:\n \".vtk\", \".vtp\", \".ply\", \".stl\", \".xml\", \".obj\"\n and FreeSurfer surfaces\n\n> scil_surface_convert.py surf.vtk converted_surf.ply\n\nFormerly: scil_convert_surface.py", "help": "usage: scil_surface_convert.py [-h] [--xform XFORM] [--to_lps]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface\n\nScript to convert surface formats\n\nSupported formats:\n \".vtk\", \".vtp\", \".ply\", \".stl\", \".xml\", \".obj\"\n and FreeSurfer surfaces\n\n> scil_surface_convert.py surf.vtk converted_surf.ply\n\nFormerly: scil_convert_surface.py\n\npositional arguments:\n in_surface Input a surface (FreeSurfer or supported by VTK).\n out_surface Output surface (formats supported by VTK).\n\noptions:\n -h, --help show this help message and exit\n --xform XFORM Path of the copy-paste output from mri_info \n Using: mri_info $input >> log.txt, \n The file log.txt would be this parameter\n --to_lps Flip for Surface/MI-Brain LPS\n -v [{DEBUG,INFO,WARNING}]\n 
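Editorial aside: a rough numpy picture of what "the linear affine first and then the warp image" means for surface vertices. The 4x4 matrix and vertex values below are fabricated; real use should go through scil_surface_apply_transform.py.

import numpy as np

# Made-up 4x4 affine; in practice it would come from the converted
# ANTs transform (e.g. the ConvertTransformFile output shown above).
affine = np.array([[1.0, 0.0, 0.0, 2.0],
                   [0.0, 1.0, 0.0, -3.0],
                   [0.0, 0.0, 1.0, 0.5],
                   [0.0, 0.0, 0.0, 1.0]])
vertices = np.array([[10.0, -20.0, 5.0]])          # N x 3, T1 world LPS

homog = np.hstack([vertices, np.ones((len(vertices), 1))])  # N x 4
moved = (affine @ homog.T).T[:, :3]                # linear affine first...
# ...then the nonlinear warp image would displace each point in turn.
print(moved)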
Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). NeuroImage.\n", - "synonyms": [], + "synonyms": [ + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "work", + "and" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "parameter", + "parameters", + "parameter" + ], + [ + "order", + "set" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "methods", + "using" + ], + [ + "supported", + "supported" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ] + ], "keywords": [] }, { "name": "scil_surface_flip", "docstring": "Script to flip a given surface (FreeSurfer or VTK supported).\n\nCan flip surface coordinates around a single or multiple axes\nCan also be used to reverse the orientation of the surface normals.\n\nFormerly: scil_flip_surface.py", "help": "usage: scil_surface_flip.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface {x,y,z,n} [{x,y,z,n} ...]\n\nScript to flip a given surface (FreeSurfer or VTK supported).\n\nCan flip surface coordinates around a single or multiple axes\nCan also be used to reverse the orientation of the surface normals.\n\nFormerly: scil_flip_surface.py\n\npositional arguments:\n in_surface Input surface (.vtk).\n out_surface Output flipped surface (.vtk).\n {x,y,z,n} The axes you want to flip. eg: to flip the x and y axes use: x y. to reverse the surface normals use: n\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). 
NeuroImage.\n", - "synonyms": [], + "synonyms": [ + [ + "orientation", + "orientation" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "methods", + "using" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "supported", + "supported" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ] + ], "keywords": [] }, { "name": "scil_surface_smooth", "docstring": "Script to smooth a surface with a Laplacian blur.\n\nFor a standard FreeSurfer white matter mesh a step_size from 0.1 to 10\nis recommended\n\nSmoothing time = step_size * nb_steps\n small amount of smoothing [step_size 1, nb_steps 10]\n moderate amount of smoothing [step_size 10, nb_steps 100]\n large amount of smoothing [step_size 100, nb_steps 1000]\n\nFormerly: scil_smooth_surface.py", "help": "usage: scil_surface_smooth.py [-h] [-m VTS_MASK] [-n NB_STEPS] [-s STEP_SIZE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface\n\nScript to smooth a surface with a Laplacian blur.\n\nFor a standard FreeSurfer white matter mesh a step_size from 0.1 to 10\nis recommended\n\nSmoothing time = step_size * nb_steps\n small amount of smoothing [step_size 1, nb_steps 10]\n moderate amount of smoothing [step_size 10, nb_steps 100]\n large amount of smoothing [step_size 100, nb_steps 1000]\n\nFormerly: scil_smooth_surface.py\n\npositional arguments:\n in_surface Input surface (.vtk).\n out_surface Output smoothed surface (.vtk).\n\noptions:\n -h, --help show this help message and exit\n -m VTS_MASK, --vts_mask VTS_MASK\n Vertex mask no smoothing where mask equals 0 (.npy).\n -n NB_STEPS, --nb_steps NB_STEPS\n Number of steps for laplacian smooth [2].\n -s STEP_SIZE, --step_size STEP_SIZE\n Laplacian smooth step size [5.0].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). 
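Editorial aside: a toy numpy illustration of the two flip modes of scil_surface_flip.py. Negating a coordinate column mirrors the mesh along that axis; reversing triangle winding ('n') reverses the normals. The arrays are made-up stand-ins for a real mesh.

import numpy as np

vertices = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0]])
faces = np.array([[0, 1, 2]])   # one dummy triangle

vertices[:, 0] *= -1            # 'x': mirror along the x axis
faces = faces[:, ::-1]          # 'n': reversed winding flips the normals
print(vertices)
print(faces)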
NeuroImage.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "large", + "larger", + "small" + ], + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "order", + "set" + ], + [ + "step", + "steps" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "large", + "larger", + "large" + ], + [ + "long", + "with" + ], + [ + "long", + "result", + "work", + "working", + "time" + ], + [ + "action", + "process", + "step", + "step" + ], + [ + "methods", + "using" + ], + [ + "increase", + "total", + "amount" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "area", + "work", + "where" + ], + [ + "larger", + "size", + "size" + ], + [ + "total", + "100" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ] + ], "keywords": [] }, { "name": "scil_tracking_local", "docstring": "Local streamline HARDI tractography.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\n\nWARNING: This script DOES NOT support asymetric FODF input (aFODF).\n\nAlgo 'eudx': select the peak from the spherical function (SF) most closely\naligned to the previous direction, and follow an average of it and the previous\ndirection [1].\n\nAlgo 'det': select the orientation corresponding to the maximum of the\nspherical function.\n\nAlgo 'prob': select a direction drawn from the empirical distribution function\ndefined from the SF.\n\nAlgo 'ptt': select the propagation direction using Parallel-Transport\nTractography (PTT) framework, see [2] for more details.\n\nNOTE: eudx can be used with pre-computed peaks from fodf as well as\nevecs_v1.nii.gz from scil_dti_metrics.py (experimental).\n\nNOTE: If tracking with PTT, the step-size should be smaller than usual,\ni.e 0.1-0.2mm or lower. The maximum angle between segments (theta) should\nbe between 10 and 20 degrees.\n\nThe local tracking algorithm can also run on the GPU using the --use_gpu\noption (experimental). By default, GPU tracking behaves the same as\nDIPY. Below is a list of known divergences between the CPU and GPU\nimplementations:\n * Backend: The CPU implementation uses DIPY's LocalTracking and the\n GPU implementation uses an in-house OpenCL implementation.\n * Algo: For the GPU implementation, the only available algorithm is\n Algo 'prob'.\n * SH interpolation: For GPU tracking, SH interpolation can be set to either\n nearest neighbour or trilinear (default). With DIPY, the only available\n method is trilinear.\n * Forward tracking: For GPU tracking, the `--forward_only` flag can be used\n to disable backward tracking. This option isn't available for CPU\n tracking.\n\nAll the input nifti files must be in isotropic resolution.\n\n\nReferences\n----------\n[1]: Garyfallidis, E. (2012). Towards an accurate brain tractography\n[PhD thesis]. University of Cambridge. United Kingdom.\n\n[2]: Aydogan, D. B., & Shi, Y. (2020). 
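Editorial aside: the "smoothing time = step_size * nb_steps" relation above comes from iterating a Laplacian update. A minimal explicit sketch, assuming uniform neighbour weights, is below; note the real script can take large step sizes (e.g. 100) because implicit solvers stay stable, whereas this explicit toy needs step_size < 1.

import numpy as np

def laplacian_smooth(vertices, neighbours, nb_steps, step_size):
    # One explicit Laplacian step: pull each vertex toward the mean
    # of its neighbours; iterate nb_steps times.
    for _ in range(nb_steps):
        means = np.array([vertices[nbrs].mean(axis=0) for nbrs in neighbours])
        vertices = vertices + step_size * (means - vertices)
    return vertices

verts = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.8, 0.0]])
nbrs = [[1], [0, 2], [1]]        # toy adjacency list
print(laplacian_smooth(verts, nbrs, nb_steps=10, step_size=0.5))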
Parallel transport tractography.\nIEEE transactions on medical imaging, 40(2), 635-647.\n\nFormerly: scil_compute_local_tracking.py", "help": "usage: scil_tracking_local.py [-h] [--step STEP_SIZE] [--min_length m]\n [--max_length M] [--theta THETA]\n [--sfthres sf_th]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--sh_to_pmf]\n [--algo {det,prob,ptt,eudx}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--sub_sphere SUB_SPHERE]\n [--probe_length PROBE_LENGTH]\n [--probe_radius PROBE_RADIUS]\n [--probe_quality PROBE_QUALITY]\n [--probe_count PROBE_COUNT]\n [--support_exponent SUPPORT_EXPONENT]\n [--use_gpu] [--sh_interp {trilinear,nearest}]\n [--forward_only] [--batch_size BATCH_SIZE]\n [--compress [COMPRESS_TH]] [-f] [--save_seeds]\n [--seed SEED] [-v [{DEBUG,INFO,WARNING}]]\n in_odf in_seed in_mask out_tractogram\n\nLocal streamline HARDI tractography.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\n\nWARNING: This script DOES NOT support asymmetric FODF input (aFODF).\n\nAlgo 'eudx': select the peak from the spherical function (SF) most closely\naligned to the previous direction, and follow an average of it and the previous\ndirection [1].\n\nAlgo 'det': select the orientation corresponding to the maximum of the\nspherical function.\n\nAlgo 'prob': select a direction drawn from the empirical distribution function\ndefined from the SF.\n\nAlgo 'ptt': select the propagation direction using Parallel-Transport\nTractography (PTT) framework, see [2] for more details.\n\nNOTE: eudx can be used with pre-computed peaks from fodf as well as\nevecs_v1.nii.gz from scil_dti_metrics.py (experimental).\n\nNOTE: If tracking with PTT, the step-size should be smaller than usual,\ni.e. 0.1-0.2mm or lower. The maximum angle between segments (theta) should\nbe between 10 and 20 degrees.\n\nThe local tracking algorithm can also run on the GPU using the --use_gpu\noption (experimental). By default, GPU tracking behaves the same as\nDIPY. Below is a list of known divergences between the CPU and GPU\nimplementations:\n * Backend: The CPU implementation uses DIPY's LocalTracking and the\n GPU implementation uses an in-house OpenCL implementation.\n * Algo: For the GPU implementation, the only available algorithm is\n Algo 'prob'.\n * SH interpolation: For GPU tracking, SH interpolation can be set to either\n nearest neighbour or trilinear (default). With DIPY, the only available\n method is trilinear.\n * Forward tracking: For GPU tracking, the `--forward_only` flag can be used\n to disable backward tracking. This option isn't available for CPU\n tracking.\n\nAll the input nifti files must be in isotropic resolution.\n\nReferences\n----------\n[1]: Garyfallidis, E. (2012). Towards an accurate brain tractography\n[PhD thesis]. University of Cambridge. United Kingdom.\n\n[2]: Aydogan, D. B., & Shi, Y. (2020). Parallel transport tractography.\nIEEE transactions on medical imaging, 40(2), 635-647.\n\nFormerly: scil_compute_local_tracking.py\n\npositional arguments:\n in_odf File containing the orientation diffusion function \n as spherical harmonics file (.nii.gz). Ex: ODF or fODF.\n in_seed Seeding mask (.nii.gz).\n in_mask Tracking mask (.nii.gz).\n Tracking will stop outside this mask. 
The last point of each \n streamline (triggering the stopping criteria) IS added to the streamline.\n out_tractogram Tractogram output file (must be .trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n\nTracking options:\n --step STEP_SIZE Step size in mm. [0.5]\n --min_length m Minimum length of a streamline in mm. [10.0]\n --max_length M Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. If the angle is too big, streamline is \n stopped and the following point is NOT included.\n [\"eudx\"=60, \"det\"=45, \"prob\"=20, \"ptt\"=20]\n --sfthres sf_th Spherical function relative threshold. [0.1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sh_to_pmf If set, map spherical harmonics to spherical function (pmf) before \n tracking (faster, requires more memory)\n --algo {det,prob,ptt,eudx}\n Algorithm to use. [prob]\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Dipy sphere; set of possible directions.\n Default: [repulsion724]\n --sub_sphere SUB_SPHERE\n Subdivides each face of the sphere into 4^s new faces. [0]\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nPTT options:\n --probe_length PROBE_LENGTH\n The length of the probes. Smaller value\n yields more dispersed fibers. [1.0]\n --probe_radius PROBE_RADIUS\n The radius of the probe. A large probe_radius\n helps mitigate noise in the pmf but it might\n make it harder to sample thin and intricate\n connections, also the boundary of fiber\n bundles might be eroded. [0]\n --probe_quality PROBE_QUALITY\n The quality of the probe. This parameter sets\n the number of segments to split the cylinder\n along the length of the probe (minimum=2) [3]\n --probe_count PROBE_COUNT\n The number of probes. This parameter sets the\n number of parallel lines used to model the\n cylinder (minimum=1). [1]\n --support_exponent SUPPORT_EXPONENT\n Data support exponent, used for rejection\n sampling. [3]\n\nGPU options:\n --use_gpu Enable GPU tracking (experimental).\n --sh_interp {trilinear,nearest}\n SH image interpolation method. [trilinear]\n --forward_only Perform forward tracking only.\n --batch_size BATCH_SIZE\n Approximate size of GPU batches (number\n of streamlines to track in parallel). [10000]\n\nOutput options:\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. 
Value is the maximum \n compression distance in mm.\n A rule of thumb is to set it to 0.1mm for deterministic \n streamlines and to 0.2mm for probabilitic streamlines.[0.1]\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n Hint: you can then use scil_tractogram_seed_density_map.\n --seed SEED Random number generator seed.\n\nLogging options:\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [], + "synonyms": [ + [ + "methods", + "method" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "long", + "work", + "more" + ], + [ + "fibre", + "fiber" + ], + [ + "order", + "set" + ], + [ + "direction", + "directions" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "create", + "future", + "step", + "work", + "make" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "diffusion", + "diffusion" + ], + [ + "threshold", + "threshold" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "experiment", + "experimental" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "long", + "than" + ], + [ + "imaging", + "imaging" + ], + [ + "step", + "follow" + ], + [ + "step", + "forward" + ], + [ + "represent", + "chosen" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "maps", + "map" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "question", + "might" + ], + [ + "memory", + "memory" + ], + [ + "average", + "per" + ], + [ + "orientation", + "orientation" + ], + [ + "work", + "also" + ], + [ + "considered", + "known" + ], + [ + "result", + "following" + ], + [ + "difference", + "point" + ], + [ + "left", + "from" + ], + [ + "thinking", + "you" + ], + [ + "step", + "steps" + ], + [ + "long", + "a" + ], + [ + "total", + "40" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "future", + "will" + ], + [ + "random", + "random" + ], + [ + "held", + "last" + ], + [ + "average", + "average" + ], + [ + "view", + "see" + ], + [ + "result", + "resulting" + ], + [ + "order", + "rule" + ], + [ + "large", + "big" + ], + [ + "connect", + "connected", + "connection", + "connections", + "connections" + ], + [ + "matter", + "question", + "does" + ], + [ + "streamline", + "streamline" + ], + [ + "studies", + "university" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "lack", + "quality" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "data", + "tracking", + "tracking" + ], + [ + "considered", + "highly", + "long", + "work", + "most" + ], + [ + "left", + "into" + ], + [ + "papers", + "paper" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "thinking", + "i" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "direction", + "towards" + ], + [ + "defined", + "function", + "functional", + "functions", + "function" + ], + [ + 
"methods", + "using" + ], + [ + "area", + "main", + "along" + ], + [ + "defined", + "function", + "defined" + ], + [ + "degree", + "degrees" + ], + [ + "left", + "before" + ], + [ + "individual", + "each" + ], + [ + "higher", + "lower" + ], + [ + "total", + "number" + ], + [ + "bundles", + "bundles" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "too" + ], + [ + "seeding", + "seeding" + ], + [ + "parameter", + "parameters", + "parameter" + ], + [ + "methods", + "use" + ], + [ + "direction", + "direction" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "large", + "larger", + "large" + ], + [ + "probabilistic", + "deterministic" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "action", + "process", + "step", + "step" + ], + [ + "algorithm", + "algorithm" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "reliable", + "accurate" + ], + [ + "fibres", + "fibers" + ], + [ + "larger", + "size", + "size" + ], + [ + "total", + "60" + ], + [ + "considered", + "experience", + "large", + "long", + "result", + "variety", + "work", + "working", + "well" + ], + [ + "large", + "larger", + "smaller" + ], + [ + "increase", + "total", + "total" + ] + ], "keywords": [] }, { "name": "scil_tracking_local_dev", "docstring": "Local streamline HARDI tractography using scilpy-only methods -- no dipy (i.e\nno cython). The goal of this is to have a python-only version that can be\nmodified more easily by our team when testing new algorithms and parameters,\nand that can be used as parent classes in sub-projects of our lab such as in\ndwi_ml.\n\nWARNING. MUCH SLOWER THAN scil_tracking_local.py. We recommand using multi-\nprocessing with option --nb_processes.\n\nSimilar to scil_tracking_local:\n The tracking direction is chosen in the aperture cone defined by the\n previous tracking direction and the angular constraint.\n - Algo 'det': the maxima of the spherical function (SF) the most closely\n aligned to the previous direction.\n - Algo 'prob': a direction drawn from the empirical distribution function\n defined from the SF.\n\nContrary to scil_tracking_local:\n - Algo 'eudx' is not yet available!\n - Input nifti files do not necessarily need to be in isotropic resolution.\n - The script works with asymmetric input ODF.\n - The interpolation for the tracking mask and spherical function can be\n one of 'nearest' or 'trilinear'.\n - Runge-Kutta integration is supported for the step function.\n\nA few notes on Runge-Kutta integration.\n 1. Runge-Kutta integration is used to approximate the next tracking\n direction by estimating directions from future tracking steps. This\n works well for deterministic tracking. However, in the context of\n probabilistic tracking, the next tracking directions cannot be estimated\n in advance, because they are picked randomly from a distribution. It is\n therefore recommanded to keep the rk_order to 1 for probabilistic\n tracking.\n 2. 
As a rule of thumb, doubling the rk_order will double the computation\n time in the worst case.\n\nReferences: [1] Girard, G., Whittingstall K., Deriche, R., and\n Descoteaux, M. (2014). Towards quantitative connectivity analysis:\n reducing tractography biases. Neuroimage, 98, 266-278.\n\nFormerly: scil_compute_local_tracking_dev.py", "help": "usage: scil_tracking_local_dev.py [-h] [--step STEP_SIZE] [--min_length m]\n [--max_length M] [--theta THETA]\n [--sfthres sf_th]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--algo {det,prob}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--sub_sphere SUB_SPHERE]\n [--sfthres_init sf_th] [--rk_order K]\n [--max_invalid_nb_points MAX]\n [--forward_only]\n [--sh_interp {nearest,trilinear}]\n [--mask_interp {nearest,trilinear}]\n [--keep_last_out_point]\n [--n_repeats_per_seed N_REPEATS_PER_SEED]\n [--rng_seed RNG_SEED] [--skip SKIP]\n [--processes NBR] [--compress [COMPRESS_TH]]\n [-f] [--save_seeds]\n [-v [{DEBUG,INFO,WARNING}]]\n in_odf in_seed in_mask out_tractogram\n\nLocal streamline HARDI tractography using scilpy-only methods -- no dipy (i.e.\nno cython). The goal of this is to have a python-only version that can be\nmodified more easily by our team when testing new algorithms and parameters,\nand that can be used as parent classes in sub-projects of our lab such as in\ndwi_ml.\n\nWARNING. MUCH SLOWER THAN scil_tracking_local.py. We recommend using multi-\nprocessing with option --nb_processes.\n\nSimilar to scil_tracking_local:\n The tracking direction is chosen in the aperture cone defined by the\n previous tracking direction and the angular constraint.\n - Algo 'det': the maxima of the spherical function (SF) the most closely\n aligned to the previous direction.\n - Algo 'prob': a direction drawn from the empirical distribution function\n defined from the SF.\n\nContrary to scil_tracking_local:\n - Algo 'eudx' is not yet available!\n - Input nifti files do not necessarily need to be in isotropic resolution.\n - The script works with asymmetric input ODF.\n - The interpolation for the tracking mask and spherical function can be\n one of 'nearest' or 'trilinear'.\n - Runge-Kutta integration is supported for the step function.\n\nA few notes on Runge-Kutta integration.\n 1. Runge-Kutta integration is used to approximate the next tracking\n direction by estimating directions from future tracking steps. This\n works well for deterministic tracking. However, in the context of\n probabilistic tracking, the next tracking directions cannot be estimated\n in advance, because they are picked randomly from a distribution. It is\n therefore recommended to keep the rk_order to 1 for probabilistic\n tracking.\n 2. As a rule of thumb, doubling the rk_order will double the computation\n time in the worst case.\n\nReferences: [1] Girard, G., Whittingstall K., Deriche, R., and\n Descoteaux, M. (2014). Towards quantitative connectivity analysis:\n reducing tractography biases. Neuroimage, 98, 266-278.\n\nFormerly: scil_compute_local_tracking_dev.py\n\npositional arguments:\n in_odf File containing the orientation diffusion function \n as spherical harmonics file (.nii.gz). Ex: ODF or fODF.\n in_seed Seeding mask (.nii.gz).\n in_mask Tracking mask (.nii.gz).\n Tracking will stop outside this mask. 
The last point of each \n streamline (triggering the stopping criteria) IS added to the streamline.\n out_tractogram Tractogram output file (must be .trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nTracking options:\n --step STEP_SIZE Step size in mm. [0.5]\n --min_length m Minimum length of a streamline in mm. [10.0]\n --max_length M Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. If the angle is too big, streamline is \n stopped and the following point is NOT included.\n [\"eudx\"=60, \"det\"=45, \"prob\"=20, \"ptt\"=20]\n --sfthres sf_th Spherical function relative threshold. [0.1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --algo {det,prob} Algorithm to use. [prob]\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Dipy sphere; set of possible directions.\n Default: [repulsion724]\n --sub_sphere SUB_SPHERE\n Subdivides each face of the sphere into 4^s new faces. [0]\n --sfthres_init sf_th Spherical function relative threshold value for the \n initial direction. [0.5]\n --rk_order K The order of the Runge-Kutta integration used for the step function.\n For more information, refer to the note in the script description. [1]\n --max_invalid_nb_points MAX\n Maximum number of steps without valid direction, \n ex: if threshold on ODF or max angles are reached.\n Default: 0, i.e. do not add points following an invalid direction.\n --forward_only If set, tracks in one direction only (forward) given the \n initial seed. The direction is randomly drawn from the ODF.\n --sh_interp {nearest,trilinear}\n Spherical harmonic interpolation: nearest-neighbor \n or trilinear. [trilinear]\n --mask_interp {nearest,trilinear}\n Mask interpolation: nearest-neighbor or trilinear. [nearest]\n --keep_last_out_point\n If set, keep the last point (once out of the tracking mask) of \n the streamline. Default: discard them. This is the default in \n Dipy too. Note that points obtained after an invalid direction \n (ex when angle is too sharp or sh_threshold not reached) are \n never added.\n --n_repeats_per_seed N_REPEATS_PER_SEED\n By default, each seed position is used only once. This option\n allows for tracking from the exact same seed n_repeats_per_seed\n times. [1]\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nRandom seeding options:\n --rng_seed RNG_SEED Initial value for the random number generator. [0]\n --skip SKIP Skip the first N random number. 
\n Useful if you want to create new streamlines to add to \n a previously created tractogram with a fixed --rng_seed.\n Ex: If tractogram_1 was created with -nt 1,000,000, \n you can create tractogram_2 with \n --skip 1,000,000.\n\nMemory options:\n --processes NBR Number of sub-processes to start. \n Default: [1]\n\nOutput options:\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm.\n A rule of thumb is to set it to 0.1mm for deterministic \n streamlines and to 0.2mm for probabilitic streamlines.[0.1]\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n Hint: you can then use scil_tractogram_seed_density_map.\n", - "synonyms": [], + "synonyms": [ + [ + "long", + "work", + "more" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "invalid", + "valid", + "invalid" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "processing", + "processing" + ], + [ + "direction", + "directions" + ], + [ + "increase", + "limiting", + "reducing" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "diffusion", + "diffusion" + ], + [ + "threshold", + "threshold" + ], + [ + "long", + "with" + ], + [ + "application", + "allows" + ], + [ + "future", + "our" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "contrast", + "form", + "forms", + "larger", + "result", + "specific", + "variety", + "similar" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "long", + "than" + ], + [ + "step", + "forward" + ], + [ + "represent", + "chosen" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "create", + "created" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "large", + "long", + "few" + ], + [ + "exist", + "cannot" + ], + [ + "memory", + "memory" + ], + [ + "average", + "per" + ], + [ + "orientation", + "orientation" + ], + [ + "result", + "following" + ], + [ + "considered", + "result", + "therefore" + ], + [ + "difference", + "point" + ], + [ + "left", + "from" + ], + [ + "thinking", + "you" + ], + [ + "step", + "steps" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "future", + "will" + ], + [ + "random", + "random" + ], + [ + "create", + "create" + ], + [ + "held", + "last" + ], + [ + "clear", + "considered", + "future", + "lack", + "long", + "matter", + "question", + "result", + "work", + "because" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "result", + "resulting" + ], + [ + "order", + "rule" + ], + [ + "left", + "long", + "work", + "once" + ], + [ + "considered", + "subsequently", + "was" + ], + [ + "large", + "big" + ], + [ + "analysis", + "methodology", + "methods", + "processes", + "methods" + ], + [ + "streamline", + "streamline" + ], + [ + "connectivity", + "connectivity" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "future", + "held", + "step", + "next" + ], + [ + "data", + 
"tracking", + "tracking" + ], + [ + "considered", + "specific", + "variety", + "such" + ], + [ + "algorithm", + "algorithms" + ], + [ + "considered", + "highly", + "long", + "work", + "most" + ], + [ + "considered", + "are" + ], + [ + "left", + "into" + ], + [ + "papers", + "paper" + ], + [ + "attention", + "experience", + "long", + "result", + "work", + "much" + ], + [ + "total", + "estimated" + ], + [ + "meaning", + "refer" + ], + [ + "tool", + "useful" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "thinking", + "i" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "project", + "projects" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "direction", + "towards" + ], + [ + "defined", + "function", + "functional", + "functions", + "function" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "methods", + "using" + ], + [ + "left", + "after" + ], + [ + "defined", + "function", + "defined" + ], + [ + "exist", + "necessarily" + ], + [ + "individual", + "each" + ], + [ + "create", + "lack", + "step", + "work", + "working", + "need" + ], + [ + "step", + "start" + ], + [ + "matter", + "question", + "case" + ], + [ + "clear", + "considered", + "result", + "however" + ], + [ + "total", + "number" + ], + [ + "subsequently", + "previously" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "too" + ], + [ + "seeding", + "seeding" + ], + [ + "methods", + "use" + ], + [ + "direction", + "direction" + ], + [ + "invalid", + "valid", + "valid" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "clear", + "left", + "out" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "probabilistic", + "deterministic" + ], + [ + "long", + "result", + "work", + "working", + "time" + ], + [ + "action", + "process", + "step", + "step" + ], + [ + "algorithm", + "algorithm" + ], + [ + "streamlines", + "streamlines" + ], + [ + "work", + "works" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "supported", + "supported" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "future", + "possibility", + "potential", + "future" + ], + [ + "considered", + "is" + ], + [ + "larger", + "size", + "size" + ], + [ + "total", + "60" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "step", + "true", + "work", + "yet" + ], + [ + "considered", + "experience", + "large", + "long", + "result", + "variety", + "work", + "working", + "well" + ], + [ + "increase", + "total", + "total" + ], + [ + "probabilistic", + "probabilistic" + ] + ], "keywords": [] }, { "name": "scil_tracking_pft", "docstring": "Local streamline HARDI tractography including Particle Filtering tracking.\n\nWARNING: This script DOES NOT support asymetric FODF input (aFODF).\n\nThe tracking is done inside partial volume estimation maps and uses the\nparticle filtering tractography (PFT) algorithm. 
See\nscil_tracking_pft_maps.py to generate PFT required maps.\n\nStreamlines longer than min_length and shorter than max_length are kept.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\nDefault parameters as suggested in [1].\n\nAlgo 'det': the maxima of the spherical function (SF) the most closely aligned\nto the previous direction.\nAlgo 'prob': a direction drawn from the empirical distribution function defined\nfrom the SF.\n\nFor streamline compression, a rule of thumb is to set it to 0.1mm for the\ndeterministic algorithm and 0.2mm for probabilistic algorithm.\n\nAll the input nifti files must be in isotropic resolution.\n\nFormerly: scil_compute_pft.py", "help": "usage: scil_tracking_pft.py [-h] [--algo {det,prob}] [--step STEP_SIZE]\n [--min_length MIN_LENGTH]\n [--max_length MAX_LENGTH] [--theta THETA] [--act]\n [--sfthres SF_THRESHOLD]\n [--sfthres_init SF_THRESHOLD_INIT]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--particles PARTICLES]\n [--back BACK_TRACKING]\n [--forward FORWARD_TRACKING] [--all] [--seed SEED]\n [-f] [--save_seeds] [--compress [COMPRESS_TH]]\n [-v [{DEBUG,INFO,WARNING}]]\n in_sh in_seed in_map_include map_exclude_file\n out_tractogram\n\nLocal streamline HARDI tractography including Particle Filtering tracking.\n\nWARNING: This script DOES NOT support asymmetric FODF input (aFODF).\n\nThe tracking is done inside partial volume estimation maps and uses the\nparticle filtering tractography (PFT) algorithm. See\nscil_tracking_pft_maps.py to generate PFT required maps.\n\nStreamlines longer than min_length and shorter than max_length are kept.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\nDefault parameters as suggested in [1].\n\nAlgo 'det': the maxima of the spherical function (SF) the most closely aligned\nto the previous direction.\nAlgo 'prob': a direction drawn from the empirical distribution function defined\nfrom the SF.\n\nFor streamline compression, a rule of thumb is to set it to 0.1mm for the\ndeterministic algorithm and 0.2mm for probabilistic algorithm.\n\nAll the input nifti files must be in isotropic resolution.\n\nFormerly: scil_compute_pft.py\n\npositional arguments:\n in_sh Spherical harmonic file (.nii.gz).\n in_seed Seeding mask (.nii.gz).\n in_map_include The probability map (.nii.gz) of ending the\n streamline and including it in the output (CMC, PFT [1])\n map_exclude_file The probability map (.nii.gz) of ending the\n streamline and excluding it in the output (CMC, PFT [1]).\n out_tractogram Tractogram output file (must be .trk or .tck).\n\nGeneric options:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nTracking options:\n --algo {det,prob} Algorithm to use (must be \"det\" or \"prob\"). [prob]\n --step STEP_SIZE Step size in mm. [0.2]\n --min_length MIN_LENGTH\n Minimum length of a streamline in mm. [10.0]\n --max_length MAX_LENGTH\n Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. [\"det\"=45, \"prob\"=20]\n --act If set, uses anatomically-constrained tractography (ACT) \n instead of continuous map criterion (CMC).\n --sfthres SF_THRESHOLD\n Spherical function relative threshold. 
[0.1]\n --sfthres_init SF_THRESHOLD_INIT\n Spherical function relative threshold value for the \n initial direction. [0.5]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nPFT options:\n --particles PARTICLES\n Number of particles to use for PFT. [15]\n --back BACK_TRACKING Length of PFT back tracking (mm). [2.0]\n --forward FORWARD_TRACKING\n Length of PFT forward tracking (mm). [1.0]\n\nOutput options:\n --all If set, keeps \"excluded\" streamlines.\n NOT RECOMMENDED, except for debugging.\n --seed SEED Random number generator seed.\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm.[0.1]\n\nReferences: [1] Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M. (2014). Towards quantitative connectivity analysis: reducing tractography biases. Neuroimage, 98, 266-278.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "considered", + "highly", + "long", + "work", + "most" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "create", + "generate" + ], + [ + "order", + "required" + ], + [ + "average", + "per" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "probability", + "probability" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "seeding", + "seeding" + ], + [ + "methods", + "use" + ], + [ + "direction", + "direction" + ], + [ + "order", + "set" + ], + [ + "step", + "steps" + ], + [ + "long", + "a" + ], + [ + "increase", + "limiting", + "reducing" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "left", + "back" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "random", + "random" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "direction", + "towards" + ], + [ + "defined", + "function", + "functional", + "functions", + "function" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "probabilistic", + "deterministic" + ], + [ + "action", + "process", + "step", + "step" + ], + [ + "algorithm", + 
"algorithm" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "order", + "work", + "instead" + ], + [ + "possibility", + "proposed", + "suggested" + ], + [ + "defined", + "function", + "defined" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "result", + "resulting" + ], + [ + "considered", + "is" + ], + [ + "order", + "rule" + ], + [ + "step", + "forward" + ], + [ + "represent", + "chosen" + ], + [ + "anatomically", + "anatomically" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "larger", + "size", + "size" + ], + [ + "matter", + "question", + "does" + ], + [ + "streamline", + "streamline" + ], + [ + "work", + "working", + "done" + ], + [ + "connectivity", + "connectivity" + ], + [ + "long", + "longer" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "maps", + "maps" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "maps", + "map" + ], + [ + "data", + "tracking", + "tracking" + ], + [ + "increase", + "total", + "total" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_tracking_pft_maps", "docstring": "Compute include and exclude maps, and the seeding interface mask from partial\nvolume estimation (PVE) maps. Maps should have values in [0,1], gm+wm+csf=1 in\nall voxels of the brain, gm+wm+csf=0 elsewhere.\n\nReferences: Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M.\n(2014). Towards quantitative connectivity analysis: reducing tractography\nbiases. Neuroimage.\n\nFormerly: scil_compute_maps_for_particle_filter_tracking.py", "help": "usage: scil_tracking_pft_maps.py [-h] [--include filename]\n [--exclude filename] [--interface filename]\n [-t THRESHOLD] [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n in_wm in_gm in_csf\n\nCompute include and exclude maps, and the seeding interface mask from partial\nvolume estimation (PVE) maps. Maps should have values in [0,1], gm+wm+csf=1 in\nall voxels of the brain, gm+wm+csf=0 elsewhere.\n\nReferences: Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M.\n(2014). Towards quantitative connectivity analysis: reducing tractography\nbiases. Neuroimage.\n\nFormerly: scil_compute_maps_for_particle_filter_tracking.py\n\npositional arguments:\n in_wm White matter PVE map (nifti). From normal FAST output, has a PVE_2 name suffix.\n in_gm Grey matter PVE map (nifti). From normal FAST output, has a PVE_1 name suffix.\n in_csf Cerebrospinal fluid PVE map (nifti). From normal FAST output, has a PVE_0 name suffix.\n\noptions:\n -h, --help show this help message and exit\n --include filename Output include map (nifti). [map_include.nii.gz]\n --exclude filename Output exclude map (nifti). [map_exclude.nii.gz]\n --interface filename Output interface seeding mask (nifti). [interface.nii.gz]\n -t THRESHOLD Minimum gm and wm PVE values in a voxel to be into the interface. [0.1]\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n", - "synonyms": [], + "synonyms": [ + [ + "volume", + "volumes", + "volume" + ], + [ + "work", + "and" + ], + [ + "left", + "into" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "seeding", + "seeding" + ], + [ + "meaning", + "name" + ], + [ + "long", + "have" + ], + [ + "long", + "a" + ], + [ + "increase", + "limiting", + "reducing" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "direction", + "towards" + ], + [ + "variety", + "include" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "methods", + "using" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "grey", + "grey" + ], + [ + "connectivity", + "connectivity" + ], + [ + "work", + "all" + ], + [ + "maps", + "maps" + ], + [ + "maps", + "map" + ] + ], "keywords": [] }, { "name": "scil_tracking_pft_maps_edit", "docstring": "Modify PFT maps to allow PFT tracking in given mask (e.g edema).\n\nFormerly: scil_add_tracking_mask_to_pft_maps.py.", "help": "usage: scil_tracking_pft_maps_edit.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n map_include map_exclude additional_mask\n map_include_corr map_exclude_corr\n\nModify PFT maps to allow PFT tracking in given mask (e.g edema).\n\nFormerly: scil_add_tracking_mask_to_pft_maps.py.\n\npositional arguments:\n map_include PFT map include.\n map_exclude PFT map exclude.\n additional_mask Allow PFT tracking in this mask.\n map_include_corr Corrected PFT map include output file name.\n map_exclude_corr Corrected PFT map exclude output file name.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
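Editorial aside: the interface mask of scil_tracking_pft_maps.py is the easiest output to illustrate: voxels where both the GM and WM partial-volume fractions reach the -t threshold (0.1 by default, as documented above). A toy with fabricated PVE maps:

import numpy as np

gm = np.random.default_rng(0).random((4, 4, 4))
wm = 1.0 - gm            # toy PVE maps with gm + wm = 1, csf = 0
interface = (gm >= 0.1) & (wm >= 0.1)
print(interface.sum(), 'interface voxels')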
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "order", + "allow" + ], + [ + "work", + "and" + ], + [ + "held", + "on" + ], + [ + "methods", + "using" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "maps", + "maps" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "maps", + "map" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "left", + "result", + "when" + ], + [ + "data", + "tracking", + "tracking" + ], + [ + "held", + "in" + ], + [ + "considered", + "is" + ], + [ + "variety", + "include" + ], + [ + "meaning", + "name" + ] + ], "keywords": [] }, { "name": "scil_tractogram_apply_transform", "docstring": "Transform a tractogram using an affine/rigid transformation and nonlinear\ndeformation (optional).\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nApplying transformation to a tractogram can lead to invalid streamlines (out of\nthe bounding box), and thus three strategies are available:\n1) Do nothing, may crash at saving if invalid streamlines are present.\n [This is the default]\n2) --keep_invalid, save invalid streamlines. Leave it to the user to run\n scil_tractogram_remove_invalid.py if needed.\n3) --remove_invalid, automatically remove invalid streamlines before saving.\n Should not remove more than a few streamlines. Typically, the streamlines\n that are rejected are the ones reaching the limits of the brain, ex, near\n the pons.\n4) --cut_invalid, automatically cut invalid streamlines before saving, i.e. the\n streamlines are kept but the points out of the bounding box are cut.\n\nExample:\nTo apply a transformation from ANTs to a tractogram, if the ANTs command was\nMOVING->REFERENCE...\n1) To apply the original transformation:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --inverse\n --in_deformation 1InverseWarp.nii.gz\n\n2) To apply the inverse transformation, i.e. REFERENCE->MOVING:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --in_deformation 1Warp.nii.gz\n --reverse_operation\n\nFormerly: scil_apply_transform_to_tractogram.py", "help": "usage: scil_tractogram_apply_transform.py [-h] [--inverse]\n [--in_deformation file]\n [--reverse_operation]\n [--cut_invalid | --remove_invalid | --keep_invalid]\n [--no_empty] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_moving_tractogram in_target_file\n in_transfo out_tractogram\n\nTransform a tractogram using an affine/rigid transformation and nonlinear\ndeformation (optional).\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nApplying transformation to a tractogram can lead to invalid streamlines (out of\nthe bounding box), and thus three strategies are available:\n1) Do nothing, may crash at saving if invalid streamlines are present.\n [This is the default]\n2) --keep_invalid, save invalid streamlines. 
Leave it to the user to run\n scil_tractogram_remove_invalid.py if needed.\n3) --remove_invalid, automatically remove invalid streamlines before saving.\n Should not remove more than a few streamlines. Typically, the streamlines\n that are rejected are the ones reaching the limits of the brain, ex, near\n the pons.\n4) --cut_invalid, automatically cut invalid streamlines before saving, i.e. the\n streamlines are kept but the points out of the bounding box are cut.\n\nExample:\nTo apply a transformation from ANTs to a tractogram, if the ANTs command was\nMOVING->REFERENCE...\n1) To apply the original transformation:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --inverse\n --in_deformation 1InverseWarp.nii.gz\n\n2) To apply the inverse transformation, i.e. REFERENCE->MOVING:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --in_deformation 1Warp.nii.gz\n --reverse_operation\n\nFormerly: scil_apply_transform_to_tractogram.py\n\npositional arguments:\n in_moving_tractogram Path of the tractogram to be transformed.\n Bounding box validity will not be checked (could \n contain invalid streamlines).\n in_target_file Path of the reference target file (trk or nii).\n in_transfo Path of the file containing the 4x4 \n transformation, matrix (.txt, .npy or .mat).\n out_tractogram Output tractogram filename (transformed data).\n\noptions:\n -h, --help show this help message and exit\n --no_empty Do not write file if there is no streamline.\n You may save an empty file if you use remove_invalid.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nTransformation options:\n --inverse Apply the inverse linear transformation.\n --in_deformation file\n Path to the file containing a deformation field.\n --reverse_operation Apply the transformation in reverse (see doc), warp\n first, then linear.\n\nManagement of invalid streamlines:\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --keep_invalid Keep the streamlines landing out of the bounding box.\n", - "synonyms": [], + "synonyms": [ + [ + "application", + "database", + "user" + ], + [ + "anatomy", + "anatomy" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "long", + "work", + "more" + ], + [ + "brain", + "brain" + ], + [ + "limiting", + "limits" + ], + [ + "proposed", + "rejected" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "invalid", + "valid", + "invalid" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "clear", + "considered", + "create", + "form", + "manner", + "matter", + "result", + "subject", + "thinking", + "true", + "view", + "work", + "rather" + ], + [ + "methods", + "use" + ], + [ + "area", + "near" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "clear", + "left", + "out" + ], + [ + "held", + "on" + ], + [ + "applied", + "apply" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "matter", + "question", + "true", + "nothing" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "matter", + "question", + "thinking", + "true", + "work", + "working", + "how" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "left", + "before" + ], + [ + "matrices", + "matrix" + ], + [ + "step", + "follow" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "result", + "may" + ], + [ + "considered", + "is" + ], + [ + "considered", + "subsequently", + "was" + ], + [ + "applied", + "applying" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "streamline", + "streamline" + ], + [ + "considered", + "form", + "meaning", + "order", + "result", + "thus" + ], + [ + "clear", + "long", + "question", + "result", + "work", + "there" + ], + [ + "connection", + "connections", + "link" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + 
"matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "left", + "leave" + ], + [ + "clear", + "result", + "work", + "could" + ], + [ + "assigned", + "command" + ], + [ + "large", + "long", + "few" + ] + ], "keywords": [] }, { "name": "scil_tractogram_apply_transform_to_hdf5", "docstring": "Transform tractogram(s) contained in the hdf5 output from a connectivity\nscript, using an affine/rigid transformation and nonlinear deformation\n(optional).\n\nSee scil_tractogram_apply_transform.py to apply directly to a tractogram.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nOr use >> scil_tractogram_apply_transform.py --help\n\nFormerly: scil_apply_transform_to_hdf5.py", "help": "usage: scil_tractogram_apply_transform_to_hdf5.py [-h] [--inverse]\n [--in_deformation file]\n [--reverse_operation]\n [--cut_invalid | --remove_invalid | --keep_invalid]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_hdf5 in_target_file\n in_transfo out_hdf5\n\nTransform tractogram(s) contained in the hdf5 output from a connectivity\nscript, using an affine/rigid transformation and nonlinear deformation\n(optional).\n\nSee scil_tractogram_apply_transform.py to apply directly to a tractogram.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nOr use >> scil_tractogram_apply_transform.py --help\n\nFormerly: scil_apply_transform_to_hdf5.py\n\npositional arguments:\n in_hdf5 Path of the hdf5 containing the moving tractogram, to be transformed. (.h5 extension).\n in_target_file Path of the reference target file (.trk or .nii).\n in_transfo Path of the file containing the 4x4 \n transformation, matrix (.txt, .npy or .mat).\n out_hdf5 Output tractogram filename (transformed data).\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nTransformation options:\n --inverse Apply the inverse linear transformation.\n --in_deformation file\n Path to the file containing a deformation field.\n --reverse_operation Apply the transformation in reverse (see doc), warp\n first, then linear.\n\nManagement of invalid streamlines:\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --keep_invalid Keep the streamlines landing out of the bounding box.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "long", + "work", + "more" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "invalid", + "valid", + "invalid" + ], + [ + "clear", + "considered", + "create", + "form", + "manner", + "matter", + "result", + "subject", + "thinking", + "true", + "view", + "work", + "rather" + ], + [ + "methods", + "use" + ], + [ + "long", + "a" + ], + [ + "clear", + "left", + "out" + ], + [ + "held", + "on" + ], + [ + "applied", + "apply" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "matter", + "question", + "thinking", + "true", + "work", + "working", + "how" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "matrices", + "matrix" + ], + [ + "step", + "follow" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "connectivity", + "connectivity" + ], + [ + "connection", + "connections", + "link" + ] + ], "keywords": [] }, { "name": "scil_tractogram_assign_custom_color", "docstring": "The script uses scalars from an anatomy, data_per_point or data_per_streamline\n(e.g. commit_weights) to visualize them on the streamlines.\nSaves the RGB values in the data_per_point 'color' with 3 values per point:\n(color_x, color_y, color_z).\n\nIf called with .tck, the output will always be .trk, because data_per_point has\nno equivalent in tck file.\n\nIf used with a visualization software like MI-Brain\n(https://github.com/imeka/mi-brain), the 'color' dps is applied by default at\nloading time.\n\nCOLORING METHOD\nThis script maps the raw values from these sources to RGB using a colormap.\n --use_dpp: The data from each point is converted to a color.\n --use_dps: The same color is applied to all points of the streamline.\n --from_anatomy: The voxel's color is used for the points of the streamlines\n crossing it. See also scil_tractogram_project_map_to_streamlines.py. You\n can have more options to project maps to dpp, and then use --use_dpp here.\n --along_profile: The data used here is each point's position in the\n streamline. 
To have nice results, you should first uniformize head/tail.\n See scil_tractogram_uniformize_endpoints.py.\n --local_angle.\n\nCOLORING OPTIONS\nA minimum and a maximum range can be provided to clip values. If the range of\nvalues is too large for intuitive visualization, a log transform can be\napplied.\n\nIf the data provided from --use_dps, --use_dpp and --from_anatomy are integer\nlabels, they can be mapped using a LookUp Table (--LUT).\nThe file provided as a LUT should be either .txt or .npy and if the size is\nN=20, then the data provided should be between 1-20.\n\nA custom colormap can be provided using --colormap. It should be a string\ncontaining a colormap name OR multiple Matplotlib named colors separated by -.\nThe colormap used for mapping values to colors can be saved to a png/jpg image\nusing the --out_colorbar option.\n\nSee also: scil_tractogram_assign_uniform_color.py, for simplified options.\n\nFormerly: scil_assign_custom_color_to_tractogram.py", "help": "", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "true", + "always" + ], + [ + "methods", + "method" + ], + [ + "average", + "per" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "applied", + "applied" + ], + [ + "considered", + "are" + ], + [ + "long", + "work", + "more" + ], + [ + "difference", + "point" + ], + [ + "brain", + "brain" + ], + [ + "left", + "from" + ], + [ + "clear", + "long", + "too" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "long", + "have" + ], + [ + "thinking", + "you" + ], + [ + "maps", + "mapping" + ], + [ + "long", + "a" + ], + [ + "application", + "systems", + "software" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "project", + "project" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "held", + "in" + ], + [ + "large", + "larger", + "large" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "long", + "result", + "work", + "working", + "time" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "clear", + "considered", + "future", + "lack", + "long", + "matter", + "question", + "result", + "work", + "because" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "streamlines", + "streamlines" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "larger", + "size", + "size" + ], + [ + "streamline", + "streamline" + ], + [ + "result", + "results" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "maps", + "maps" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { 
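Example (illustrative only): the generated help for scil_tractogram_assign_custom_color.py above is empty, so the invocation below is a sketch built solely from the flags named in the docstring (--use_dps, --colormap, --out_colorbar). The file names bundle.trk, bundle_colored.trk and colorbar.png are placeholders, and the exact positional arguments and flag signatures are assumptions; run the script with --help for the authoritative interface.

scil_tractogram_assign_custom_color.py bundle.trk bundle_colored.trk \
    --use_dps commit_weights \
    --colormap jet \
    --out_colorbar colorbar.png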
"name": "scil_tractogram_assign_uniform_color", "docstring": "Assign an hexadecimal RGB color to one or more Trackvis (.trk) tractogram.\n(If called with .tck, the output will always be .trk, because data_per_point\nhas no equivalent in tck file.)\n\nSaves the RGB values in the data_per_point 'color' with values\n(color_x, color_y, color_z).\n\nThe hexadecimal RGB color should be formatted as 0xRRGGBB or \"#RRGGBB\".\n\nSee also: scil_tractogram_assign_custom_color.py\n\nFormerly: scil_assign_uniform_color_to_tractograms.py", "help": "usage: scil_tractogram_assign_uniform_color.py [-h]\n (--fill_color str | --dict_colors file.json)\n (--out_suffix [suffix] | --out_tractogram file.trk)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractograms\n [in_tractograms ...]\n\nAssign an hexadecimal RGB color to one or more Trackvis (.trk) tractogram.\n(If called with .tck, the output will always be .trk, because data_per_point\nhas no equivalent in tck file.)\n\nSaves the RGB values in the data_per_point 'color' with values\n(color_x, color_y, color_z).\n\nThe hexadecimal RGB color should be formatted as 0xRRGGBB or \"#RRGGBB\".\n\nSee also: scil_tractogram_assign_custom_color.py\n\nFormerly: scil_assign_uniform_color_to_tractograms.py\n\npositional arguments:\n in_tractograms Input tractograms (.trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nColoring Methods:\n --fill_color str Can be hexadecimal (ie. either \"#RRGGBB\" or 0xRRGGBB).\n --dict_colors file.json\n Json file: dictionnary mapping each tractogram's basename to a color.\n Do not put your file's extension in your dict.\n Same convention as --fill_color.\n\nOutput options:\n --out_suffix [suffix]\n Specify suffix to append to input basename.\n Mandatory choice if you run this script on multiple tractograms.\n Mandatory choice with --dict_colors.\n [None]\n --out_tractogram file.trk\n Output filename of colored tractogram (.trk).\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "true", + "always" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "long", + "work", + "more" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "thinking", + "you" + ], + [ + "maps", + "mapping" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "blue", + "colored" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "clear", + "considered", + "future", + "lack", + "long", + "matter", + "question", + "result", + "work", + "because" + ], + [ + "clear", + "left", + "work", + 
"put" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "analysis", + "methodology", + "methods", + "processes", + "methods" + ], + [ + "individual", + "each" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_tractogram_commit", "docstring": "Convex Optimization Modeling for Microstructure Informed Tractography (COMMIT)\nestimates, globally, how a given tractogram explains the DWI in terms of signal\nfit, assuming a certain forward microstructure model. It assigns a weight to\neach streamline, which represents how well it explains the DWI signal globally.\nThe default forward microstructure model is stick-zeppelin-ball, which requires\nmulti-shell data and a peak file (principal fiber directions in each voxel,\ntypically from a field of fODFs).\n\nIt is possible to use the ball-and-stick model for single-shell and multi-shell\ndata. In this case, the peak file is not mandatory. Multi-shell should follow a\n\"NODDI protocol\" (low and high b-values), multiple shells with similar b-values\nshould not be used with COMMIT.\n\nThe output from COMMIT is:\n- fit_NRMSE.nii.gz\n fiting error (Normalized Root Mean Square Error)\n- fit_RMSE.nii.gz\n fiting error (Root Mean Square Error)\n- results.pickle\n Dictionary containing the experiment parameters and final weights\n- compartment_EC.nii.gz\n (est. Extra-Cellular signal fraction)\n- compartment_IC.nii.gz\n (est. Intra-Cellular signal fraction)\n- compartment_ISO.nii.gz\n (est. isotropic signal fraction (freewater comportment)):\n Each of COMMIT compartments\n- streamline_weights.txt\n Text file containing the commit weights for each streamline of the\n input tractogram.\n- streamlines_length.txt\n Text file containing the length (mm) of each streamline.\n- streamline_weights_by_length.txt\n Text file containing the commit weights for each streamline of the\n input tractogram, ordered by their length.\n- tot_streamline_weights\n Text file containing the total commit weights of each streamline.\n Equal to commit_weights * streamlines_length (W_i * L_i)\n- essential.trk / non_essential.trk\n Tractograms containing the streamlines below or equal (essential) and\n above (non_essential) a threshold_weights of 0.\n- decompose_commit.h5\n In the case where the input is a hdf5 file only, we will save an output\n hdf5 with the following information separated into each bundle's dps:\n - streamlines_weights\n - streamline_weights_by_length\n For each bundle, only the essential streamlines are kept.\n\nThis script can divide the input tractogram in two using a threshold to apply\non the streamlines' weight. The threshold used is 0.0, keeping only streamlines\nthat have non-zero weight and that contribute to explain the DWI signal.\nStreamlines with 0 weight are essentially not necessary according to COMMIT.\n\nCOMMIT2 is available only for HDF5 data from\nscil_tractogram_segment_bundles_for_connectivity.py and\nwith the --ball_stick option. Use the --commit2 option to activite it, slightly\nlonger computation time. 
This wrapper offers a simplified way to call COMMIT,\nbut does not allow you to use (or fine-tune) every parameter. If you want to use\nCOMMIT with full access to all parameters,\nvisit: https://github.com/daducci/COMMIT\n\nWhen tuning parameters, such as --iso_diff, --para_diff, --perp_diff or\n--lambda_commit_2 you should evaluate the quality of results by:\n - Look at the 'density' (GTM) of the connectome (essential tractogram)\n - Confirm the quality of WM bundle reconstruction (essential tractogram)\n - Inspect the (N)RMSE map and look for peaks or anomalies\n - Compare the density map before and after (essential tractogram)\n\nFormerly: scil_run_commit.py", "help": "usage: scil_tractogram_commit.py [-h] [--nbr_dir NBR_DIR]\n [--nbr_iter NBR_ITER] [--in_peaks IN_PEAKS]\n [--in_tracking_mask IN_TRACKING_MASK]\n [--commit2]\n [--lambda_commit_2 LAMBDA_COMMIT_2]\n [--ball_stick] [--para_diff PARA_DIFF]\n [--perp_diff PERP_DIFF [PERP_DIFF ...]]\n [--iso_diff ISO_DIFF [ISO_DIFF ...]]\n [--keep_whole_tractogram]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--tolerance tol]\n [--skip_b0_check] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram in_dwi in_bval in_bvec out_dir\n\nConvex Optimization Modeling for Microstructure Informed Tractography (COMMIT)\nestimates, globally, how a given tractogram explains the DWI in terms of signal\nfit, assuming a certain forward microstructure model. It assigns a weight to\neach streamline, which represents how well it explains the DWI signal globally.\nThe default forward microstructure model is stick-zeppelin-ball, which requires\nmulti-shell data and a peak file (principal fiber directions in each voxel,\ntypically from a field of fODFs).\n\nIt is possible to use the ball-and-stick model for single-shell and multi-shell\ndata. In this case, the peak file is not mandatory. Multi-shell should follow a\n\"NODDI protocol\" (low and high b-values), multiple shells with similar b-values\nshould not be used with COMMIT.\n\nThe output from COMMIT is:\n- fit_NRMSE.nii.gz\n fitting error (Normalized Root Mean Square Error)\n- fit_RMSE.nii.gz\n fitting error (Root Mean Square Error)\n- results.pickle\n Dictionary containing the experiment parameters and final weights\n- compartment_EC.nii.gz\n (est. Extra-Cellular signal fraction)\n- compartment_IC.nii.gz\n (est. Intra-Cellular signal fraction)\n- compartment_ISO.nii.gz\n (est. 
isotropic signal fraction (freewater compartment)):\n Each of COMMIT compartments\n- streamline_weights.txt\n Text file containing the commit weights for each streamline of the\n input tractogram.\n- streamlines_length.txt\n Text file containing the length (mm) of each streamline.\n- streamline_weights_by_length.txt\n Text file containing the commit weights for each streamline of the\n input tractogram, ordered by their length.\n- tot_streamline_weights\n Text file containing the total commit weights of each streamline.\n Equal to commit_weights * streamlines_length (W_i * L_i)\n- essential.trk / non_essential.trk\n Tractograms containing the streamlines below or equal (essential) and\n above (non_essential) a threshold_weights of 0.\n- decompose_commit.h5\n In the case where the input is a hdf5 file only, we will save an output\n hdf5 with the following information separated into each bundle's dps:\n - streamlines_weights\n - streamline_weights_by_length\n For each bundle, only the essential streamlines are kept.\n\nThis script can divide the input tractogram in two using a threshold to apply\non the streamlines' weight. The threshold used is 0.0, keeping only streamlines\nthat have non-zero weight and that contribute to explain the DWI signal.\nStreamlines with 0 weight are essentially not necessary according to COMMIT.\n\nCOMMIT2 is available only for HDF5 data from\nscil_tractogram_segment_bundles_for_connectivity.py and\nwith the --ball_stick option. Use the --commit2 option to activate it; computation\ntime will be slightly longer. This wrapper offers a simplified way to call COMMIT,\nbut does not allow you to use (or fine-tune) every parameter. If you want to use\nCOMMIT with full access to all parameters,\nvisit: https://github.com/daducci/COMMIT\n\nWhen tuning parameters, such as --iso_diff, --para_diff, --perp_diff or\n--lambda_commit_2 you should evaluate the quality of results by:\n - Look at the 'density' (GTM) of the connectome (essential tractogram)\n - Confirm the quality of WM bundle reconstruction (essential tractogram)\n - Inspect the (N)RMSE map and look for peaks or anomalies\n - Compare the density map before and after (essential tractogram)\n\nFormerly: scil_run_commit.py\n\npositional arguments:\n in_tractogram Input tractogram (.trk or .tck or .h5).\n in_dwi Diffusion-weighted image used by COMMIT (.nii.gz).\n in_bval b-values in the FSL format (.bval).\n in_bvec b-vectors in the FSL format (.bvec).\n out_dir Output directory for the COMMIT maps.\n\noptions:\n -h, --help show this help message and exit\n --nbr_dir NBR_DIR Number of directions, on the half of the sphere,\n representing the possible orientations of the response functions [500].\n --nbr_iter NBR_ITER Maximum number of iterations [1000].\n --in_peaks IN_PEAKS Peaks file representing principal direction(s) locally,\n typically coming from fODFs. This file is mandatory for the default \n stick-zeppelin-ball model.\n --in_tracking_mask IN_TRACKING_MASK\n Binary mask where tractography was allowed.\n If not set, uses a binary mask computed from the streamlines.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). 
Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nCOMMIT2 options:\n --commit2 Run COMMIT2, requires .h5 as input and will force\n ball&stick model.\n --lambda_commit_2 LAMBDA_COMMIT_2\n Specify the clustering prior strength [0.001].\n\nModel options:\n --ball_stick Use the ball&stick model, disabling the zeppelin compartment.\n The only model suitable for single-shell data.\n --para_diff PARA_DIFF\n Parallel diffusivity in mm^2/s.\n Default for both ball_stick and stick_zeppelin_ball: 1.7E-3.\n --perp_diff PERP_DIFF [PERP_DIFF ...]\n Perpendicular diffusivity in mm^2/s.\n Default for ball_stick: None\n Default for stick_zeppelin_ball: [0.51E-3]\n --iso_diff ISO_DIFF [ISO_DIFF ...]\n Isotropic diffusivity in mm^2/s.\n Default for ball_stick: [2.0E-3]\n Default for stick_zeppelin_ball: [1.7E-3, 3.0E-3]\n\nTractogram options:\n --keep_whole_tractogram\n Save a tractogram copy with streamline weights in the data_per_streamline\n [False].\n --compute_only Compute kernels only, --save_kernels must be used.\n\nKernels options:\n --save_kernels DIRECTORY\n Output directory for the COMMIT kernels.\n --load_kernels DIRECTORY\n Input directory where the COMMIT kernels are located.\n\nReferences:\n[1] Daducci, Alessandro, et al. \"COMMIT: convex optimization modeling for\n microstructure informed tractography.\" IEEE transactions on medical\n imaging 34.1 (2014): 246-257.\n[2] Schiavi, Simona, et al. 
\"A new method for accurate in vivo mapping of\n human brain connections using microstructural and anatomical information.\"\n Science advances 6.31 (2020): eaba8245.\n", - "synonyms": [], + "synonyms": [ + [ + "step", + "continue" + ], + [ + "shape", + "view", + "look" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "methods", + "method" + ], + [ + "clear", + "long", + "step", + "thinking", + "view", + "work", + "working", + "way" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "fibre", + "fiber" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "direction", + "directions" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "diffusion", + "diffusion" + ], + [ + "threshold", + "threshold" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "matter", + "question", + "thinking", + "true", + "work", + "working", + "how" + ], + [ + "long", + "with" + ], + [ + "contrast", + "form", + "forms", + "larger", + "result", + "specific", + "variety", + "similar" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "exist", + "exists" + ], + [ + "imaging", + "imaging" + ], + [ + "step", + "follow" + ], + [ + "step", + "forward" + ], + [ + "cell", + "cellular" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "literature", + "scientific", + "studies", + "study", + "science" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "false", + "false" + ], + [ + "function", + "functions", + "functions" + ], + [ + "maps", + "map" + ], + [ + "bundles", + "bundle" + ], + [ + "examine", + "evaluate" + ], + [ + "long", + "full" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "result", + "following" + ], + [ + "left", + "from" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "applied", + "apply" + ], + [ + "future", + "will" + ], + [ + "anatomical", + "anatomical" + ], + [ + "represent", + "represents" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "signal", + "signal" + ], + [ + "vivo", + "vivo" + ], + [ + "considered", + "subsequently", + "was" + ], + [ + "connect", + "connected", + "connection", + "connections", + "connections" + ], + [ + "matter", + "question", + "does" + ], + [ + "streamline", + "streamline" + ], + [ + "order", + "allowed" + ], + [ + "long", + "longer" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "lack", + "quality" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "experiment", + "experiment" + ], + [ + "considered", + "specific", + "variety", + "such" + ], + [ + "considered", + "are" + ], + [ + "algorithm", + "optimization" + ], + [ + "left", + "into" + ], + [ + "work", + "find" + ], + [ + "left", + "half" + ], + [ + "represent", + "representing" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + 
"thinking", + "i" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "streamline", + "simplify" + ], + [ + "subject", + "terms" + ], + [ + "methods", + "using" + ], + [ + "left", + "after" + ], + [ + "left", + "before" + ], + [ + "thinking", + "working", + "looking" + ], + [ + "area", + "work", + "where" + ], + [ + "individual", + "each" + ], + [ + "high", + "higher", + "level", + "low" + ], + [ + "lack", + "minimal" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "long", + "two" + ], + [ + "step", + "start" + ], + [ + "matter", + "question", + "case" + ], + [ + "based", + "reported", + "according" + ], + [ + "principal", + "principal" + ], + [ + "order", + "necessary" + ], + [ + "total", + "number" + ], + [ + "animal", + "human", + "human" + ], + [ + "bundles", + "bundles" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "matter", + "question", + "thinking", + "understand" + ], + [ + "work", + "and" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "parameter", + "parameters", + "parameter" + ], + [ + "methods", + "use" + ], + [ + "direction", + "direction" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "maps", + "mapping" + ], + [ + "area", + "located" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "action", + "response" + ], + [ + "held", + "in" + ], + [ + "long", + "result", + "work", + "working", + "time" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "binary", + "binary" + ], + [ + "future", + "current" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "streamlines", + "streamlines" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "individual", + "lack", + "matter", + "result", + "specific", + "subject", + "certain" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "reliable", + "accurate" + ], + [ + "result", + "results" + ], + [ + "weighted", + "weighted" + ], + [ + "level", + "above" + ], + [ + "order", + "allow" + ], + [ + "question", + "explain" + ], + [ + "fundamental", + "essential" + ], + [ + "maps", + "maps" + ], + [ + "error", + "error" + ], + [ + "considered", + "experience", + "large", + "long", + "result", + "variety", + "work", + "working", + "well" + ], + [ + "increase", + "total", + "total" + ] + ], "keywords": [] }, { "name": "scil_tractogram_compress", "docstring": "Compress tractogram by removing collinear (or almost) points.\n\nThe compression threshold represents the maximum distance (in mm) to the\noriginal position of the point.\n\nFormerly: scil_compress_streamlines.py", "help": "usage: scil_tractogram_compress.py [-h] [-e ERROR_RATE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nCompress tractogram by removing collinear (or almost) points.\n\nThe compression threshold represents the maximum distance (in mm) to the\noriginal position of the point.\n\nFormerly: scil_compress_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file (trk or tck).\n out_tractogram Path of the output tractogram file (trk or tck).\n\noptions:\n 
-h, --help show this help message and exit\n -e ERROR_RATE Maximum compression distance in mm [0.1].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "work", + "and" + ], + [ + "represent", + "represents" + ], + [ + "held", + "on" + ], + [ + "methods", + "using" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "difference", + "point" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "threshold", + "threshold" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "left", + "result", + "when" + ], + [ + "held", + "in" + ], + [ + "considered", + "is" + ] + ], "keywords": [] }, { "name": "scil_tractogram_compute_TODI", "docstring": "Compute a Track Orientation Density Image (TODI).\n\nEach segment of the streamlines is weighted by its length (to support\ncompressed streamlines).\n\nThis script can afterwards output a Track Density Image (TDI) or a TODI with SF\nor SH representation, based on streamlines' segments.\n\nFormerly: scil_compute_todi.py", "help": "usage: scil_tractogram_compute_TODI.py [-h] [--sphere SPHERE] [--mask MASK]\n [--sh_order SH_ORDER]\n [--normalize_per_voxel]\n [--smooth_todi | --asymmetric]\n [--n_steps N_STEPS]\n [--out_mask OUT_MASK]\n [--out_tdi OUT_TDI]\n [--out_todi_sf OUT_TODI_SF]\n [--out_todi_sh OUT_TODI_SH]\n [--reference REFERENCE]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram\n\nCompute a Track Orientation Density Image (TODI).\n\nEach segment of the streamlines is weighted by its length (to support\ncompressed streamlines).\n\nThis script can afterwards output a Track Density Image (TDI) or a TODI with SF\nor SH representation, based on streamlines' segments.\n\nFormerly: scil_compute_todi.py\n\npositional arguments:\n in_tractogram Input streamlines file.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nComputing options:\n --sphere SPHERE Sphere used for the angular discretization. [repulsion724]\n --mask MASK If set, use the given mask.\n --sh_order SH_ORDER Order of the original SH. 
[8]\n --normalize_per_voxel\n If set, normalize each SF/SH at each voxel.\n --smooth_todi If set, smooth TODI (angular and spatial).\n --asymmetric If set, compute asymmetric TODI.\n Cannot be used with --smooth_todi.\n --n_steps N_STEPS Number of steps for streamline segments subdivision prior to binning [1].\n\nOutput files. Saves only when filename is set:\n --out_mask OUT_MASK Mask showing where TDI > 0.\n --out_tdi OUT_TDI Output Track Density Image (TDI).\n --out_todi_sf OUT_TODI_SF\n Output TODI, with SF (each directions\n on the sphere, requires a lot of memory)\n --out_todi_sh OUT_TODI_SH\n Output TODI, with SH coefficients.\n\nReferences:\n [1] Dhollander T, Emsell L, Van Hecke W, Maes F, Sunaert S, Suetens P.\n Track orientation density imaging (TODI) and\n track orientation distribution (TOD) based tractography.\n NeuroImage. 2014 Jul 1;94:312-36.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "memory", + "memory" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "orientation", + "orientation" + ], + [ + "work", + "and" + ], + [ + "order", + "order" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "experience", + "thinking", + "lot" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "step", + "steps" + ], + [ + "direction", + "directions" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "indicating", + "showing" + ], + [ + "methods", + "using" + ], + [ + "streamlines", + "streamlines" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "imaging", + "imaging" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "area", + "work", + "where" + ], + [ + "individual", + "each" + ], + [ + "streamline", + "streamline" + ], + [ + "weighted", + "weighted" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "subsequently", + "afterwards" + ], + [ + "based", + "based" + ], + [ + "spatial", + "temporal", + "spatial" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "exist", + "cannot" + ] + ], "keywords": [] }, { "name": "scil_tractogram_compute_density_map", "docstring": "Compute a density map from a streamlines file. Can be binary.\n\nThis script correctly handles compressed streamlines.\n\nFormerly: scil_compute_streamlines_density_map.py", "help": "usage: scil_tractogram_compute_density_map.py [-h] [--binary [FIXED_VALUE]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_img\n\nCompute a density map from a streamlines file. 
Can be binary.\n\nThis script correctly handles compressed streamlines.\n\nFormerly: scil_compute_streamlines_density_map.py\n\npositional arguments:\n in_bundle Tractogram filename.\n out_img Path of the output image file.\n\noptions:\n -h, --help show this help message and exit\n --binary [FIXED_VALUE]\n If set, will store the same value for all intersected voxels, \n creating a binary map. When set without a value, 1 is used (and dtype \n uint8). If a value is given, will be used as the stored value.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "create", + "creating" + ], + [ + "order", + "set" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "streamlines", + "streamlines" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "intersected", + "intersected" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "maps", + "map" + ] + ], "keywords": [] }, { "name": "scil_tractogram_convert", "docstring": "Conversion of '.tck', '.trk', '.fib', '.vtk' and 'dpy' files using updated file\nformat standard. A TRK file always needs a reference file, a NIFTI, for\nconversion. The FIB file format is in fact a VTK; MITK Diffusion supports it.\n\nFormerly: scil_convert_tractogram.py", "help": "usage: scil_tractogram_convert.py [-h] [--no_bbox_check]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram output_name\n\nConversion of '.tck', '.trk', '.fib', '.vtk' and 'dpy' files using updated file\nformat standard. A TRK file always needs a reference file, a NIFTI, for\nconversion. The FIB file format is in fact a VTK; MITK Diffusion supports it.\n\nFormerly: scil_convert_tractogram.py\n\npositional arguments:\n in_tractogram Tractogram filename. Format must be one of \n trk, tck, vtk, fib, dpy\n output_name Output filename. Format must be one of \n trk, tck, vtk, fib, dpy\n\noptions:\n -h, --help show this help message and exit\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "true", + "always" + ], + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "invalid", + "valid", + "invalid" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "diffusion", + "diffusion" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "supported", + "supports" + ], + [ + "methods", + "using" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "clear", + "considered", + "difference", + "lack", + "matter", + "question", + "result", + "subject", + "thinking", + "true", + "view", + "fact" + ] + ], "keywords": [] }, { "name": "scil_tractogram_convert_hdf5_to_trk", "docstring": "Save connections of a hdf5 created with\n>> scil_tractogram_segment_bundles_for_connectivity.py.\n\nUseful for quality control and visual inspections.\n\nIt can either save all connections (default), individual connections specified\nwith --edge_keys or connections from specific nodes specified with --node_keys.\n\nWith the option --save_empty, a labels_list, as a txt file, must be provided.\nThis option saves existing connections and empty connections.\n\nThe output is a directory containing the thousands of connections:\nout_dir/\n |-- LABEL1_LABEL1.trk\n |-- LABEL1_LABEL2.trk\n |-- [...]\n |-- LABEL90_LABEL90.trk\n\nFormerly: scil_save_connections_from_hdf5.py", "help": "usage: scil_tractogram_convert_hdf5_to_trk.py [-h] [--include_dps]\n [--edge_keys LABEL1_LABEL2 [LABEL1_LABEL2 ...]\n | --node_keys NODE [NODE ...]]\n [--save_empty labels_list]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_hdf5 out_dir\n\nSave connections of a hdf5 created with\n>> scil_tractogram_segment_bundles_for_connectivity.py.\n\nUseful for quality control and visual inspections.\n\nIt can either save all connections (default), individual connections specified\nwith --edge_keys or connections from specific nodes specified with --node_keys.\n\nWith the option --save_empty, a labels_list, as a txt file, must be provided.\nThis option saves existing connections and empty connections.\n\nThe output is a directory containing the thousands of connections:\nout_dir/\n |-- LABEL1_LABEL1.trk\n |-- LABEL1_LABEL2.trk\n |-- [...]\n |-- LABEL90_LABEL90.trk\n\nFormerly: scil_save_connections_from_hdf5.py\n\npositional arguments:\n in_hdf5 HDF5 filename (.h5) containing decomposed connections.\n out_dir Path of the output directory.\n\noptions:\n -h, --help show this help message and exit\n --include_dps Include the data_per_streamline in the metadata.\n --edge_keys LABEL1_LABEL2 [LABEL1_LABEL2 ...]\n Keys to identify the edges (connections) of interest.\n --node_keys NODE [NODE ...]\n Node keys to identify the sub-networks of interest.\n Equivalent to adding any --edge_keys node_LABEL2 or 
LABEL2_node.\n --save_empty labels_list\n Save empty connections. Then, the list of possible connections is \n not found from the hdf5 but inferred from labels_list, a txt file \n containing a list of nodes saved by the decomposition script.\n *If used together with edge_keys or node_keys, the provided nodes must \n exist in labels_list.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n CAREFUL. The whole output directory will be deleted if it exists.\n", - "synonyms": [], + "synonyms": [ + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "working", + "together" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "specific", + "specific" + ], + [ + "exist", + "exist" + ], + [ + "long", + "a" + ], + [ + "tool", + "useful" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "higher", + "interest" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "variety", + "include" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "long", + "with" + ], + [ + "parameters", + "specified" + ], + [ + "visual", + "visual" + ], + [ + "network", + "networks", + "networks" + ], + [ + "clear", + "adding" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "methods", + "using" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "exist", + "exists" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "individual", + "individuals", + "individual" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "connect", + "connected", + "connection", + "connections", + "connections" + ], + [ + "work", + "all" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "lack", + "quality" + ], + [ + "create", + "created" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_tractogram_count_streamlines", "docstring": "Return the number of streamlines in a tractogram. Only supports trk and tck in\norder to allow the lazy loading from nibabel.\n\nFormerly: scil_count_streamlines.py", "help": "usage: scil_tractogram_count_streamlines.py [-h] [--print_count_alone]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n in_tractogram\n\nReturn the number of streamlines in a tractogram. 
Only supports trk and tck in\norder to allow the lazy loading from nibabel.\n\nFormerly: scil_count_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --print_count_alone If true, prints the result only. \n Else, prints the bundle name and count formatted as a json dict. (default)\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "long", + "a" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "held", + "on" + ], + [ + "methods", + "using" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "order", + "order" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "streamlines", + "streamlines" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "meaning", + "true", + "true" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "left", + "from" + ], + [ + "matter", + "thinking", + "else" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "bundles", + "bundle" + ], + [ + "left", + "result", + "when" + ], + [ + "held", + "in" + ], + [ + "considered", + "is" + ], + [ + "lack", + "loss", + "result", + "result" + ], + [ + "meaning", + "name" + ] + ], "keywords": [] }, { "name": "scil_tractogram_cut_streamlines", "docstring": "Filters streamlines and only keeps the parts of streamlines within or\nbetween the ROIs. Two options are available.\n\nInput mask:\n\nThe mask has either 1 entity/blob or\n2 entities/blobs (does not support disconnected voxels).\nThe option --biggest_blob can help if you have such a scenario.\n\nThe 1 entity scenario will 'trim' the streamlines so their longest segment is\nwithin the bounding box or a binary mask.\n\nThe 2 entities scenario will cut streamlines so their segments are within the\nbounding box or going from binary mask #1 to binary mask #2.\n\nInput label:\n\nThe label MUST contain 2 labels different from zero.\nLabel values could be anything.\nThe script will cut streamlines going from label 1 to label 2.\n\nBoth inputs and scenarios will erase data_per_point and data_per_streamline.\n\nFormerly: scil_cut_streamlines.py", "help": "usage: scil_tractogram_cut_streamlines.py [-h] (--mask MASK | --label LABEL)\n [--label_ids LABEL_IDS LABEL_IDS]\n [--resample STEP_SIZE]\n [--biggest_blob]\n [--compress [COMPRESS_TH]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nFilters streamlines and only keeps the parts of streamlines within or\nbetween the ROIs. 
Two options are available.\n\nInput mask:\n\nThe mask has either 1 entity/blob or\n2 entities/blobs (does not support disconnected voxels).\nThe option --biggest_blob can help if you have such a scenario.\n\nThe 1 entity scenario will 'trim' the streamlines so their longest segment is\nwithin the bounding box or a binary mask.\n\nThe 2 entities scenario will cut streamlines so their segments are within the\nbounding box or going from binary mask #1 to binary mask #2.\n\nInput label:\n\nThe label MUST contain 2 labels different from zero.\nLabel values could be anything.\nThe script will cut streamlines going from label 1 to label 2.\n\nBoth inputs and scenarios will erase data_per_point and data_per_streamline.\n\nFormerly: scil_cut_streamlines.py\n\npositional arguments:\n in_tractogram Input tractogram file.\n out_tractogram Output tractogram file. Note: data_per_point will be discarded, if any!\n\noptions:\n -h, --help show this help message and exit\n --label_ids LABEL_IDS LABEL_IDS\n List of label indices to use to cut streamlines (2 values).\n --resample STEP_SIZE Resample streamlines to a specific step-size in mm [None].\n --biggest_blob Use the biggest entity and force the 1 ROI scenario.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm. [0.1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMandatory mask options:\n Choose between mask or label input.\n\n --mask MASK Binary mask containing either 1 or 2 blobs.\n --label LABEL Label containing 2 blobs.\n", - "synonyms": [], + "synonyms": [ + [ + "individual", + "specific", + "unique", + "variety", + "different" + ], + [ + "anatomy", + "anatomy" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "matter", + "true", + "anything" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "thinking", + "you" + ], + [ + "specific", + "specific" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "clear", + "long", + "matter", + "result", + "work", + "so" + ], + [ + "action", + "process", + "step", + "step" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "streamlines", + "streamlines" + ], + [ + "considered", + "specific", + "variety", + "such" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "result", + "resulting" + ], + [ + "considered", + "is" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + 
"larger", + "size", + "size" + ], + [ + "matter", + "question", + "does" + ], + [ + "streamline", + "streamline" + ], + [ + "long", + "two" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "areas", + "parts" + ], + [ + "step", + "thinking", + "going" + ], + [ + "clear", + "result", + "work", + "could" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_tractogram_detect_loops", "docstring": "This script can be used to remove loops in two types of streamline datasets:\n\n - Whole brain: For this type, the script removes streamlines if they\n make a loop with an angle of more than 360 degrees. It's possible to change\n this angle with the --angle option. Warning: Don't use --qb option for a\n whole brain tractography.\n\n - Bundle dataset: For this type, it is possible to remove loops and\n streamlines outside the bundle. For the sharp angle turn, use --qb option.\n\nSee also:\n scil_tractogram_filter_by_anatomy.py\n\nFormerly: scil_detect_streamlines_loops.py", "help": "usage: scil_tractogram_detect_loops.py [-h]\n [--looping_tractogram out_filename]\n [--qb [threshold]] [--angle ANGLE]\n [--display_counts] [--no_empty]\n [--indent INDENT] [--sort_keys]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script can be used to remove loops in two types of streamline datasets:\n\n - Whole brain: For this type, the script removes streamlines if they\n make a loop with an angle of more than 360 degrees. It's possible to change\n this angle with the --angle option. Warning: Don't use --qb option for a\n whole brain tractography.\n\n - Bundle dataset: For this type, it is possible to remove loops and\n streamlines outside the bundle. For the sharp angle turn, use --qb option.\n\nSee also:\n scil_tractogram_filter_by_anatomy.py\n\nFormerly: scil_detect_streamlines_loops.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_tractogram Output tractogram without loops.\n\noptions:\n -h, --help show this help message and exit\n --looping_tractogram out_filename\n If set, saves detected looping streamlines.\n --qb [threshold] If set, uses QuickBundles to detect outliers (loops, sharp angle \n turns). Given threshold is the maximal streamline to bundle \n distance for a streamline to be considered as a tracking error.\n Default if set: [8.0]\n --angle ANGLE Maximum looping (or turning) angle of\n a streamline in degrees. [360]\n --display_counts Print streamline count before and after filtering\n --no_empty If set, will not save outputs if they are empty.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\nReferences:\n QuickBundles, based on [Garyfallidis12] Frontiers in Neuroscience, 2012.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "considered", + "are" + ], + [ + "long", + "work", + "more" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "long", + "a" + ], + [ + "action", + "clear", + "future", + "result", + "step", + "change" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "create", + "future", + "step", + "work", + "make" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "left", + "after" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "degree", + "degrees" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "clear", + "step", + "turn" + ], + [ + "left", + "before" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "streamline", + "streamline" + ], + [ + "long", + "two" + ], + [ + "step", + "start" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "based", + "based" + ], + [ + "forms", + "specific", + "variety", + "types" + ], + [ + "error", + "error" + ], + [ + "bundles", + "bundle" + ], + [ + "data", + "tracking", + "tracking" + ] + ], "keywords": [] }, { "name": "scil_tractogram_dpp_math", "docstring": "Performs an operation on data per point (dpp) from input streamlines.\n\nAlthough the input data always comes from the dpp, the output can be either\na dpp or a data_per_streamline (dps), depending on the chosen options.\nTwo modes of operation are supported: dpp and dps.\n - In dps mode, the operation is performed on dpp across the dimension of\n the streamlines resulting in a single value (or array in the 4D case)\n per streamline, stored as dps.\n - In dpp mode, the operation is performed on each point separately,\n resulting in a new dpp.\n\nIf endpoints_only and dpp mode is set the operation will only 
be calculated at\nthe streamline endpoints; the rest of the values along the streamline will be\nNaN.\n\nIf endpoints_only and dps mode is set, the operation will be calculated across the\ndata at the endpoints and stored as a single value (or array in the 4D case)\nper streamline.\n\nEndpoint only operation:\ncorrelation: correlation calculated between arrays extracted from streamline\nendpoints (data must be multivalued per point) and dps mode must be set.", "help": "usage: scil_tractogram_dpp_math.py [-h] --mode {dpp,dps} --in_dpp_name key\n [key ...] --out_keys key [key ...]\n [--endpoints_only] [--keep_all_dpp_dps]\n [--overwrite_dpp_dps]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--no_bbox_check]\n OPERATION INPUT_FILE OUTPUT_FILE\n\nPerforms an operation on data per point (dpp) from input streamlines.\n\nAlthough the input data always comes from the dpp, the output can be either\na dpp or a data_per_streamline (dps), depending on the chosen options.\nTwo modes of operation are supported: dpp and dps.\n - In dps mode, the operation is performed on dpp across the dimension of\n the streamlines resulting in a single value (or array in the 4D case)\n per streamline, stored as dps.\n - In dpp mode, the operation is performed on each point separately,\n resulting in a new dpp.\n\nIf endpoints_only and dpp mode is set the operation will only be calculated at\nthe streamline endpoints; the rest of the values along the streamline will be\nNaN.\n\nIf endpoints_only and dps mode is set, the operation will be calculated across the\ndata at the endpoints and stored as a single value (or array in the 4D case)\nper streamline.\n\nEndpoint only operation:\ncorrelation: correlation calculated between arrays extracted from streamline\nendpoints (data must be multivalued per point) and dps mode must be set.\n\npositional arguments:\n OPERATION The type of operation to be performed on the \n streamlines. Must be one of the following: \n [mean, sum, min, max, correlation.]\n INPUT_FILE Input tractogram containing streamlines and metadata.\n OUTPUT_FILE The file where the remaining streamlines \n are saved.\n\noptions:\n -h, --help show this help message and exit\n --mode {dpp,dps} Set to dps if the operation is to be performed \n across all dimensions resulting in a single value per \n streamline. Set to dpp if the operation is to be \n performed on each point separately resulting in a \n single value per point.\n --in_dpp_name key [key ...]\n Name or list of names of the data_per_point for \n operation to be performed on. If more than one dpp \n is selected, the same operation will be applied \n separately to each one.\n --out_keys key [key ...]\n Name of the resulting data_per_point or \n data_per_streamline to be saved in the output \n tractogram. If more than one --in_dpp_name was used, \n enter the same number of --out_keys values.\n --endpoints_only If set, will only perform operation on endpoints \n If not set, will perform operation on all streamline \n points.\n --keep_all_dpp_dps If set, previous data_per_point will be preserved \n in the output tractogram. Else, only --out_dpp_name \n keys will be saved.\n --overwrite_dpp_dps If set, if --keep_all_dpp_dps is set and some \n --out_keys keys already existed in your \n data_per_point or data_per_streamline, allow \n overwriting old data_per_point.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
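As an illustration, averaging a per-point metric into one value per streamline (the dpp key 'fa' is hypothetical) could look like:
>>> scil_tractogram_dpp_math.py mean tractogram.trk out.trk --mode dps --in_dpp_name fa --out_keys fa_mean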
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "true", + "always" + ], + [ + "average", + "per" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "applied", + "applied" + ], + [ + "result", + "following" + ], + [ + "considered", + "are" + ], + [ + "long", + "work", + "more" + ], + [ + "difference", + "point" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "invalid", + "valid", + "invalid" + ], + [ + "meaning", + "name" + ], + [ + "order", + "set" + ], + [ + "long", + "a" + ], + [ + "key", + "main", + "key" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "clear", + "considered", + "contrast", + "long", + "result", + "work", + "although" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "methods", + "using" + ], + [ + "area", + "main", + "along" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "left", + "rest" + ], + [ + "supported", + "supported" + ], + [ + "areas", + "across" + ], + [ + "matter", + "thinking", + "else" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "result", + "resulting" + ], + [ + "considered", + "is" + ], + [ + "considered", + "subsequently", + "was" + ], + [ + "represent", + "chosen" + ], + [ + "area", + "work", + "where" + ], + [ + "individual", + "each" + ], + [ + "streamline", + "streamline" + ], + [ + "order", + "allow" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "long", + "two" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "exist", + "existed" + ], + [ + "matter", + "question", + "case" + ], + [ + "attention", + "comes" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "large", + "work", + "some" + ] + ], "keywords": [] }, { "name": "scil_tractogram_extract_ushape", "docstring": "This script extracts streamlines depending on their U-shapeness.\nThis script is a replica of Trackvis method.\n\nWhen ufactor is close to:\n* 0 it defines straight streamlines\n* 1 it defines U-fibers\n* -1 it defines S-fibers\n\nFormerly: scil_extract_ushape.py", "help": "usage: scil_tractogram_extract_ushape.py [-h] [--minU MINU] [--maxU MAXU]\n [--remaining_tractogram filename]\n [--no_empty] [--display_counts]\n [--indent INDENT] [--sort_keys]\n 
[--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script extracts streamlines depending on their U-shapeness.\nThis script is a replica of Trackvis method.\n\nWhen ufactor is close to:\n* 0 it defines straight streamlines\n* 1 it defines U-fibers\n* -1 it defines S-fibers\n\nFormerly: scil_extract_ushape.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_tractogram Output tractogram file name.\n\noptions:\n -h, --help show this help message and exit\n --minU MINU Min ufactor value. [0.5]\n --maxU MAXU Max ufactor value. [1.0]\n --remaining_tractogram filename\n If set, saves remaining streamlines.\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "methods", + "method" + ], + [ + "work", + "and" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "meaning", + "name" + ], + [ + "defined", + "defines" + ], + [ + "order", + "set" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "methods", + "using" + ], + [ + "left", + "after" + ], + [ + "streamlines", + "streamlines" + ], + [ + "left", + "before" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "fibres", + "fibers" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "streamline", + "streamline" + ], + [ + "clear", + "long", + "question", + "result", + "work", + "there" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ] + ], "keywords": [] }, { "name": "scil_tractogram_filter_by_anatomy", "docstring": "This script filters streamlines in a tractogram according to their geometrical\nproperties (i.e. limiting their length and looping angle) and their anatomical\nending properties (i.e. the anatomical tissue or region their endpoints lie\nin).\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nThe filtering is performed sequentially in four steps, each step processing the\ndata on the output of the previous step:\n\n Step 1 - Remove streamlines below the minimum length and above the\n maximum length. These thresholds must be set with the ``--minL``\n and ``--maxL`` options.\n Step 2 - Ensure that no streamlines end in the cerebrospinal fluid\n according to the provided parcellation. 
A binary mask can be used\n alternatively through the ``--csf_bin`` option.\n Step 3 - Ensure that no streamlines end in white matter by ensuring that\n they reach the cortical regions according to the provided\n parcellation. The cortical regions of the parcellation can be\n dilated using the ``--ctx_dilation_radius``.\n Step 4 - Remove streamlines if they make a loop with an angle above a\n certain threshold. It's possible to change this angle with the\n ``-a`` option.\n\nLength and loop-based filtering (steps 1 and 2) will not have practical effects\nif no specific thresholds are provided (but will be still executed), since\ndefault values are 0 for the minimum allowed length and infinite for the\nmaximum allowed length and angle.\n\nThe anatomical region endings filtering requires a parcellation or label image\nfile including the cerebrospinal fluid and gray matter (cortical) regions\naccording to the Desikan-Killiany atlas. Intermediate tractograms (results of\neach step and outliers) and volumes can be saved throughout the process.\n\nExample usages:\n\n# Filter length, looping angle and anatomical ending region\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --minL 20 --maxL 200 -a 300\n# Filter only anatomical ending region, with WM dilation and provided csf mask\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --csf_bin csf_bin.nii.gz --ctx_dilation_radius 2\n\nFormerly: scil_filter_streamlines_anatomically.py\n\nNOTE: As of version 2.0.0, the dilation of the cortical mask has changed; from\nan in-house process to scipy's dilation. Results may differ from previous\nversions.", "help": "usage: scil_tractogram_filter_by_anatomy.py [-h] [--minL MINL] [--maxL MAXL]\n [--angle ANGLE]\n [--csf_bin CSF_BIN]\n [--dilate_ctx value]\n [--save_intermediate_tractograms]\n [--save_volumes] [--save_counts]\n [--save_rejected] [--no_empty]\n [--indent INDENT] [--sort_keys]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram in_wmparc out_path\n\nThis script filters streamlines in a tractogram according to their geometrical\nproperties (i.e. limiting their length and looping angle) and their anatomical\nending properties (i.e. the anatomical tissue or region their endpoints lie\nin).\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nThe filtering is performed sequentially in four steps, each step processing the\ndata on the output of the previous step:\n\n Step 1 - Remove streamlines below the minimum length and above the\n maximum length. These thresholds must be set with the ``--minL``\n and ``--maxL`` options.\n Step 2 - Ensure that no streamlines end in the cerebrospinal fluid\n according to the provided parcellation. A binary mask can be used\n alternatively through the ``--csf_bin`` option.\n Step 3 - Ensure that no streamlines end in white matter by ensuring that\n they reach the cortical regions according to the provided\n parcellation. The cortical regions of the parcellation can be\n dilated using the ``--ctx_dilation_radius``.\n Step 4 - Remove streamlines if they make a loop with an angle above a\n certain threshold. 
It's possible to change this angle with the\n ``-a`` option.\n\nLength and loop-based filtering (steps 1 and 2) will not have practical effects\nif no specific thresholds are provided (but will be still executed), since\ndefault values are 0 for the minimum allowed length and infinite for the\nmaximum allowed length and angle.\n\nThe anatomical region endings filtering requires a parcellation or label image\nfile including the cerebrospinal fluid and gray matter (cortical) regions\naccording to the Desikan-Killiany atlas. Intermediate tractograms (results of\neach step and outliers) and volumes can be saved throughout the process.\n\nExample usages:\n\n# Filter length, looping angle and anatomical ending region\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --minL 20 --maxL 200 -a 300\n# Filter only anatomical ending region, with WM dilation and provided csf mask\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --csf_bin csf_bin.nii.gz --ctx_dilation_radius 2\n\nFormerly: scil_filter_streamlines_anatomically.py\n\nNOTE: As of version 2.0.0, the dilation of the cortical mask has changed; from\nan in-house process to scipy's dilation. Results may differ from previous\nversions.\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n in_wmparc Path of the white matter parcellation atlas (.nii or .nii.gz)\n out_path Path to the output files.\n\noptions:\n -h, --help show this help message and exit\n --minL MINL Minimum length of streamlines, in mm. [0.0]\n --maxL MAXL Maximum length of streamlines, in mm. [inf]\n --angle ANGLE Maximum looping (or turning) angle of a streamline, \n in degrees. [inf]\n --csf_bin CSF_BIN Allow CSF endings filtering with this binary\n mask instead of using the atlas (.nii or .nii.gz)\n --dilate_ctx value If set, dilate the cortical labels. Value is the dilation \n radius, in voxels (an integer > 0)\n --save_intermediate_tractograms\n Save accepted and discarded streamlines after each step.\n --save_volumes Save volumetric images (e.g. binarised label \n images, etc) in the filtering process.\n --save_counts Save the streamline counts to a file (.json)\n --save_rejected Save rejected streamlines to output tractogram.\n --no_empty Do not write file if there are no streamlines.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\n References:\n [1] J\u00f6rgens, D., Descoteaux, M., Moreno, R., 2021. Challenges for\n tractogram filtering. In: \u00d6zarslan, E., Schultz, T., Zhang, E., Fuster,\n A. (Eds.), Anisotropy Across Fields and Scales. Springer. Mathematics\n and Visualization.\n [2] Legarreta, J., Petit, L., Rheault, F., Theaud, G., Lemaire, C.,\n Descoteaux, M., Jodoin, P.M. Filtering in tractography using\n autoencoders (FINTA). Medical Image Analysis.
2021\n \n", - "synonyms": [], + "synonyms": [ + [ + "long", + "end" + ], + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "brain", + "tissue" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "considered", + "are" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "proposed", + "rejected" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "image", + "images" + ], + [ + "areas", + "region", + "regions", + "regions" + ], + [ + "order", + "set" + ], + [ + "limiting", + "limiting" + ], + [ + "long", + "have" + ], + [ + "specific", + "specific" + ], + [ + "step", + "steps" + ], + [ + "processing", + "processing" + ], + [ + "long", + "a" + ], + [ + "action", + "clear", + "future", + "result", + "step", + "change" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "create", + "future", + "step", + "work", + "make" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "thinking", + "i" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "anatomical", + "anatomical" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "atlas", + "atlas" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "cortical", + "cortical" + ], + [ + "action", + "process", + "step", + "step" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "order", + "work", + "instead" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "left", + "after" + ], + [ + "process", + "processes", + "step", + "process" + ], + [ + "streamlines", + "streamlines" + ], + [ + "degree", + "degrees" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "areas", + "across" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "individual", + "lack", + "matter", + "result", + "specific", + "subject", + "certain" + ], + [ + "result", + "may" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "blue", + "dark", + "green", + "grey", + "white", + "gray" + ], + [ + "streamline", + "streamline" + ], + [ + "result", + "results" + ], + [ + "level", + "above" + 
], + [ + "order", + "allowed" + ], + [ + "order", + "allow" + ], + [ + "volume", + "volumes", + "volumes" + ], + [ + "clear", + "long", + "question", + "result", + "work", + "there" + ], + [ + "step", + "start" + ], + [ + "thresholds", + "thresholds" + ], + [ + "clear", + "long", + "work", + "still" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "based", + "reported", + "according" + ], + [ + "based", + "based" + ], + [ + "area", + "areas", + "region", + "regions", + "region" + ], + [ + "result", + "since" + ] + ], "keywords": [] }, { "name": "scil_tractogram_filter_by_length", "docstring": "Script to filter streamlines based on their lengths.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Filtering by length is its step1)\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_length.py", "help": "usage: scil_tractogram_filter_by_length.py [-h] [--minL MINL] [--maxL MAXL]\n [--no_empty] [--display_counts]\n [--save_rejected] [--indent INDENT]\n [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nScript to filter streamlines based on their lengths.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Filtering by length is its step1)\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_length.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --minL MINL Minimum length of streamlines, in mm. [0.0]\n --maxL MAXL Maximum length of streamlines, in mm. [inf]\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering\n --save_rejected Save rejected streamlines to output tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "proposed", + "rejected" + ], + [ + "left", + "result", + "when" + ], + [ + "meaning", + "name" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "left", + "after" + ], + [ + "streamlines", + "streamlines" + ], + [ + "left", + "before" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "streamline", + "streamline" + ], + [ + "clear", + "long", + "question", + "result", + "work", + "there" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "based", + "based" + ] + ], "keywords": [] }, { "name": "scil_tractogram_filter_by_orientation", "docstring": "Script to filter streamlines based on their distance traveled in a specific\ndimension (x, y, or z).\n\nUseful to help differentiate bundles.\n\nExamples: In a brain aligned with x coordinates in left - right axis and y\ncoordinates in anterior-posterior axis, a streamline from the ...\n - corpus callosum will likely travel a very short distance in the y axis.\n - cingulum will likely travel a very short distance in the x axis.\n\nNote: we consider that x, y, z are the coordinates of the streamlines; we\ndo not verify if they are aligned with the brain's orientation.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_orientation.py", "help": "usage: scil_tractogram_filter_by_orientation.py [-h] [--min_x MIN_X]\n [--max_x MAX_X]\n [--min_y MIN_Y]\n [--max_y MAX_Y]\n [--min_z MIN_Z]\n [--max_z MAX_Z] [--use_abs]\n [--no_empty]\n [--display_counts]\n [--save_rejected filename]\n [--indent INDENT]\n [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractogram out_tractogram\n\nScript to filter streamlines based on their distance traveled in a specific\ndimension (x, y, or z).\n\nUseful to help differentiate bundles.\n\nExamples: In a brain aligned with x coordinates in left - right axis and y\ncoordinates in anterior-posterior axis, a streamline from the ...\n - corpus callosum will likely travel a very short distance in the y axis.\n - cingulum will likely travel a very short distance in the x axis.\n\nNote: we consider that x, y, z are the coordinates of the streamlines; we\ndo not verify if they are aligned with the brain's orientation.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n - scil_tractogram_filter_by_length.py\n - 
scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_orientation.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --min_x MIN_X Minimum distance in the first dimension, in mm.[0.0]\n --max_x MAX_X Maximum distance in the first dimension, in mm.[inf]\n --min_y MIN_Y Minimum distance in the second dimension, in mm.[0.0]\n --max_y MAX_Y Maximum distance in the second dimension, in mm.[inf]\n --min_z MIN_Z Minimum distance in the third dimension, in mm.[0.0]\n --max_z MAX_Z Maximum distance in the third dimension, in mm.[inf]\n --use_abs If set, will use the total of distances in absolute value (ex, coming back on yourself will contribute to the total distance instead of cancelling it).\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering.\n --save_rejected filename\n Save the SFT of rejected streamlines.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "considered", + "potential", + "result", + "likely" + ], + [ + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundles" + ], + [ + "orientation", + "orientation" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "considered", + "are" + ], + [ + "clear", + "considered", + "highly", + "long", + "matter", + "true", + "work", + "very" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "proposed", + "rejected" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "left", + "left" + ], + [ + "specific", + "specific" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "long", + "short" + ], + [ + "unique", + "variety", + "examples" + ], + [ + "held", + "on" + ], + [ + "tool", + "useful" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "future", + "will" + ], + [ + "left", + "back" + ], + [ + "anterior", + "dorsal", + "lateral", + "medial", + "posterior", + "ventral", + "posterior" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "order", + "work", + "instead" + ], + [ + "left", + "after" + ], + [ + "left", + "right" + ], + [ + "cingulum", + "cingulum" + ], + [ + "streamlines", + "streamlines" + ], + [ + "left", + "before" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "callosum", + "callosum" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "anterior", + 
"dorsal", + "medial", + "posterior", + "ventral", + "anterior" + ], + [ + "streamline", + "streamline" + ], + [ + "clear", + "long", + "question", + "result", + "work", + "there" + ], + [ + "corpus", + "corpus" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "based", + "based" + ], + [ + "increase", + "total", + "total" + ], + [ + "considered", + "possibility", + "question", + "step", + "consider" + ] + ], "keywords": [] }, { "name": "scil_tractogram_filter_by_roi", "docstring": "Filtering of a tractogram based on any combination of conditions involving a\nROI (ex: keep streamlines whose endoints are inside the ROI, exclude\nstreamlines not entirely included in a ROI, etc.)\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Can reject streamlines with endpoints in the WM or the CSF based on\n labels)\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n\nCondition\n---------\nFor every type of ROI, two or three values are expected: MODE CRITERIA DISTANCE\n(DISTANCE is always optional)\n- MODE must be one of these values: ['any', 'all', 'either_end', 'both_ends']\n - any: any part of the streamline must be in the mask\n - all: all parts of the streamline must be in the mask.\n - either_end: at least one end of the streamline must be in the mask.\n - both_ends: both ends of the streamline must be in the mask.\n- CRITERIA must be one of these values: ['include', 'exclude']\n - Include: If condition from MODE is respected, streamline is included.\n - Exlucde: If condition from MODE is respected, streamline is excluded.\n- DISTANCE must be an int and is optional.\n\n\nType of ROI\n-----------\n- Drawn ROI: Directly loaded from a binary file.\n- Atlas ROI: Selected label from an atlas.\n - ID is one or multiple integer values in the atlas. If multiple values,\n ID needs to be between quotes.\n Example: \"1:6 9 10:15\" will use values between 1 and 6 and values\n between 10 and 15 included as well as value 9.\n- BDO: The ROI is the interior of a bounding box.\n- Planes: The ROI is the equivalent of a one-voxel plane.\n * Using mode 'all' with x/y/z plane works but makes very little sense.\n\nNote: `--drawn_roi MASK.nii.gz all include` is equivalent to\n `--drawn_roi INVERSE_MASK.nii.gz any exclude`\n\nFor example, this allows to find out all streamlines entirely in the WM in one\ncommand (without manually inverting the mask first) or to remove any streamline\nstaying in the GM without getting out.\n\nSupports multiple filtering conditions\n--------------------------------------\nMultiple filtering conditions can be used, with varied ROI types if necessary.\nCombining two conditions is equivalent to a logical AND between the conditions.\nOrder of application does not matter for the final result, but may change the\nintermediate files, if any.\n\nDistance management\n-------------------\nDISTANCE is optional, and it should be used carefully with large voxel size\n(e.g > 2.5mm). 
The value is in voxel for ROIs and in mm for bounding boxes.\nAnisotropic data will affect each direction differently.\n When using --overwrite_distance, any filtering option with given criteria\nwill have its DISTANCE value replaced.\n\nFormerly: scil_filter_tractogram.py", "help": "usage: scil_tractogram_filter_by_roi.py [-h]\n [--drawn_roi DRAWN_ROI [DRAWN_ROI ...]]\n [--atlas_roi ATLAS_ROI [ATLAS_ROI ...]]\n [--bdo BDO [BDO ...]]\n [--x_plane X_PLANE [X_PLANE ...]]\n [--y_plane Y_PLANE [Y_PLANE ...]]\n [--z_plane Z_PLANE [Z_PLANE ...]]\n [--filtering_list FILTERING_LIST]\n [--overwrite_distance OVERWRITE_DISTANCE [OVERWRITE_DISTANCE ...]]\n [--save_masks_atlas_roi SAVE_MASKS_ATLAS_ROI]\n [--no_empty] [--display_counts]\n [--save_rejected FILENAME]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nFiltering of a tractogram based on any combination of conditions involving a\nROI (ex: keep streamlines whose endpoints are inside the ROI, exclude\nstreamlines not entirely included in a ROI, etc.)\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Can reject streamlines with endpoints in the WM or the CSF based on\n labels)\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n\nCondition\n---------\nFor every type of ROI, two or three values are expected: MODE CRITERIA DISTANCE\n(DISTANCE is always optional)\n- MODE must be one of these values: ['any', 'all', 'either_end', 'both_ends']\n - any: any part of the streamline must be in the mask\n - all: all parts of the streamline must be in the mask.\n - either_end: at least one end of the streamline must be in the mask.\n - both_ends: both ends of the streamline must be in the mask.\n- CRITERIA must be one of these values: ['include', 'exclude']\n - Include: If condition from MODE is respected, streamline is included.\n - Exclude: If condition from MODE is respected, streamline is excluded.\n- DISTANCE must be an int and is optional.\n\nType of ROI\n-----------\n- Drawn ROI: Directly loaded from a binary file.\n- Atlas ROI: Selected label from an atlas.\n - ID is one or multiple integer values in the atlas. If multiple values,\n ID needs to be between quotes.\n Example: \"1:6 9 10:15\" will use values between 1 and 6 and values\n between 10 and 15 included as well as value 9.\n- BDO: The ROI is the interior of a bounding box.\n- Planes: The ROI is the equivalent of a one-voxel plane.\n * Using mode 'all' with x/y/z plane works but makes very little sense.\n\nNote: `--drawn_roi MASK.nii.gz all include` is equivalent to\n `--drawn_roi INVERSE_MASK.nii.gz any exclude`\n\nFor example, this allows finding all streamlines entirely in the WM in one\ncommand (without manually inverting the mask first) or to remove any streamline\nstaying in the GM without getting out.\n\nSupports multiple filtering conditions\n--------------------------------------\nMultiple filtering conditions can be used, with varied ROI types if necessary.\nCombining two conditions is equivalent to a logical AND between the conditions.\nOrder of application does not matter for the final result, but may change the\nintermediate files, if any.\n\nDistance management\n-------------------\nDISTANCE is optional, and it should be used carefully with large voxel size\n(e.g. > 2.5mm).
The value is in voxel for ROIs and in mm for bounding boxes.\nAnisotropic data will affect each direction differently.\n When using --overwrite_distance, any filtering option with given criteria\nwill have its DISTANCE value replaced.\n\nFormerly: scil_filter_tractogram.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n out_tractogram Path of the output tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --drawn_roi DRAWN_ROI [DRAWN_ROI ...]\n ROI_NAME MODE CRITERIA DISTANCE (distance in voxel is optional)\n Filename of a hand drawn ROI (.nii or .nii.gz).\n --atlas_roi ATLAS_ROI [ATLAS_ROI ...]\n ATLAS_NAME ID MODE CRITERIA DISTANCE (distance in voxel is optional)\n Filename of an atlas (.nii or .nii.gz).\n --bdo BDO [BDO ...] BDO_NAME MODE CRITERIA DISTANCE (distance in mm is optional)\n Filename of a bounding box (bdo) file from MI-Brain.\n --x_plane X_PLANE [X_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in X, in voxel space.\n --y_plane Y_PLANE [Y_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in Y, in voxel space.\n --z_plane Z_PLANE [Z_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in Z, in voxel space.\n --filtering_list FILTERING_LIST\n Text file containing one rule per line\n (i.e. drawn_roi mask.nii.gz both_ends include 1).\n --overwrite_distance OVERWRITE_DISTANCE [OVERWRITE_DISTANCE ...]\n MODE CRITERIA DISTANCE (distance in voxel for ROIs and in mm for bounding box).\n If set, it will overwrite the distance associated to a specific mode/criteria.\n --save_masks_atlas_roi SAVE_MASKS_ATLAS_ROI\n If set, will save the atlas roi masks. The value to provide is the \n prefix, ex: my_path/atlas_roi_. Whole filename will be \n my_path/atlas_roi_{id}.nii.gz\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering\n --save_rejected FILENAME\n Save rejected streamlines to output tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
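As an illustration, keeping only streamlines with both endpoints inside a drawn ROI (filenames hypothetical) could look like:
>>> scil_tractogram_filter_by_roi.py tractogram.trk filtered.trk --drawn_roi roi.nii.gz both_ends include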
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "long", + "end" + ], + [ + "total", + "number" + ], + [ + "planes", + "planes" + ], + [ + "true", + "always" + ], + [ + "anatomy", + "anatomy" + ], + [ + "experience", + "perception", + "thinking", + "true", + "sense" + ], + [ + "average", + "per" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "form", + "combination" + ], + [ + "considered", + "are" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "clear", + "considered", + "highly", + "long", + "matter", + "true", + "work", + "very" + ], + [ + "order", + "order" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "proposed", + "rejected" + ], + [ + "conditions", + "conditions" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "lack", + "loss", + "result", + "result" + ], + [ + "involved", + "involving" + ], + [ + "methods", + "use" + ], + [ + "work", + "find" + ], + [ + "direction", + "direction" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "specific", + "specific" + ], + [ + "long", + "a" + ], + [ + "clear", + "left", + "out" + ], + [ + "voxel", + "voxel" + ], + [ + "action", + "clear", + "future", + "result", + "step", + "change" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "space", + "space" + ], + [ + "held", + "in" + ], + [ + "variety", + "include" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "large", + "larger", + "large" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "atlas", + "atlas" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "supported", + "supports" + ], + [ + "application", + "allows" + ], + [ + "create", + "provide" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "left", + "after" + ], + [ + "increase", + "expected" + ], + [ + "streamlines", + "streamlines" + ], + [ + "application", + "application" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "work", + "works" + ], + [ + "left", + "before" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + 
"result", + "may" + ], + [ + "order", + "rule" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "planes", + "plane" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "larger", + "size", + "size" + ], + [ + "matter", + "question", + "does" + ], + [ + "streamline", + "streamline" + ], + [ + "long", + "two" + ], + [ + "clear", + "long", + "question", + "result", + "work", + "there" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "area", + "main", + "work", + "part" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "based", + "based" + ], + [ + "areas", + "parts" + ], + [ + "forms", + "specific", + "variety", + "types" + ], + [ + "long", + "little" + ], + [ + "considered", + "experience", + "large", + "long", + "result", + "variety", + "work", + "working", + "well" + ], + [ + "thinking", + "getting" + ], + [ + "assigned", + "command" + ], + [ + "order", + "necessary" + ] + ], "keywords": [] }, { "name": "scil_tractogram_fix_trk", "docstring": "This script is made to fix DSI-Studio or Startrack TRK file\n(unknown space/convention) to make it compatible with TrackVis,\nMI-Brain, Dipy Horizon (Stateful Tractogram).\n\nDSI-Studio\n==========\n\nThe script either make it match with an anatomy from DSI-Studio (AC-PC aligned,\nsometimes flipped) or if --in_native_fa is provided it moves it back to native\nDWI space (this involved registration).\n\nSince DSI-Studio sometimes leaves some skull around the brain, the --auto_crop\naims to stabilize registration. If this option fails, manually BET both FA.\nRegistration is more robust at resolution above 2mm (iso), be careful.\n\nIf you are fixing bundles, use this script once with --save_transfo and verify\nresults. Once satisfied, call the scripts on bundles using a bash for loop with\n--load_transfo to save computation.\n\nWe recommand the --cut_invalid to remove invalid points of streamlines rather\nremoving entire streamlines.\n\nThis script was tested on various datasets and worked on all of them. However,\nalways verify the results and if a specific case does not work. 
open an issue\non the Scilpy GitHub repository.\n\nStartrack\n==========\n\nThe script will create a new stateful tractogram using the reference in\norder to fix the missing information in the header of the trk.\n\n\nWARNING: This script is still experimental, DSI-Studio and Startrack\nevolve quickly and results may vary depending on the data itself\nas well as DSI-studio/Startrack version.\n\nFormerly: scil_fix_dsi_studio_trk.py", "help": "usage: scil_tractogram_fix_trk.py [-h] [--software string]\n [--cut_invalid | --remove_invalid]\n [--in_dsi_fa IN_DSI_FA]\n [--in_native_fa IN_NATIVE_FA] [--auto_crop]\n [--save_transfo FILE | --load_transfo FILE]\n [--reference REFERENCE] [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script is made to fix a DSI-Studio or Startrack TRK file\n(unknown space/convention) to make it compatible with TrackVis,\nMI-Brain, Dipy Horizon (Stateful Tractogram).\n\nDSI-Studio\n==========\n\nThe script either makes it match with an anatomy from DSI-Studio (AC-PC aligned,\nsometimes flipped) or, if --in_native_fa is provided, it moves it back to native\nDWI space (this involves registration).\n\nSince DSI-Studio sometimes leaves some skull around the brain, the --auto_crop\naims to stabilize registration. If this option fails, manually BET both FA.\nRegistration is more robust at resolution above 2mm (iso), be careful.\n\nIf you are fixing bundles, use this script once with --save_transfo and verify\nresults. Once satisfied, call the scripts on bundles using a bash for loop with\n--load_transfo to save computation.\n\nWe recommend the --cut_invalid option to remove invalid points of streamlines rather\nthan removing entire streamlines.\n\nThis script was tested on various datasets and worked on all of them. However,\nalways verify the results, and if a specific case does not work, open an issue\non the Scilpy GitHub repository.\n\nStartrack\n==========\n\nThe script will create a new stateful tractogram using the reference in\norder to fix the missing information in the header of the trk.\n\nWARNING: This script is still experimental, DSI-Studio and Startrack\nevolve quickly and results may vary depending on the data itself\nas well as DSI-studio/Startrack version.\n\nFormerly: scil_fix_dsi_studio_trk.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file from DSI studio (.trk).\n out_tractogram Path of the output tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --software string Software used to create in_tractogram.\n Choices: ['dsi_studio', 'startrack']\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
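For instance, fixing a DSI-Studio bundle back to native DWI space (filenames hypothetical) could look like:
>>> scil_tractogram_fix_trk.py bundle.trk bundle_fixed.trk --software dsi_studio --in_dsi_fa dsi_fa.nii.gz --in_native_fa native_fa.nii.gz --cut_invalid --save_transfo transfo.txt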
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nDSI options:\n --in_dsi_fa IN_DSI_FA\n Path of the input FA from DSI Studio (.nii.gz).\n --in_native_fa IN_NATIVE_FA\n Path of the input FA from Dipy/MRtrix (.nii.gz).\n Move the tractogram back to a \"proper\" space, include registration.\n --auto_crop If both FA are not already BET, perform registration \n using a centered-cube crop to ignore the skull.\n A good BET for both is more robust.\n --save_transfo FILE Save estimated transformation to avoid recomputing (.txt).\n --load_transfo FILE Load estimated transformation to apply to other files (.txt).\n\nStarTrack options:\n --reference REFERENCE\n Reference anatomy (.nii or .nii.gz).\n", - "synonyms": [], + "synonyms": [ + [ + "unknown", + "unknown" + ], + [ + "anatomy", + "anatomy" + ], + [ + "true", + "always" + ], + [ + "bundles", + "bundles" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "long", + "work", + "more" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "work", + "working", + "worked" + ], + [ + "work", + "working", + "work" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "invalid", + "valid", + "invalid" + ], + [ + "clear", + "considered", + "create", + "form", + "manner", + "matter", + "result", + "subject", + "thinking", + "true", + "view", + "work", + "rather" + ], + [ + "methods", + "use" + ], + [ + "thinking", + "you" + ], + [ + "specific", + "specific" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "total", + "estimated" + ], + [ + "long", + "a" + ], + [ + "application", + "systems", + "software" + ], + [ + "clear", + "left", + "out" + ], + [ + "held", + "on" + ], + [ + "action", + "clear", + "step", + "move" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "applied", + "apply" + ], + [ + "clear", + "create", + "future", + "step", + "work", + "make" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "possibility", + "avoid" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "specific", + "variety", + "various" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "step", + "moves" + ], + [ + "involved", + "working", + "involved" + ], + [ + "future", + "will" + ], + [ + "left", + "back" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "create" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "space", + "space" + ], + [ + "held", + "in" + ], + [ + "variety", + "include" + ], + [ + "long", + "with" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "experiment", + "experimental" + ], + [ + "clear", + "experience", + "thinking", + "true", + "good" + ], + [ + "methods", + "using" + ], + [ + "variety", + "work", + "other" + ], + [ + "create", + "aims" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "left", + "long", + "work", + "once" + ], + [ + "result", + "may" + ], + [ + "considered", + "subsequently", + "was" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "matter", 
+ "question", + "does" + ], + [ + "result", + "results" + ], + [ + "level", + "above" + ], + [ + "matter", + "question", + "subject", + "issue" + ], + [ + "clear", + "work", + "made" + ], + [ + "clear", + "long", + "work", + "still" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "matter", + "question", + "case" + ], + [ + "considered", + "experience", + "large", + "long", + "result", + "variety", + "work", + "working", + "well" + ], + [ + "result", + "since" + ], + [ + "clear", + "considered", + "result", + "however" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "large", + "work", + "some" + ] + ], "keywords": [] }, { "name": "scil_tractogram_flip", "docstring": "Flip streamlines locally around specific axes.\n\nIMPORTANT: this script should only be used in case of absolute necessity.\nIt's better to fix the real tools than to force flipping streamlines to\nhave them fit in the tools.\n\nFormerly: scil_flip_streamlines.py", "help": "usage: scil_tractogram_flip.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram {x,y,z}\n [{x,y,z} ...]\n\nFlip streamlines locally around specific axes.\n\nIMPORTANT: this script should only be used in case of absolute necessity.\nIt's better to fix the real tools than to force flipping streamlines to\nhave them fit in the tools.\n\nFormerly: scil_flip_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n out_tractogram Path of the output tractogram file.\n {x,y,z} The axes you want to flip. eg: to flip the x and y axes use: x y.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "long", + "have" + ], + [ + "specific", + "specific" + ], + [ + "thinking", + "you" + ], + [ + "experience", + "thinking", + "work", + "working", + "better" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "methods", + "using" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "methods", + "tool", + "tools" + ], + [ + "matter", + "question", + "case" + ], + [ + "considered", + "key", + "work", + "important" + ] + ], "keywords": [] }, { "name": "scil_tractogram_math", "docstring": "Performs an operation on a list of streamline files. The supported\noperations are:\n\ndifference: Keep the streamlines from the first file that are not in\n any of the following files.\n\nintersection: Keep the streamlines that are present in all files.\n\nunion: Keep all streamlines while removing duplicates.\n\nconcatenate: Keep all streamlines with duplicates.\n\nlazy_concatenate: Keep all streamlines with duplicates, never loading the whole\n tractograms in memory. Only works with trk/tck files,\n metadata will be lost and invalid streamlines are kept.\n\nIf a file 'duplicate.trk' has identical streamlines, calling the script using\nthe difference/intersection/union with a single input will remove these\nduplicated streamlines.\n\nTo allow a soft match, use the --precision option to increase the allowed\nthreshold for similarity. A precision of 1 represents 10**(-1), so a\nmaximum distance of 0.1mm is allowed. If the streamlines are identical, the\ndefault value of 3 (or 0.001mm distance) should work.\n\nIf there is a 0.5mm shift, use a precision of 0 (or 1mm distance) and the\n--robust option. This should make it work, but slightly slower. It will merge all\nstreamlines that are similar when rounded to that precision level.\n\nThe metadata (data per point, data per streamline) of the streamlines that\nare kept in the output will be preserved. This requires that all input files\nshare the same type of metadata. If this is not the case, use the option\n--no_metadata to strip the metadata from the output. 
Or --fake_metadata to\ninitialize dummy metadata in the files missing them.\n\nFormerly: scil_streamlines_math.py", "help": "usage: scil_tractogram_math.py [-h] [--precision NBR_OF_DECIMALS] [--robust]\n [--no_metadata] [--fake_metadata]\n [--save_indices OUT_INDEX_FILE] [--save_empty]\n [--no_bbox_check] [--indent INDENT]\n [--sort_keys] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n OPERATION INPUT_FILES [INPUT_FILES ...]\n OUTPUT_FILE\n\nPerforms an operation on a list of streamline files. The supported\noperations are:\n\ndifference: Keep the streamlines from the first file that are not in\n any of the following files.\n\nintersection: Keep the streamlines that are present in all files.\n\nunion: Keep all streamlines while removing duplicates.\n\nconcatenate: Keep all streamlines with duplicates.\n\nlazy_concatenate: Keep all streamlines with duplicates, never loading the whole\n tractograms in memory. Only works with trk/tck files,\n metadata will be lost and invalid streamlines are kept.\n\nIf a file 'duplicate.trk' has identical streamlines, calling the script using\nthe difference/intersection/union with a single input will remove these\nduplicated streamlines.\n\nTo allow a soft match, use the --precision option to increase the allowed\nthreshold for similarity. A precision of 1 represents 10**(-1), so a\nmaximum distance of 0.1mm is allowed. If the streamlines are identical, the\ndefault value of 3 (or 0.001mm distance) should work.\n\nIf there is a 0.5mm shift, use a precision of 0 (or 1mm distance) and the\n--robust option. This should make it work, but slightly slower. It will merge all\nstreamlines that are similar when rounded to that precision level.\n\nThe metadata (data per point, data per streamline) of the streamlines that\nare kept in the output will be preserved. This requires that all input files\nshare the same type of metadata. If this is not the case, use the option\n--no_metadata to strip the metadata from the output. Or --fake_metadata to\ninitialize dummy metadata in the files missing them.\n\nFormerly: scil_streamlines_math.py\n\npositional arguments:\n OPERATION The type of operation to be performed on the streamlines. Must\n be one of the following: difference, intersection, union, concatenate, lazy_concatenate.\n INPUT_FILES The list of files that contain the streamlines to operate on.\n OUTPUT_FILE The file where the remaining streamlines are saved.\n\noptions:\n -h, --help show this help message and exit\n --precision NBR_OF_DECIMALS, -p NBR_OF_DECIMALS\n Precision used to compare streamlines [4].\n --robust, -r Use a version robust to small translations/rotations.\n --no_metadata, -n Strip the streamline metadata from the output.\n --fake_metadata Skip the metadata verification, create fake metadata if missing, can lead to unexpected behavior.\n --save_indices OUT_INDEX_FILE, -s OUT_INDEX_FILE\n Save the streamline indices to the supplied json file.\n --save_empty If set, we will save all results, even if the tractogram is empty.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "memory", + "memory" + ], + [ + "average", + "per" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "large", + "larger", + "small" + ], + [ + "work", + "and" + ], + [ + "precision", + "precision" + ], + [ + "result", + "following" + ], + [ + "considered", + "are" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "difference", + "point" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "work", + "working", + "work" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "invalid", + "valid", + "invalid" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "long", + "matter", + "question", + "result", + "work", + "even" + ], + [ + "clear", + "create", + "future", + "step", + "work", + "make" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "future", + "will" + ], + [ + "loss", + "lost" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "create" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "clear", + "long", + "matter", + "result", + "work", + "so" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "difference", + "difference" + ], + [ + "represent", + "represents" + ], + [ + "methods", + "using" + ], + [ + "contrast", + "form", + "forms", + "larger", + "result", + "specific", + "variety", + "similar" + ], + [ + "streamlines", + "streamlines" + ], + [ + "work", + "works" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "supported", + "supported" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "area", + "work", + "where" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "streamline", + "streamline" + ], + [ + "result", + "results" + ], + [ + "order", + "allowed" + ], + [ + "order", + "allow" + ], + [ + "similarity", + "similarity" + ], + [ + "clear", + "long", + "question", + "result", + "work", + "there" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", 
+ "question", + "result", + "work", + "not" + ], + [ + "matter", + "question", + "case" + ], + [ + "higher", + "increase", + "total", + "increase" + ] + ], "keywords": [] }, { "name": "scil_tractogram_pairwise_comparison", "docstring": "This script is designed to compare and help visualize differences between two\ntractograms. This can be especially useful in studies where multiple\ntractograms from different algorithms or parameters need to be compared.\n\nA similar script (scil_bundle_pairwise_comparison.py) is available for bundles,\nwith metrics more adapted to bundles (and spatial agreement).\n\nThe difference is computed in terms of\n- A voxel-wise spatial distance between streamlines crossing each voxel.\n This can help to see if both tractography reconstructions at each voxel\n look similar (out_diff.nii.gz)\n- An angular correlation (ACC) between streamline orientation from TODI.\n This compares the local orientation of streamlines at each voxel\n (out_acc.nii.gz)\n- A patch-wise correlation between streamline density maps from both\n tractograms. This compares where the high/low density regions agree or not\n (out_corr.nii.gz)\n- A heatmap combining all the previous metrics using a harmonic means of the\n normalized metrics to summarize general agreement (out_heatmap.nii.gz)", "help": "usage: scil_tractogram_pairwise_comparison.py [-h] [--out_dir OUT_DIR]\n [--out_prefix OUT_PREFIX]\n [--in_mask IN_FILE]\n [--skip_streamlines_distance]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram_1 in_tractogram_2\n\nThis script is designed to compare and help visualize differences between two\ntractograms. This can be especially useful in studies where multiple\ntractograms from different algorithms or parameters need to be compared.\n\nA similar script (scil_bundle_pairwise_comparison.py) is available for bundles,\nwith metrics more adapted to bundles (and spatial agreement).\n\nThe difference is computed in terms of\n- A voxel-wise spatial distance between streamlines crossing each voxel.\n This can help to see if both tractography reconstructions at each voxel\n look similar (out_diff.nii.gz)\n- An angular correlation (ACC) between streamline orientation from TODI.\n This compares the local orientation of streamlines at each voxel\n (out_acc.nii.gz)\n- A patch-wise correlation between streamline density maps from both\n tractograms. This compares where the high/low density regions agree or not\n (out_corr.nii.gz)\n- A heatmap combining all the previous metrics using a harmonic means of the\n normalized metrics to summarize general agreement (out_heatmap.nii.gz)\n\npositional arguments:\n in_tractogram_1 Input tractogram 1.\n in_tractogram_2 Input tractogram 2.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Directory where all output files will be saved.\n If not specified, outputs will be saved in the current directory.\n --out_prefix OUT_PREFIX\n Prefix for output files. Useful for distinguishing between different runs [out].\n --in_mask IN_FILE Optional input mask.\n --skip_streamlines_distance\n Skip computation of the spatial distance between streamlines. Slowest part of the computation.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "individual", + "specific", + "unique", + "variety", + "different" + ], + [ + "total", + "number" + ], + [ + "algorithm", + "algorithms" + ], + [ + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundles" + ], + [ + "shape", + "view", + "look" + ], + [ + "orientation", + "orientation" + ], + [ + "work", + "and" + ], + [ + "general", + "general" + ], + [ + "long", + "work", + "more" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "areas", + "region", + "regions", + "regions" + ], + [ + "long", + "a" + ], + [ + "clear", + "left", + "out" + ], + [ + "voxel", + "voxel" + ], + [ + "tool", + "useful" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "areas", + "considered", + "highly", + "especially" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "long", + "with" + ], + [ + "parameters", + "specified" + ], + [ + "clinical", + "literature", + "scientific", + "studies", + "study", + "studies" + ], + [ + "differences", + "differences" + ], + [ + "subject", + "terms" + ], + [ + "difference", + "difference" + ], + [ + "methods", + "using" + ], + [ + "contrast", + "form", + "forms", + "larger", + "result", + "specific", + "variety", + "similar" + ], + [ + "view", + "see" + ], + [ + "future", + "current" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "streamlines", + "streamlines" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "area", + "work", + "where" + ], + [ + "individual", + "each" + ], + [ + "high", + "higher", + "level", + "low" + ], + [ + "streamline", + "streamline" + ], + [ + "create", + "lack", + "step", + "work", + "working", + "need" + ], + [ + "average", + "compared" + ], + [ + "long", + "two" + ], + [ + "step", + "start" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "area", + "main", + "work", + "part" + ], + [ + "maps", + "maps" + ], + [ + "spatial", + "temporal", + "spatial" + ], + [ + "meaning", + "order", + "result", + "step", + "true", + "means" + ] + ], "keywords": [] }, { "name": "scil_tractogram_print_info", "docstring": "Prints information on a loaded tractogram: number of streamlines, and\nmean / min / max / std of\n - length in number of points\n - length in mm\n - step size.\n\nFor trk files: also prints the data_per_point and data_per_streamline keys.\n\nSee also:\n - scil_header_print_info.py to see the header, affine, volume dimension.\n - scil_bundle_shape_measures.py to see bundle-specific information.", "help": "usage: scil_tractogram_print_info.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [--indent INDENT] [--sort_keys]\n in_tractogram\n\nPrints 
information on a loaded tractogram: number of streamlines, and\nmean / min / max / std of\n - length in number of points\n - length in mm\n - step size.\n\nFor trk files: also prints the data_per_point and data_per_streamline keys.\n\nSee also:\n - scil_header_print_info.py to see the header, affine, volume dimension.\n - scil_bundle_shape_measures.py to see bundle-specific information.\n\npositional arguments:\n in_tractogram Tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "specific", + "specific" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "action", + "process", + "step", + "step" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "larger", + "size", + "size" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "bundles", + "bundle" + ] + ], "keywords": [] }, { "name": "scil_tractogram_project_map_to_streamlines", "docstring": "Projects maps extracted from a map onto the points of streamlines.\n\nThe default options will take data from a nifti image (3D or 4D) and project it\nonto the points of streamlines. If the image is 4D, the data is stored as a\nlist of 1D arrays per streamline. If the image is 3D, the data is stored as a\nlist of values per streamline.\n\nSee also scil_tractogram_project_streamlines_to_map.py for the reverse action.\n\n* Note that the data from your maps will be projected only on the coordinates\nof the points of your streamlines. Data underlying the whole segments between\ntwo consecutive points is not used. If your streamlines are strongly\ncompressed, or if they have a very big step size, the result will possibly\nreflect your map poorly. You may use scil_tractogram_resample.py to upsample\nyour streamlines first.\n\n* Hint: The streamlines themselves are not modified here, only their dpp. 
To\navoid multiplying data on disk, you could use the following arguments to save\nthe new dpp in your current tractogram:\n>> scil_tractogram_project_map_to_streamlines.py $in_bundle $in_bundle\n --keep_all_dpp -f", "help": "usage: scil_tractogram_project_map_to_streamlines.py [-h] --in_maps IN_MAPS\n [IN_MAPS ...]\n --out_dpp_name\n OUT_DPP_NAME\n [OUT_DPP_NAME ...]\n [--trilinear]\n [--endpoints_only]\n [--keep_all_dpp]\n [--overwrite_dpp]\n [--reference REFERENCE]\n [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n in_tractogram\n out_tractogram\n\nProjects maps extracted from a map onto the points of streamlines.\n\nThe default options will take data from a nifti image (3D or 4D) and project it\nonto the points of streamlines. If the image is 4D, the data is stored as a\nlist of 1D arrays per streamline. If the image is 3D, the data is stored as a\nlist of values per streamline.\n\nSee also scil_tractogram_project_streamlines_to_map.py for the reverse action.\n\n* Note that the data from your maps will be projected only on the coordinates\nof the points of your streamlines. Data underlying the whole segments between\ntwo consecutive points is not used. If your streamlines are strongly\ncompressed, or if they have a very big step size, the result will possibly\nreflect your map poorly. You may use scil_tractogram_resample.py to upsample\nyour streamlines first.\n\n* Hint: The streamlines themselves are not modified here, only their dpp. To\navoid multiplying data on disk, you could use the following arguments to save\nthe new dpp in your current tractogram:\n>> scil_tractogram_project_map_to_streamlines.py $in_bundle $in_bundle\n --keep_all_dpp -f\n\npositional arguments:\n in_tractogram Fiber bundle file.\n out_tractogram Output file.\n\noptions:\n -h, --help show this help message and exit\n --in_maps IN_MAPS [IN_MAPS ...]\n Nifti map to project onto streamlines.\n --out_dpp_name OUT_DPP_NAME [OUT_DPP_NAME ...]\n Name of the data_per_point to be saved in the \n output tractogram.\n --trilinear If set, will use trilinear interpolation \n else will use nearest neighbor interpolation \n by default.\n --endpoints_only If set, will only project the map onto the \n endpoints of the streamlines (all other values along \n streamlines will be NaN). If not set, will project \n the map onto all points of the streamlines.\n --keep_all_dpp If set, previous data_per_point will be preserved \n in the output tractogram. Else, only --out_dpp_name \n keys will be saved.\n --overwrite_dpp If set, if --keep_all_dpp is set and some \n --out_dpp_name keys already existed in your \n data_per_point, allow overwriting old data_per_point.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "average", + "per" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "result", + "following" + ], + [ + "considered", + "are" + ], + [ + "clear", + "considered", + "highly", + "long", + "matter", + "true", + "work", + "very" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "lack", + "loss", + "result", + "result" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "possibility", + "avoid" + ], + [ + "supported", + "support" + ], + [ + "future", + "will" + ], + [ + "project", + "project" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "project", + "projects" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "clear", + "order", + "step", + "work", + "take" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "action", + "process", + "step", + "step" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "variety", + "work", + "other" + ], + [ + "future", + "current" + ], + [ + "area", + "main", + "along" + ], + [ + "streamlines", + "streamlines" + ], + [ + "matter", + "thinking", + "else" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "result", + "may" + ], + [ + "considered", + "possibility", + "result", + "possibly" + ], + [ + "large", + "big" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "larger", + "size", + "size" + ], + [ + "streamline", + "streamline" + ], + [ + "order", + "allow" + ], + [ + "long", + "two" + ], + [ + "action", + "step", + "action" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "maps", + "maps" + ], + [ + "exist", + "existed" + ], + [ + "maps", + "map" + ], + [ + "fundamental", + "underlying" + ], + [ + "supported", + "strongly" + ], + [ + "bundles", + "bundle" + ], + [ + "clear", + "result", + "work", + "could" + ], + [ + "large", + "work", + "some" + ] + ], "keywords": [] }, { "name": "scil_tractogram_project_streamlines_to_map", "docstring": "Projects metrics onto the underlying voxels of streamlines. This script can\nproject data from data_per_point (dpp) or data_per_streamline (dps) to maps.\n\nYou choose to project data from all points of the streamlines, or from the\nendpoints only. 
The idea then is to visualize the cortical areas affected by\nmetrics (assuming streamlines start/end in the cortex).\n\nSee also scil_tractogram_project_map_to_streamlines.py for the reverse action.\n\nHow the data is loaded:\n - From dps: uses the same value for each point of the streamline.\n - From dpp: one value per point.\n\nHow the data is used:\n 1. Average all points of the streamline to get a mean value, set this value\n to all points.\n 2. Average the two endpoints and get their mean value, set this value to\n all points.\n 3. Keep each point individually.\n\nHow the data is projected to a map:\n A. Using each point.\n B. Using the endpoints only.\n\nFor more complex operations than the average per streamline, see\nscil_tractogram_dpp_math.py.", "help": "usage: scil_tractogram_project_streamlines_to_map.py [-h]\n (--use_dps key [key ...] | --use_dpp key [key ...] | --load_dps file [file ...] | --load_dpp file [file ...])\n (--mean_endpoints | --mean_streamline | --point_by_point)\n (--to_endpoints | --to_wm)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bundle out_prefix\n\nProjects metrics onto the underlying voxels of streamlines. This script can\nproject data from data_per_point (dpp) or data_per_streamline (dps) to maps.\n\nYou choose to project data from all points of the streamlines, or from the\nendpoints only. The idea then is to visualize the cortical areas affected by\nmetrics (assuming streamlines start/end in the cortex).\n\nSee also scil_tractogram_project_map_to_streamlines.py for the reverse action.\n\nHow the data is loaded:\n - From dps: uses the same value for each point of the streamline.\n - From dpp: one value per point.\n\nHow the data is used:\n 1. Average all points of the streamline to get a mean value, set this value\n to all points.\n 2. Average the two endpoints and get their mean value, set this value to\n all points.\n 3. Keep each point individually.\n\nHow the data is projected to a map:\n A. Using each point.\n B. Using the endpoints only.\n\nFor more complex operations than the average per streamline, see\nscil_tractogram_dpp_math.py.\n\npositional arguments:\n in_bundle Fiber bundle file.\n out_prefix Folder + prefix to save endpoints metric(s). We will save \n one nifti file per dpp/dps key given.\n Ex: my_path/subjX_bundleY_ with --use_dpp key1 will output \n my_path/subjX_bundleY_key1.nii.gz\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nWhere to get the statistics from. (Choose one):\n --use_dps key [key ...]\n Use the data_per_streamline from the tractogram.\n It must be a .trk\n --use_dpp key [key ...]\n Use the data_per_point from the tractogram. \n It must be a trk.\n --load_dps file [file ...]\n Load data per streamline (scalar) .txt or .npy.\n Must load an array with the right shape.\n --load_dpp file [file ...]\n Load data per point (scalar) from .txt or .npy.\n Must load an array with the right shape.\n\nProcessing choices. 
(Choose one):\n --mean_endpoints Uses one single value per streamline: the mean of the two \n endpoints.\n --mean_streamline Use one single value per streamline: the mean of all \n points of the streamline.\n --point_by_point Directly project the streamlines values onto the map.\n\nWhere to send the statistics. (Choose one):\n --to_endpoints Project metrics onto a mask of the endpoints.\n --to_wm Project metrics into streamlines coverage.\n", - "synonyms": [], + "synonyms": [ + [ + "long", + "end" + ], + [ + "anatomy", + "anatomy" + ], + [ + "average", + "per" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "left", + "into" + ], + [ + "long", + "work", + "more" + ], + [ + "difference", + "point" + ], + [ + "fibre", + "fiber" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "cortex", + "thalamus", + "cortex" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "area", + "areas", + "region", + "regions", + "areas" + ], + [ + "thinking", + "you" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "processing", + "processing" + ], + [ + "long", + "a" + ], + [ + "key", + "main", + "key" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "project", + "project" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "project", + "projects" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "matter", + "question", + "thinking", + "true", + "work", + "working", + "how" + ], + [ + "long", + "with" + ], + [ + "average", + "average" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "cortical", + "cortical" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "left", + "right" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "possibility", + "question", + "thinking", + "true", + "view", + "idea" + ], + [ + "area", + "work", + "where" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "streamline", + "streamline" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "long", + "two" + ], + [ + "complex", + "structure", + "structures", + "complex" + ], + [ + "step", + "start" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "action", + "step", + "action" + ], + [ + "work", + "all" + ], + [ + "maps", + "maps" + ], + [ + "maps", + "map" + ], + [ + "fundamental", + "underlying" + ], + [ + "bundles", + "bundle" + ], + [ + "shape", + "structure", + "shape" + ] + ], "keywords": [] }, { "name": "scil_tractogram_qbx", "docstring": "Compute clusters using QuickBundlesX and save them separately.\nWe cannot know the number of clusters in 
advance.\n\nQuickbundles:\nGaryfallidis, E. et al. (2012). Quickbundles, a method for tractography\nsimplification. Frontiers in neuroscience, 6, 175.\n\nQuickbundlesX:\nGaryfallidis, E. et al. (2016) QuickBundlesX: sequential clustering of millions\nof streamlines in multiple levels of detail at record execution time. 24th\nInternational Society of Magnetic Resonance in Medicine (ISMRM).\n\n\"QuickBundlesX shows a remarkable 20+X speedup over its predecessor\"\n\nFormerly: scil_compute_qbx.py", "help": "usage: scil_tractogram_qbx.py [-h] [--nb_points NB_POINTS]\n [--out_centroids OUT_CENTROIDS]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram dist_thresh out_clusters_dir\n\nCompute clusters using QuickBundlesX and save them separately.\nWe cannot know the number of clusters in advance.\n\nQuickbundles:\nGaryfallidis, E. et al. (2012). Quickbundles, a method for tractography\nsimplification. Frontiers in neuroscience, 6, 175.\n\nQuickbundlesX:\nGaryfallidis, E. et al. (2016) QuickBundlesX: sequential clustering of millions\nof streamlines in multiple levels of detail at record execution time. 24th\nInternational Society of Magnetic Resonance in Medicine (ISMRM).\n\n\"QuickBundlesX shows a remarkable 20+X speedup over its predecessor\"\n\nFormerly: scil_compute_qbx.py\n\npositional arguments:\n in_tractogram Tractogram filename.\n Path of the input tractogram or bundle.\n dist_thresh Last QuickBundlesX threshold in mm. Typically \n the values are between 10-20 mm.\n out_clusters_dir Path where to save the clusters directory.\n\noptions:\n -h, --help show this help message and exit\n --nb_points NB_POINTS\n Streamlines will be resampled to have this number of points [20].\n --out_centroids OUT_CENTROIDS\n Output tractogram filename.\n Format must be readable by the Nibabel API.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "methods", + "method" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "matter", + "question", + "thinking", + "true", + "know" + ], + [ + "left", + "result", + "when" + ], + [ + "long", + "over" + ], + [ + "long", + "have" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "last" + ], + [ + "held", + "in" + ], + [ + "long", + "result", + "work", + "working", + "time" + ], + [ + "methods", + "using" + ], + [ + "higher", + "increase", + "level", + "levels" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "area", + "work", + "where" + ], + [ + "global", + "international" + ], + [ + "bundles", + "bundle" + ], + [ + "exist", + "cannot" + ] + ], "keywords": [] }, { "name": "scil_tractogram_register", "docstring": "Generate a linear transformation matrix from the registration of 2 tractograms.\nTypically, this script is run before scil_tractogram_apply_transform.py.\n\nFor more information on how to use the various registration scripts, see the\ndoc at:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_register_tractogram.py", "help": "usage: scil_tractogram_register.py [-h] [--out_name OUT_NAME] [--only_rigid]\n [--moving_tractogram_ref MOVING_TRACTOGRAM_REF]\n [--static_tractogram_ref STATIC_TRACTOGRAM_REF]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n moving_tractogram static_tractogram\n\nGenerate a linear transformation matrix from the registration of 2 tractograms.\nTypically, this script is run before scil_tractogram_apply_transform.py.\n\nFor more information on how to use the various registration scripts, see the\ndoc at:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_register_tractogram.py\n\npositional arguments:\n moving_tractogram Path of the moving tractogram.\n static_tractogram Path of the target tractogram.\n\noptions:\n -h, --help show this help message and exit\n --out_name OUT_NAME Filename of the transformation matrix. \n The registration type will be appended as a suffix,\n [_.txt]. \n Default: [transformation.txt]\n --only_rigid If set, will only use a rigid transformation (uses affine by default).\n --moving_tractogram_ref MOVING_TRACTOGRAM_REF\n Reference anatomy for moving_tractogram (if tck/vtk/fib/dpy) file\n support (.nii or .nii.gz).\n --static_tractogram_ref STATIC_TRACTOGRAM_REF\n Reference anatomy for static_tractogram (if tck/vtk/fib/dpy) file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] E. Garyfallidis, O. Ocegueda, D. Wassermann, M. Descoteaux\nRobust and efficient linear registration of white-matter fascicles in the\nspace of streamlines, NeuroImage, Volume 117, 15 August 2015, Pages 124-140\n(http://www.sciencedirect.com/science/article/pii/S1053811915003961)\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "create", + "generate" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "long", + "work", + "more" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "specific", + "variety", + "various" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "space", + "space" + ], + [ + "held", + "in" + ], + [ + "matter", + "question", + "thinking", + "true", + "work", + "working", + "how" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "streamlines", + "streamlines" + ], + [ + "left", + "before" + ], + [ + "matrices", + "matrix" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "literature", + "scientific", + "studies", + "study", + "science" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ] + ], "keywords": [] }, { "name": "scil_tractogram_remove_invalid", "docstring": "Removal of streamlines that are out of the volume bounding box. In voxel space,\nno negative coordinates and no coordinates above the volume dimensions are possible.\nAny streamline that does not respect these two conditions is removed.\n\nThe --cut_invalid option will cut streamlines so that their longest segment is\nwithin the bounding box.\n\nFormerly: scil_remove_invalid_streamlines.py", "help": "usage: scil_tractogram_remove_invalid.py [-h] [--cut_invalid]\n [--remove_single_point]\n [--remove_overlapping_points]\n [--threshold THRESHOLD] [--no_empty]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nRemoval of streamlines that are out of the volume bounding box. In voxel space,\nno negative coordinates and no coordinates above the volume dimensions are possible.\nAny streamline that does not respect these two conditions is removed.\n\nThe --cut_invalid option will cut streamlines so that their longest segment is\nwithin the bounding box.\n\nFormerly: scil_remove_invalid_streamlines.py\n\npositional arguments:\n in_tractogram Tractogram filename. Format must be one of \n trk, tck, vtk, fib, dpy.\n out_tractogram Output filename. 
Format must be one of \n trk, tck, vtk, fib, dpy.\n\noptions:\n -h, --help show this help message and exit\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_single_point\n Consider single point streamlines invalid.\n --remove_overlapping_points\n Consider streamlines with overlapping points invalid.\n --threshold THRESHOLD\n Maximum distance between two points to be considered overlapping [0.001 mm].\n --no_empty Do not save empty tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "positive", + "negative" + ], + [ + "anatomy", + "anatomy" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "difference", + "point" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "conditions", + "conditions" + ], + [ + "left", + "result", + "when" + ], + [ + "invalid", + "valid", + "invalid" + ], + [ + "clear", + "considered", + "create", + "form", + "manner", + "matter", + "result", + "subject", + "thinking", + "true", + "view", + "work", + "rather" + ], + [ + "clear", + "left", + "out" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "space", + "space" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "long", + "with" + ], + [ + "clear", + "long", + "matter", + "result", + "work", + "so" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "methods", + "using" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "streamline", + "streamline" + ], + [ + "level", + "above" + ], + [ + "long", + "two" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "considered", + "possibility", + "question", + "step", + "consider" + ] + ], "keywords": [] }, { "name": "scil_tractogram_resample", "docstring": "Script to resample a tractogram to a set number of streamlines.\nDefault behavior:\n- IF number of requested streamlines is lower than streamline count: DOWNSAMPLE\n- IF number of requested streamlines is higher than 
streamline count: UPSAMPLE\nTo prevent upsampling when it is not desired, use --never_upsample.\n\nCan be useful to build training sets for machine learning algorithms, to\nupsample under-represented bundles or downsample over-represented bundles.\n\nWorks by either selecting a subset of streamlines or by generating new\nstreamlines by adding gaussian noise to existing ones.\n\nUpsampling:\n Includes smoothing to compensate for the noisiness of new streamlines\n generated by the process.\nDownsampling:\n Includes the possibility of choosing randomly *per Quickbundle cluster* to\n ensure that all clusters are represented in the final tractogram.\n\nExample usage:\n$ scil_tractogram_resample.py input.trk 1000 output.trk --point_wise_std 0.5 --gaussian 5 --keep_invalid_streamlines\n$ scil_visualize_bundles.py output.trk --local_coloring --width=0.1", "help": "usage: scil_tractogram_resample.py [-h] [--never_upsample]\n [--point_wise_std POINT_WISE_STD]\n [--tube_radius TUBE_RADIUS]\n [--gaussian SIGMA] [-e ERROR_RATE]\n [--keep_invalid_streamlines]\n [--downsample_per_cluster]\n [--qbx_thresholds QBX_THRESHOLDS [QBX_THRESHOLDS ...]]\n [--seed SEED] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram nb_streamlines out_tractogram\n\nScript to resample a tractogram to a set number of streamlines.\nDefault behavior:\n- IF number of requested streamlines is lower than streamline count: DOWNSAMPLE\n- IF number of requested streamlines is higher than streamline count: UPSAMPLE\nTo prevent upsampling when it is not desired, use --never_upsample.\n\nCan be useful to build training sets for machine learning algorithms, to\nupsample under-represented bundles or downsample over-represented bundles.\n\nWorks by either selecting a subset of streamlines or by generating new\nstreamlines by adding gaussian noise to existing ones.\n\nUpsampling:\n Includes smoothing to compensate for the noisiness of new streamlines\n generated by the process.\nDownsampling:\n Includes the possibility of choosing randomly *per Quickbundle cluster* to\n ensure that all clusters are represented in the final tractogram.\n\nExample usage:\n$ scil_tractogram_resample.py input.trk 1000 output.trk --point_wise_std 0.5 --gaussian 5 --keep_invalid_streamlines\n$ scil_visualize_bundles.py output.trk --local_coloring --width=0.1\n\npositional arguments:\n in_tractogram Input tractography file.\n nb_streamlines Number of streamlines to resample the tractogram to.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --never_upsample Make sure to never upsample a tractogram.\n Useful when downsampling batches of files using bash.\n --seed SEED Use a specific random seed for the resampling.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nUpsampling params:\n --point_wise_std POINT_WISE_STD\n Noise to add to existing streamline points to generate new ones [1].\n --tube_radius TUBE_RADIUS\n Maximum distance to generate streamlines around the original ones [1].\n --gaussian SIGMA Sigma for smoothing. 
Use the value of surrounding X,Y,Z points on the streamline to blur the streamlines.\n A good sigma choice would be around 5.\n -e ERROR_RATE Maximum compression distance in mm [0.1].\n --keep_invalid_streamlines\n Keep invalid newly generated streamlines that may go out of the \n bounding box.\n\nDownsampling params:\n --downsample_per_cluster\n If set, downsampling will be done per cluster (computed with \n Quickbundles) to ensure that at least some streamlines are \n kept per bundle. Else, random downsampling is performed (default).\n --qbx_thresholds QBX_THRESHOLDS [QBX_THRESHOLDS ...]\n If you chose option '--downsample_per_cluster', you may set \n the QBx threshold value(s) here. Default: [40, 30, 20]\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "algorithm", + "algorithms" + ], + [ + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundles" + ], + [ + "create", + "generate" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "average", + "per" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "represent", + "represented" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "invalid", + "valid", + "invalid" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "long", + "over" + ], + [ + "specific", + "specific" + ], + [ + "thinking", + "you" + ], + [ + "high", + "higher", + "increase", + "level", + "higher" + ], + [ + "long", + "a" + ], + [ + "clear", + "left", + "out" + ], + [ + "total", + "40" + ], + [ + "tool", + "useful" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "create", + "future", + "step", + "work", + "make" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "future", + "will" + ], + [ + "experience", + "knowledge", + "learning", + "learning" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "random", + "random" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "step", + "go" + ], + [ + "clear", + "adding" + ], + [ + "clear", + "experience", + "thinking", + "true", + "good" + ], + [ + "methods", + "using" + ], + [ + "create", + "build" + ], + [ + "area", + "areas", + "surrounding" + ], + [ + "process", + "processes", + "step", + "process" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "work", + "works" + ], + [ + "matter", + "thinking", + "else" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "result", + "may" + ], + [ + "higher", + "lower" + ], + [ + "streamline", + "streamline" + ], + [ + "work", + "working", + "done" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "matter", + "question", + "thinking", + "sure" + ], + [ + "bundles", + "bundle" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "large", + 
"work", + "some" + ], + [ + "future", + "possibility", + "potential", + "question", + "possibility" + ] + ], "keywords": [] }, { "name": "scil_tractogram_resample_nb_points", "docstring": "Script to resample a set of streamlines to either a new number of points per\nstreamline or to a fixed step size. WARNING: data_per_point is not carried.\n\nFormerly: scil_resample_streamlines.py", "help": "usage: scil_tractogram_resample_nb_points.py [-h]\n (--nb_pts_per_streamline NB_PTS_PER_STREAMLINE | --step_size STEP_SIZE)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nScript to resample a set of streamlines to either a new number of points per\nstreamline or to a fixed step size. WARNING: data_per_point is not carried.\n\nFormerly: scil_resample_streamlines.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --nb_pts_per_streamline NB_PTS_PER_STREAMLINE\n Number of points per streamline in the output.\n --step_size STEP_SIZE\n Step size in the output (in mm).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "larger", + "size", + "size" + ], + [ + "anatomy", + "anatomy" + ], + [ + "streamline", + "streamline" + ], + [ + "action", + "process", + "step", + "step" + ], + [ + "long", + "a" + ], + [ + "average", + "per" + ], + [ + "work", + "and" + ], + [ + "held", + "on" + ], + [ + "methods", + "using" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "streamlines", + "streamlines" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "is" + ], + [ + "held", + "in" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "meaning", + "name" + ], + [ + "order", + "set" + ] + ], "keywords": [] }, { "name": "scil_tractogram_seed_density_map", "docstring": "Compute a density map of seeds saved in .trk file.\n\nFormerly: scil_compute_seed_density_map.py", "help": "usage: scil_tractogram_seed_density_map.py [-h] [--binary [FIXED_VALUE]]\n [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n tractogram_filename\n seed_density_filename\n\nCompute a density map of seeds saved in .trk file.\n\nFormerly: scil_compute_seed_density_map.py\n\npositional arguments:\n tractogram_filename Tracts filename. Format must be .trk. \n File should contain a \"seeds\" value in the data_per_streamline.\n These seeds must be in space: voxel, origin: corner.\n seed_density_filename\n Output seed density filename. 
Format must be Nifti.\n\noptions:\n -h, --help show this help message and exit\n --binary [FIXED_VALUE]\n If set, will store the same value for all intersected voxels, creating a binary map.\n When set without a value, 1 is used (and dtype uint8).\n If a value is given, will be used as the stored value.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "create", + "creating" + ], + [ + "invalid", + "valid", + "invalid" + ], + [ + "order", + "set" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "space", + "space" + ], + [ + "held", + "in" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "streamlines", + "streamlines" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "intersected", + "intersected" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "tract", + "tracts", + "tracts" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "maps", + "map" + ] + ], "keywords": [] }, { "name": "scil_tractogram_segment_and_score", "docstring": "Scores input tractogram overall and bundlewise.\n\nOutputs\n-------\n\n - results.json: Contains a full tractometry report.\n - processing_stats.json: Contains information on the segmentation of\n bundles (ex: the number of wpc per criteria).\n - Splits the input tractogram into\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (if args.compute_ic)\n segmented_WPC/*_wpc.trk (if args.save_wpc_separately)\n IS.trk OR NC.trk (if args.compute_ic)\n\nBy default, if a streamline fits in many bundles, it will be included in every\none. This means a streamline may be a VS for a bundle and an IS for\n(potentially many) others. If you want to assign each streamline to at most one\nbundle, use the `--unique` flag.\n\nConfig file\n-----------\n\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionnary with:\n\nMandatory:\n - endpoints OR [head AND tail]: filename for the endpoints ROI.\n If 'enpoints' is used, we will automatically separate the mask into two\n ROIs, acting as head and tail. 
Quality check is strongly recommended.\n\nOptional:\n Concerning metrics:\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n Concerning inclusion criteria (other streamlines will be WPC):\n - all_mask: ROI serving as \"all\" criteria: to be included in the bundle,\n ALL points of a streamline must be inside the mask.*\n - any_mask: ROI serving as \"any\" criteria: streamlines\n must touch that mask in at least one point (\"any\" point) to be included\n in the bundle.\n - angle: angle criteria. Streamlines containing loops and sharp turns above\n given angle will be rejected from the bundle.\n - length: maximum and minimum lengths per bundle.\n - length_x / length_x_abs: maximum and minimum total distance in the x\n direction (i.e. first coordinate).**\n - length_y / length_y_abs: maximum and minimum total distance in the y\n direction (i.e. second coordinate).**\n - length_z / length_z_abs: maximum and minimum total distance in the z\n direction (i.e. third coordinate).**\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n** With absolute values: coming back on yourself will contribute to the total\ndistance instead of cancelling it.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n \"angle\": 300,\n \"length\": [140, 150],\n \"endpoints\": \"PATH/file1.nii.gz\"\n }\n}", "help": "usage: scil_tractogram_segment_and_score.py [-h] [--json_prefix p]\n [--gt_dir DIR]\n [--use_gt_masks_as_all_masks]\n [--dilate_endpoints NB_PASS]\n [--remove_invalid]\n [--save_wpc_separately]\n [--compute_ic] [--unique]\n [--remove_wpc_belonging_to_another_bundle]\n [--no_empty] [--indent INDENT]\n [--sort_keys] [--no_bbox_check]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram gt_config out_dir\n\nScores input tractogram overall and bundlewise.\n\nOutputs\n-------\n\n - results.json: Contains a full tractometry report.\n - processing_stats.json: Contains information on the segmentation of\n bundles (ex: the number of wpc per criteria).\n - Splits the input tractogram into\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (if args.compute_ic)\n segmented_WPC/*_wpc.trk (if args.save_wpc_separately)\n IS.trk OR NC.trk (if args.compute_ic)\n\nBy default, if a streamline fits in many bundles, it will be included in every\none. This means a streamline may be a VS for a bundle and an IS for\n(potentially many) others. If you want to assign each streamline to at most one\nbundle, use the `--unique` flag.\n\nConfig file\n-----------\n\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\nMandatory:\n - endpoints OR [head AND tail]: filename for the endpoints ROI.\n If 'endpoints' is used, we will automatically separate the mask into two\n ROIs, acting as head and tail. Quality check is strongly recommended.\n\nOptional:\n Concerning metrics:\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n Concerning inclusion criteria (other streamlines will be WPC):\n - all_mask: ROI serving as \"all\" criteria: to be included in the bundle,\n ALL points of a streamline must be inside the mask.*\n - any_mask: ROI serving as \"any\" criteria: streamlines\n must touch that mask in at least one point (\"any\" point) to be included\n in the bundle.\n - angle: angle criteria. 
Streamlines containing loops and sharp turns above\n given angle will be rejected from the bundle.\n - length: maximum and minimum lengths per bundle.\n - length_x / length_x_abs: maximum and minimum total distance in the x\n direction (i.e. first coordinate).**\n - length_y / length_y_abs: maximum and minimum total distance in the y\n direction (i.e. second coordinate).**\n - length_z / length_z_abs: maximum and minimum total distance in the z\n direction (i.e. third coordinate).**\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n** With absolute values: coming back on yourself will contribute to the total\ndistance instead of cancelling it.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n \"angle\": 300,\n \"length\": [140, 150],\n \"endpoints\": \"PATH/file1.nii.gz\"\n }\n}\n\nTractometry\n-----------\nGlobal connectivity metrics:\n\n- Computed by default:\n - VS: valid streamlines, belonging to a bundle (i.e. respecting all the\n criteria for that bundle; endpoints, limit_mask, gt_mask).\n - IS: invalid streamlines. All other streamlines. IS = IC + NC.\n\n- Optional:\n - WPC: wrong path connections, streamlines connecting correct ROIs but not\n respecting the other criteria for that bundle. Such streamlines always\n exist but they are only saved separately if specified in the options.\n Else, they are merged back with the IS.\n By definition, WPC are only computed if \"limits masks\" are provided.\n - IC: invalid connections, streamlines joining an incorrect combination of\n ROIs. Use carefully, quality depends on the quality of your ROIs and no\n analysis is done on the shape of the streamlines.\n - NC: no connections. Invalid streamlines minus invalid connections.\n\n- Fidelity metrics:\n - OL: Overlap. Percentage of ground truth voxels containing streamline(s)\n for a given bundle.\n - OR: Overreach. Amount of voxels containing streamline(s) when they\n shouldn't, for a given bundle. We compute two versions:\n OR_pct_vs = divided by the total number of voxels covered by the bundle.\n (percentage of the voxels touched by VS).\n Values range between 0 and 100%. Values are not defined when we\n recovered no streamline for a bundle, but we set the OR_pct_vs to 0\n in that case.\n OR_pct_gt = divided by the total size of the ground truth bundle mask.\n Values could be higher than 100%.\n - f1 score: which is the same as the Dice score.\n\npositional arguments:\n in_tractogram Input tractogram to score.\n gt_config .json dict configured as specified above.\n out_dir Output directory for the resulting segmented bundles.\n\noptions:\n -h, --help show this help message and exit\n --json_prefix p Prefix of the two output json files. Ex: 'study_x_'. Files will be saved inside out_dir.\n Suffixes will be 'processing_stats.json' and 'results.json'.\n --no_empty Do not write file if there is no streamline.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nAdditions to gt_config:\n --gt_dir DIR Root path of the ground truth files listed in the gt_config. 
\n If not set, filenames in the config file are considered \n as absolute paths.\n --use_gt_masks_as_all_masks\n If set, the gt_config's 'gt_mask' will also be used as\n 'all_mask' for each bundle. Note that this means the\n OR will necessarily be 0.\n\nPreprocessing:\n --dilate_endpoints NB_PASS\n Dilate endpoint masks n-times. Default: 0.\n --remove_invalid Remove invalid streamlines before scoring.\n\nTractometry choices:\n --save_wpc_separately\n If set, streamlines rejected from VC based on the config\n file criteria will be saved separately from IS (and IC)\n in one file *_wpc.tck per bundle.\n --compute_ic If set, IS are split into NC + IC, where IC are computed as one bundle per\n pair of ROI not belonging to a true connection, named\n *_*_IC.tck.\n --unique If set, streamlines are assigned to the first bundle they fit in and not to all.\n --remove_wpc_belonging_to_another_bundle\n If set, WPC actually belonging to any VB (in the \n case of overlapping ROIs) will be removed\n from the WPC classification.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [], + "synonyms": [ + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "limiting", + "limits" + ], + [ + "invalid", + "valid", + "invalid" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "order", + "set" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "left", + "back" + ], + [ + "long", + "with" + ], + [ + "reported", + "report" + ], + [ + "order", + "work", + "instead" + ], + [ + "long", + "than" + ], + [ + "thinking", + "wrong" + ], + [ + "unique", + "variety", + "unique" + ], + [ + "large", + "work", + "many" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "assigned", + "assigned" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "connect", + "connected", + "connecting", + "connects", + "connecting" + ], + [ + "create", + "created" + ], + [ + "bundles", + "bundle" + ], + [ + "supported", + "strongly" + ], + [ + "clear", + "result", + "work", + "could" + ], + [ + "meaning", + "order", + "result", + "step", + "true", + "means" + ], + [ + "long", + "full" + ], + [ + "true", + "always" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "average", + "per" + ], + [ + "work", + "also" + ], + [ + "difference", + "point" + ], + [ + "left", + "from" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "defined", + "definition" + ], + [ + "voxel", + "voxel" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "future", + "will" + ], + [ + "parameters", + "specified" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "increase", + "expected" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "meaning", + "true", + "true" + ], + [ + "result", + "resulting" + ], + [ + "result", + "may" + ], + [ + "connect", + "connected", + "connection", + "connections", + "connections" + ], + [ + "streamline", + "streamline" + ], + [ + "connectivity", + "connectivity" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "connection", + "connections", + "connection" + ], + [ + "lack", + "quality" + ], + [ + "clear", + "question", + 
"result", + "true", + "no" + ], + [ + "shape", + "structure", + "shape" + ], + [ + "considered", + "specific", + "variety", + "such" + ], + [ + "considered", + "highly", + "long", + "work", + "most" + ], + [ + "considered", + "are" + ], + [ + "left", + "into" + ], + [ + "proposed", + "rejected" + ], + [ + "lack", + "loss", + "result", + "result" + ], + [ + "exist", + "exist" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "analysis", + "data", + "methods", + "study", + "analysis" + ], + [ + "supported", + "support" + ], + [ + "thinking", + "i" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "acted", + "role", + "acting" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "involved", + "others" + ], + [ + "methods", + "using" + ], + [ + "variety", + "work", + "other" + ], + [ + "increase", + "total", + "amount" + ], + [ + "defined", + "function", + "defined" + ], + [ + "left", + "before" + ], + [ + "matter", + "thinking", + "else" + ], + [ + "potential", + "potentially" + ], + [ + "exist", + "necessarily" + ], + [ + "area", + "work", + "where" + ], + [ + "individual", + "each" + ], + [ + "long", + "two" + ], + [ + "matter", + "question", + "case" + ], + [ + "based", + "based" + ], + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "global", + "global" + ], + [ + "bundles", + "bundles" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "form", + "combination" + ], + [ + "create", + "experience", + "matter", + "thinking", + "true", + "sort" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "methods", + "use" + ], + [ + "direction", + "direction" + ], + [ + "invalid", + "valid", + "valid" + ], + [ + "clear", + "thinking", + "work", + "we" + ], + [ + "high", + "higher", + "increase", + "level", + "higher" + ], + [ + "average", + "percentage" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "true", + "truth" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "larger", + "size", + "size" + ], + [ + "result", + "results" + ], + [ + "level", + "above" + ], + [ + "work", + "working", + "done" + ], + [ + "clear", + "long", + "question", + "result", + "work", + "there" + ], + [ + "total", + "100" + ], + [ + "increase", + "total", + "total" + ] + ], "keywords": [] }, { "name": "scil_tractogram_segment_bundles", "docstring": "Compute BundleSeg & supports multi-atlas & multi-parameters (RBx-like).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are not sure about the transformation 'direction' you can try\nscil_tractogram_segment_bundles.py (with the -v option), a warning will popup\nif the provided transformation is not used correctly.\n\nThe number of folders inside 'models_directories' will increase the number of\nruns. 
Each folder is considered like an atlas and bundles inside will initiate\nmore BundleSeg executions. The more atlases you have, the more robust the\nrecognition will be.\n\n--minimal_vote_ratio is a value between 0 and 1. If you have 5 input model\ndirectories and a minimal_vote_ratio of 0.5, you will need at least 3 votes.\n\nExample data and usage available at: https://zenodo.org/record/7950602\n\nFor RAM usage, it is recommended to use this heuristic:\n (size of inputs tractogram (GB) * number of processes) < RAM (GB)\nThis is important because many instances of data structures are initialized\nin parallel and can lead to a RAM overflow.\n\nFormerly: scil_recognize_multi_bundles.py", "help": "usage: scil_tractogram_segment_bundles.py [-h] [--out_dir OUT_DIR]\n [--minimal_vote_ratio MINIMAL_VOTE_RATIO]\n [--seed SEED] [--inverse]\n [--reference REFERENCE]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractograms [in_tractograms ...]\n in_config_file in_directory\n in_transfo\n\nCompute BundleSeg & supports multi-atlas & multi-parameters (RBx-like).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are not sure about the transformation 'direction' you can try\nscil_tractogram_segment_bundles.py (with the -v option), a warning will pop up\nif the provided transformation is not used correctly.\n\nThe number of folders inside 'models_directories' will increase the number of\nruns. Each folder is considered like an atlas and bundles inside will initiate\nmore BundleSeg executions. The more atlases you have, the more robust the\nrecognition will be.\n\n--minimal_vote_ratio is a value between 0 and 1. If you have 5 input model\ndirectories and a minimal_vote_ratio of 0.5, you will need at least 3 votes.\n\nExample data and usage available at: https://zenodo.org/record/7950602\n\nFor RAM usage, it is recommended to use this heuristic:\n (size of inputs tractogram (GB) * number of processes) < RAM (GB)\nThis is important because many instances of data structures are initialized\nin parallel and can lead to a RAM overflow.\n\nFormerly: scil_recognize_multi_bundles.py\n\npositional arguments:\n in_tractograms Input tractogram filename (.trk or .tck).\n in_config_file Path of the config file (.json)\n in_directory Path of parent folder of models directories.\n Each folder inside will be considered as a different atlas.\n in_transfo Path for the transformation to model space (.txt, .npy or .mat).\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Path for the output directory [voting_results].\n --minimal_vote_ratio MINIMAL_VOTE_RATIO\n Streamlines will only be considered for saving if\n recognized often enough [0.5].\n --seed SEED Random number generator seed 0.\n --inverse Use the inverse transformation.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] St-Onge, Etienne, Kurt G. Schilling, and Francois Rheault.\n\"BundleSeg: A versatile, reliable and reproducible approach to white\nmatter bundle segmentation.\" International Workshop on Computational\nDiffusion MRI. 
Cham: Springer Nature Switzerland (2023)\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundles" + ], + [ + "thinking", + "view", + "approach" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "long", + "work", + "more" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "considered", + "recognized" + ], + [ + "direction", + "direction" + ], + [ + "methods", + "use" + ], + [ + "long", + "have" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "step", + "work", + "come" + ], + [ + "clear", + "create", + "enough" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "diffusion", + "diffusion" + ], + [ + "imaging", + "mri" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "random", + "random" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "space", + "space" + ], + [ + "traditionally", + "often" + ], + [ + "held", + "in" + ], + [ + "true", + "nature" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "complex", + "structure", + "structures", + "structures" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "atlas", + "atlas" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "supported", + "supports" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "clear", + "considered", + "future", + "lack", + "long", + "matter", + "question", + "result", + "work", + "because" + ], + [ + "methods", + "using" + ], + [ + "reliable", + "reliable" + ], + [ + "streamlines", + "streamlines" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "large", + "work", + "many" + ], + [ + "step", + "try" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "larger", + "size", + "size" + ], + [ + "create", + "lack", + "step", + "work", + "working", + "need" + ], + [ + "global", + "international" + ], + [ + "step", + "start" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "matter", + "question", + "thinking", + "sure" + ], + [ + "bundles", + "bundle" + ], + [ + "higher", + "increase", + "total", + "increase" + ], + [ + "considered", + "key", + "work", + "important" + ] + ], "keywords": [] }, { "name": "scil_tractogram_segment_bundles_for_connectivity", "docstring": "Compute a connectivity matrix from a tractogram and a parcellation.\n\nCurrent strategy is to keep the longest streamline segment 
connecting 2\nregions. If the streamline crosses other gray matter regions before reaching\nits final connected region, the kept connection is still the longest. This is\nrobust to compressed streamlines.\n\nThe output file is a hdf5 (.h5) where the keys are 'LABEL1_LABEL2' and each\ngroup is composed of 'data', 'offsets' and 'lengths' from the array_sequence.\nThe 'data' is stored in VOX/CORNER for simplicity and efficiency. See script\nscil_tractogram_convert_hdf5_to_trk.py to convert to a list of .trk bundles.\n\nFor the --outlier_threshold option the default is a recommended good trade-off\nfor a freesurfer parcellation. With smaller parcels (brainnetome, glasser) the\nthreshold should most likely be reduced.\n\nGood candidate connections to QC are the brainstem to precentral gyrus\nconnection and precentral left to precentral right connection, or equivalent\nin your parcellation.\n\nNOTE: this script can take a while to run. Please be patient.\nExample: on a tractogram with 1.8M streamlines, running on a SSD:\n- 15 minutes without post-processing, only saving final bundles.\n- 30 minutes with full post-processing, only saving final bundles.\n- 60 minutes with full post-processing, saving all possible files.\n\nFormerly: scil_decompose_connectivity.py", "help": "usage: scil_tractogram_segment_bundles_for_connectivity.py [-h] [--no_pruning]\n [--no_remove_loops]\n [--no_remove_outliers]\n [--no_remove_curv_dev]\n [--min_length MIN_LENGTH]\n [--max_length MAX_LENGTH]\n [--outlier_threshold OUTLIER_THRESHOLD]\n [--loop_max_angle LOOP_MAX_ANGLE]\n [--curv_qb_distance CURV_QB_DISTANCE]\n [--out_dir OUT_DIR]\n [--save_raw_connections]\n [--save_intermediate]\n [--save_discarded]\n [--out_labels_list OUT_FILE]\n [--reference REFERENCE]\n [--no_bbox_check]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractograms\n [in_tractograms ...]\n in_labels out_hdf5\n\nCompute a connectivity matrix from a tractogram and a parcellation.\n\nCurrent strategy is to keep the longest streamline segment connecting 2\nregions. If the streamline crosses other gray matter regions before reaching\nits final connected region, the kept connection is still the longest. This is\nrobust to compressed streamlines.\n\nThe output file is a hdf5 (.h5) where the keys are 'LABEL1_LABEL2' and each\ngroup is composed of 'data', 'offsets' and 'lengths' from the array_sequence.\nThe 'data' is stored in VOX/CORNER for simplicity and efficiency. See script\nscil_tractogram_convert_hdf5_to_trk.py to convert to a list of .trk bundles.\n\nFor the --outlier_threshold option the default is a recommended good trade-off\nfor a freesurfer parcellation. With smaller parcels (brainnetome, glasser) the\nthreshold should most likely be reduced.\n\nGood candidate connections to QC are the brainstem to precentral gyrus\nconnection and precentral left to precentral right connection, or equivalent\nin your parcellation.\n\nNOTE: this script can take a while to run. Please be patient.\nExample: on a tractogram with 1.8M streamlines, running on a SSD:\n- 15 minutes without post-processing, only saving final bundles.\n- 30 minutes with full post-processing, only saving final bundles.\n- 60 minutes with full post-processing, saving all possible files.\n\nFormerly: scil_decompose_connectivity.py\n\npositional arguments:\n in_tractograms Tractogram filenames. Format must be one of \n trk, tck, vtk, fib, dpy.\n in_labels Labels file name (nifti). 
Labels must have 0 as background.\n out_hdf5 Output hdf5 file (.h5).\n\noptions:\n -h, --help show this help message and exit\n --out_labels_list OUT_FILE\n Save the labels list as text file.\n Needed for scil_connectivity_compute_matrices.py and others.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nPost-processing options:\n --no_pruning If set, will NOT prune on length.\n Length criteria in --min_length, --max_length.\n --no_remove_loops If set, will NOT remove streamlines making loops.\n Angle criteria based on --loop_max_angle.\n --no_remove_outliers If set, will NOT remove outliers using QB.\n Criteria based on --outlier_threshold.\n --no_remove_curv_dev If set, will NOT remove streamlines that deviate from the mean curvature.\n Threshold based on --curv_qb_distance.\n\nPruning options:\n --min_length MIN_LENGTH\n Pruning minimal segment length. [20.0]\n --max_length MAX_LENGTH\n Pruning maximal segment length. [200.0]\n\nOutliers and loops options:\n --outlier_threshold OUTLIER_THRESHOLD\n Outlier removal threshold when using hierarchical QB. [0.6]\n --loop_max_angle LOOP_MAX_ANGLE\n Maximal winding angle over which a streamline is considered as looping. [330.0]\n --curv_qb_distance CURV_QB_DISTANCE\n Clustering threshold for centroids curvature filtering with QB. [10.0]\n\nSaving options:\n --out_dir OUT_DIR Output directory for each connection as separate file (.trk).\n --save_raw_connections\n If set, will save all raw cut connections in a subdirectory.\n --save_intermediate If set, will save the intermediate results of filtering.\n --save_discarded If set, will save discarded streamlines in subdirectories.\n Includes loops, outliers and qb_loops.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "considered", + "potential", + "result", + "likely" + ], + [ + "anatomy", + "anatomy" + ], + [ + "considered", + "highly", + "long", + "work", + "most" + ], + [ + "bundles", + "bundles" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "left", + "off" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "higher", + "increase", + "result", + "reduced" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "invalid", + "valid", + "invalid" + ], + [ + "areas", + "region", + "regions", + "regions" + ], + [ + "meaning", + "name" + ], + [ + "order", + "set" + ], + [ + "left", + "left" + ], + [ + "thalamus", + "brainstem" + ], + [ + "long", + "have" + ], + [ + "long", + "over" + ], + [ + "processing", + "processing" + ], + [ + "long", + "a" + ], + [ + "gyrus", + "gyrus" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + 
"clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "work", + "making" + ], + [ + "held", + "in" + ], + [ + "clear", + "order", + "step", + "work", + "take" + ], + [ + "parcels", + "parcels" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "involved", + "others" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "connect", + "connected", + "connecting", + "connections", + "connected" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "clear", + "experience", + "thinking", + "true", + "good" + ], + [ + "methods", + "using" + ], + [ + "variety", + "work", + "other" + ], + [ + "view", + "see" + ], + [ + "future", + "current" + ], + [ + "clear", + "future", + "possibility", + "potential", + "question", + "result", + "specific", + "step", + "possible" + ], + [ + "left", + "right" + ], + [ + "precentral", + "precentral" + ], + [ + "streamlines", + "streamlines" + ], + [ + "methods", + "process", + "processes", + "processes" + ], + [ + "left", + "before" + ], + [ + "matrices", + "matrix" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "area", + "work", + "where" + ], + [ + "based", + "group" + ], + [ + "individual", + "each" + ], + [ + "connect", + "connected", + "connection", + "connections", + "connections" + ], + [ + "blue", + "dark", + "green", + "grey", + "white", + "gray" + ], + [ + "lack", + "minimal" + ], + [ + "streamline", + "streamline" + ], + [ + "result", + "results" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "connectivity", + "connectivity" + ], + [ + "step", + "start" + ], + [ + "total", + "60" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "post", + "post" + ], + [ + "clear", + "long", + "work", + "still" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "connection", + "connections", + "connection" + ], + [ + "connect", + "connected", + "connecting", + "connects", + "connecting" + ], + [ + "based", + "based" + ], + [ + "area", + "areas", + "region", + "regions", + "region" + ], + [ + "large", + "larger", + "smaller" + ], + [ + "long", + "full" + ] + ], "keywords": [] }, { "name": "scil_tractogram_segment_one_bundle", "docstring": "Compute a simple Recobundles (single-atlas & single-parameters).\nThe model need to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are unsure about the transformation 'direction' use the verbose\noption (-v) and try with and without the --inverse flag. If you are not using\nthe right transformation 'direction' a warning will popup. 
If there is no\nwarning in both cases it means the transformation is very close to identity and\nboth 'direction' will work.\n\nFormerly: scil_recognize_single_bundles.py", "help": "usage: scil_tractogram_segment_one_bundle.py [-h]\n [--tractogram_clustering_thr TRACTOGRAM_CLUSTERING_THR]\n [--model_clustering_thr MODEL_CLUSTERING_THR]\n [--pruning_thr PRUNING_THR]\n [--slr_threads SLR_THREADS]\n [--seed SEED] [--inverse]\n [--no_empty]\n [--in_pickle IN_PICKLE | --out_pickle OUT_PICKLE]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram in_model in_transfo\n out_tractogram\n\nCompute a simple Recobundles (single-atlas & single-parameters).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are unsure about the transformation 'direction' use the verbose\noption (-v) and try with and without the --inverse flag. If you are not using\nthe right transformation 'direction' a warning will pop up. If there is no\nwarning in both cases it means the transformation is very close to identity and\nboth 'direction' will work.\n\nFormerly: scil_recognize_single_bundles.py\n\npositional arguments:\n in_tractogram Input tractogram filename.\n in_model Model to use for recognition.\n in_transfo Path for the transformation to model space (.txt, .npy or .mat).\n out_tractogram Output tractogram filename.\n\noptions:\n -h, --help show this help message and exit\n --tractogram_clustering_thr TRACTOGRAM_CLUSTERING_THR\n Clustering threshold used for the whole brain [8mm].\n --model_clustering_thr MODEL_CLUSTERING_THR\n Clustering threshold used for the model [4mm].\n --pruning_thr PRUNING_THR\n MDF threshold used for final streamlines selection [6mm].\n --slr_threads SLR_THREADS\n Number of threads for SLR [1].\n --seed SEED Random number generator seed [None].\n --inverse Use the inverse transformation.\n --no_empty Do not write file if there is no streamline.\n --in_pickle IN_PICKLE\n Input pickle clusters map file.\n Will override the tractogram_clustering_thr parameter.\n --out_pickle OUT_PICKLE\n Output pickle clusters map file.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nGaryfallidis, E., Cote, M. A., Rheault, F., ... &\nDescoteaux, M. (2018). Recognition of white matter\nbundles using local and global streamline-based registration and\nclustering. 
NeuroImage, 170, 283-295.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "global", + "global" + ], + [ + "bundles", + "bundles" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "clear", + "considered", + "highly", + "long", + "matter", + "true", + "work", + "very" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "work", + "working", + "work" + ], + [ + "left", + "from" + ], + [ + "matter", + "question", + "subject", + "matter" + ], + [ + "left", + "result", + "when" + ], + [ + "parameter", + "parameters", + "parameter" + ], + [ + "methods", + "use" + ], + [ + "direction", + "direction" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "step", + "work", + "come" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "random", + "random" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "space", + "space" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "clear", + "matter", + "order", + "question", + "step", + "work", + "should" + ], + [ + "long", + "with" + ], + [ + "atlas", + "atlas" + ], + [ + "methods", + "using" + ], + [ + "left", + "right" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "step", + "try" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "streamline", + "streamline" + ], + [ + "create", + "lack", + "step", + "work", + "working", + "need" + ], + [ + "clear", + "long", + "question", + "result", + "work", + "there" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "matter", + "question", + "case" + ], + [ + "based", + "based" + ], + [ + "maps", + "map" + ], + [ + "meaning", + "order", + "result", + "step", + "true", + "means" + ] + ], "keywords": [] }, { "name": "scil_tractogram_shuffle", "docstring": "Shuffle the ordering of streamlines.\n\nFormerly: scil_shuffle_streamlines.py", "help": "usage: scil_tractogram_shuffle.py [-h] [--seed SEED] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nShuffle the ordering of streamlines.\n\nFormerly: scil_shuffle_streamlines.py\n\npositional arguments:\n in_tractogram Input tractography file.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --seed SEED Random number generator seed [None].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "work", + "and" + ], + [ + "held", + "on" + ], + [ + "methods", + "using" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "streamlines", + "streamlines" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "random", + "random" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "is" + ] + ], "keywords": [] }, { "name": "scil_tractogram_smooth", "docstring": "This script will smooth the streamlines, usually to remove the 'wiggles' in\nprobabilistic tracking.\n\nTwo choices of methods are available:\n- Gaussian will use the surrounding coordinates for smoothing. Streamlines are\nresampled to 1mm step-size and the smoothing is performed on the coordinate\narray. The sigma will be indicative of the number of points surrounding the\ncenter points to be used for blurring.\n- Spline will fit a spline curve to every streamline using a sigma and the\nnumber of control points. The sigma represents the allowed distance from the\ncontrol points. The final streamlines are obtained by evaluating the spline at\nconstant intervals so that it will have the same number of points as initially.\n\nThis script enforces endpoints to remain the same.\n\nWARNING:\n- too low of a sigma (e.g. 1) with a lot of control points (e.g. 15)\nwill create crazy streamlines that could end up out of the bounding box.\n- data_per_point will be lost.\n\nFormerly: scil_smooth_streamlines.py", "help": "usage: scil_tractogram_smooth.py [-h]\n (--gaussian SIGMA | --spline SIGMA NB_CTRL_POINT)\n [--compress [COMPRESS_TH]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script will smooth the streamlines, usually to remove the 'wiggles' in\nprobabilistic tracking.\n\nTwo choices of methods are available:\n- Gaussian will use the surrounding coordinates for smoothing. Streamlines are\nresampled to 1mm step-size and the smoothing is performed on the coordinate\narray. The sigma will be indicative of the number of points surrounding the\ncenter points to be used for blurring.\n- Spline will fit a spline curve to every streamline using a sigma and the\nnumber of control points. The sigma represents the allowed distance from the\ncontrol points. The final streamlines are obtained by evaluating the spline at\nconstant intervals so that it will have the same number of points as initially.\n\nThis script enforces endpoints to remain the same.\n\nWARNING:\n- too low of a sigma (e.g. 1) with a lot of control points (e.g. 15)\nwill create crazy streamlines that could end up out of the bounding box.\n- data_per_point will be lost.\n\nFormerly: scil_smooth_streamlines.py\n\npositional arguments:\n in_tractogram Input tractography file.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --gaussian SIGMA Sigma for smoothing. Use the value of surrounding\n X,Y,Z points on the streamline to blur the streamlines.\n A good sigma choice would be around 5.\n --spline SIGMA NB_CTRL_POINT\n Sigma for smoothing. 
Model each streamline as a spline.\n A good sigma choice would be around 5 and control point around 10.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm.[0.1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "long", + "end" + ], + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "clear", + "future", + "order", + "step", + "work", + "would" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "difference", + "point" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "clear", + "long", + "too" + ], + [ + "experience", + "thinking", + "lot" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "examining", + "evaluating" + ], + [ + "long", + "a" + ], + [ + "clear", + "left", + "out" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "future", + "will" + ], + [ + "loss", + "lost" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "create" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "subsequently", + "initially" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "clear", + "long", + "matter", + "result", + "work", + "so" + ], + [ + "action", + "process", + "step", + "step" + ], + [ + "clear", + "experience", + "thinking", + "true", + "good" + ], + [ + "represent", + "represents" + ], + [ + "methods", + "using" + ], + [ + "area", + "areas", + "surrounding" + ], + [ + "traditionally", + "usually" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "result", + "resulting" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "analysis", + "methodology", + "methods", + "processes", + "methods" + ], + [ + "high", + "higher", + "level", + "low" + ], + [ + "individual", + "each" + ], + [ + "larger", + "size", + "size" + ], + [ + "streamline", + "streamline" + ], + [ + "order", + "allowed" + ], + [ + "long", + "two" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "result", + "work", + "could" + ], + [ + "data", + "tracking", + "tracking" + ], + [ + "probabilistic", + "probabilistic" + ] + ], "keywords": [] }, { "name": "scil_tractogram_split", "docstring": "Split a tractogram into multiple files, 2 options available :\nSplit into X files, or split into files of Y streamlines.\n\nBy default, streamlines to add to each chunk will be chosen randomly.\nOptionally, you can split streamlines...\n - sequentially (the first n/nb_chunks streamlines in the first chunk and so\n on).\n - randomly, but per Quickbundles clusters.\n\nFormerly: 
scil_split_tractogram.py", "help": "usage: scil_tractogram_split.py [-h] [--out_dir OUT_DIR]\n (--chunk_size CHUNK_SIZE | --nb_chunks NB_CHUNKS)\n [--split_per_cluster | --do_not_randomize]\n [--qbx_thresholds t [t ...]] [--seed SEED]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_prefix\n\nSplit a tractogram into multiple files, 2 options available :\nSplit into X files, or split into files of Y streamlines.\n\nBy default, streamlines to add to each chunk will be chosen randomly.\nOptionally, you can split streamlines...\n - sequentially (the first n/nb_chunks streamlines in the first chunk and so\n on).\n - randomly, but per Quickbundles clusters.\n\nFormerly: scil_split_tractogram.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_prefix Prefix for the output tractogram, index will be appended \n automatically (ex, _0.trk), based on input type.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Put all output tractogram in a specific directory.\n --chunk_size CHUNK_SIZE\n The maximum number of streamlines per file.\n --nb_chunks NB_CHUNKS\n Divide the file in equal parts.\n --split_per_cluster If set, splitting will be done per cluster (computed with \n Quickbundles) to ensure that at least some streamlines are \n kept from each bundle in each chunk. Else, random splitting is\n performed (default).\n --do_not_randomize If set, splitting is done sequentially through the original \n sft instead of using random indices.\n --qbx_thresholds t [t ...]\n If you chose option '--split_per_cluster', you may set the \n QBx threshold value(s) here. Default: [40, 30, 20]\n --seed SEED Use a specific random seed for the subsampling.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "average", + "per" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "left", + "into" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "thinking", + "you" + ], + [ + "specific", + "specific" + ], + [ + "long", + "a" + ], + [ + "total", + "40" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "supported", + "support" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "random", + "random" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "clear", + "long", + "matter", + "result", + "work", + "so" + ], + [ + "clear", + "left", + "work", + "put" + ], + [ + "methods", + "using" + ], + [ + "order", + "work", + "instead" + ], + [ + "streamlines", + "streamlines" + ], + [ + "matter", + "thinking", + "else" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "result", + "may" + ], + [ + "represent", + "chosen" + ], + [ + "individual", + "each" + ], + [ + "work", + "working", + "done" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "based", + "based" + ], + [ + "areas", + "parts" + ], + [ + "bundles", + "bundle" + ], + [ + "large", + "work", + "some" + ] + ], "keywords": [] }, { "name": "scil_viz_bingham_fit", "docstring": "Visualize 2-dimensional Bingham volume slice loaded from disk. The volume is\nassumed to be saved from scil_fodf_to_bingham.py.\n\nGiven an image of Bingham coefficients, this script displays a slice in a\ngiven orientation.", "help": "usage: scil_viz_bingham_fit.py [-h] [--slice_index SLICE_INDEX]\n [--win_dims WIDTH HEIGHT]\n [--interactor {image,trackball}]\n [--axis_name {sagittal,coronal,axial}]\n [--silent] [--output OUTPUT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--color_per_lobe]\n in_bingham\n\nVisualize 2-dimensional Bingham volume slice loaded from disk. The volume is\nassumed to be saved from scil_fodf_to_bingham.py.\n\nGiven an image of Bingham coefficients, this script displays a slice in a\ngiven orientation.\n\npositional arguments:\n in_bingham Input SH image file.\n\noptions:\n -h, --help show this help message and exit\n --slice_index SLICE_INDEX\n Index of the slice to visualize along a given axis. Defaults to middle of volume.\n --win_dims WIDTH HEIGHT\n The dimensions for the vtk window. [(768, 768)]\n --interactor {image,trackball}\n Specify interactor mode for vtk window. [trackball]\n --axis_name {sagittal,coronal,axial}\n Name of the axis to visualize. 
[axial]\n --silent Disable interactive visualization.\n --output OUTPUT Path to output file.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Name of the sphere used to reconstruct SF. [symmetric362]\n --color_per_lobe Color each Bingham distribution with a different color. [False]\n", - "synonyms": [], + "synonyms": [ + [ + "individual", + "specific", + "unique", + "variety", + "different" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "orientation", + "orientation" + ], + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "meaning", + "name" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "axial", + "axial" + ], + [ + "methods", + "using" + ], + [ + "area", + "main", + "along" + ], + [ + "sagittal", + "sagittal" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "middle", + "middle" + ], + [ + "individual", + "each" + ], + [ + "coronal", + "coronal" + ], + [ + "false", + "false" + ] + ], "keywords": [] }, { "name": "scil_viz_bundle", "docstring": "Visualize bundles.\n\nExample usages:\n\n# Visualize streamlines as tubes, each bundle with a different color\n>>> scil_viz_bundle.py path_to_bundles/ --shape tube --random_coloring 1337\n\n# Visualize a tractogram with each streamline drawn as a line, colored with\n# their local orientation, but only load 1 in 10 streamlines\n>>> scil_viz_bundle.py tractogram.trk --shape line --subsample 10\n\n# Visualize CSTs as large tubes and color them from a list of colors in a file\n>>> scil_viz_bundle.py path_to_bundles/CST_* --width 0.5\n --color_dict colors.json", "help": "usage: scil_viz_bundle.py [-h]\n [--random_coloring SEED | --uniform_coloring R G B | --local_coloring | --color_dict JSON | --color_from_streamlines KEY | --color_from_points KEY]\n [--shape {line,tube}] [--width WIDTH]\n [--subsample SUBSAMPLE] [--downsample DOWNSAMPLE]\n [--background R G B] [-v [{DEBUG,INFO,WARNING}]]\n in_bundles [in_bundles ...]\n\nVisualize bundles.\n\nExample usages:\n\n# Visualize streamlines as tubes, each bundle with a different color\n>>> scil_viz_bundle.py path_to_bundles/ --shape tube --random_coloring 1337\n\n# Visualize a tractogram with each streamline drawn as a line, colored with\n# their local orientation, but only load 1 in 10 streamlines\n>>> scil_viz_bundle.py tractogram.trk --shape line --subsample 10\n\n# Visualize CSTs as large tubes and color them from a list of colors in a file\n>>> scil_viz_bundle.py path_to_bundles/CST_* --width 0.5\n --color_dict colors.json\n\npositional arguments:\n in_bundles List of tractography files supported by nibabel.\n\noptions:\n -h, --help show this help message and exit\n --shape {line,tube} Display streamlines either as lines or 
tubes.\n [Default: tube]\n --width WIDTH Width of tubes or lines representing streamlines\n [Default: 0.25]\n --subsample SUBSAMPLE\n Only load 1 in N streamlines.\n [Default: 1]\n --downsample DOWNSAMPLE\n Downsample streamlines to N points.\n [Default: None]\n --background R G B RGB values [0, 255] of the color of the background.\n [Default: [0, 0, 0]]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nColouring options:\n --random_coloring SEED\n Assign a random color to bundles.\n --uniform_coloring R G B\n Assign a uniform color to streamlines.\n --local_coloring Assign coloring to streamlines depending on their local orientations.\n --color_dict JSON JSON file containing colors for each bundle.\n Bundle filenames are indicated as keys and colors as values.\n A 'default' key and value can be included.\n --color_from_streamlines KEY\n Extract a color per streamline from the data_per_streamline property of the tractogram at the specified key.\n --color_from_points KEY\n Extract a color per point from the data_per_point property of the tractogram at the specified key.\n", - "synonyms": [], + "synonyms": [ + [ + "individual", + "specific", + "unique", + "variety", + "different" + ], + [ + "bundles", + "bundles" + ], + [ + "average", + "per" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "orientation", + "orientation" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "difference", + "point" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "long", + "a" + ], + [ + "key", + "main", + "key" + ], + [ + "represent", + "representing" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "random", + "random" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "blue", + "colored" + ], + [ + "large", + "larger", + "large" + ], + [ + "long", + "with" + ], + [ + "parameters", + "specified" + ], + [ + "methods", + "using" + ], + [ + "streamlines", + "streamlines" + ], + [ + "supported", + "supported" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "individual", + "each" + ], + [ + "streamline", + "streamline" + ], + [ + "indicating", + "indicated" + ], + [ + "bundles", + "bundle" + ], + [ + "shape", + "structure", + "shape" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_viz_bundle_screenshot_mni", "docstring": "Register bundle to a template for screenshots using a reference.\nThe template can be any MNI152 (any resolution, cropped or not)\nIf your in_anat has a skull, select a MNI152 template with a skull and\nvice-versa.\n\nIf the bundle is already in MNI152 space, do not use --target_template.\n\nAxial, coronal and sagittal slices are captured.\nSagittal can be captured from the left 
(default) or the right.\n\nFor the --roi argument: If 1 value is provided, the ROI will be white,\nif 4 values are provided, the ROI will be colored with the RGB values\nprovided, if 5 values are provided, it is RGBA (values from 0-255).", "help": "usage: scil_viz_bundle_screenshot_mni.py [-h]\n [--target_template TARGET_TEMPLATE]\n [--local_coloring | --uniform_coloring R G B | --reference_coloring COLORBAR]\n [--roi ROI [ROI ...]] [--right]\n [--anat_opacity ANAT_OPACITY]\n [--output_suffix OUTPUT_SUFFIX]\n [--out_dir OUT_DIR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle in_anat\n\nRegister bundle to a template for screenshots using a reference.\nThe template can be any MNI152 (any resolution, cropped or not)\nIf your in_anat has a skull, select a MNI152 template with a skull and\nvice-versa.\n\nIf the bundle is already in MNI152 space, do not use --target_template.\n\nAxial, coronal and sagittal slices are captured.\nSagittal can be captured from the left (default) or the right.\n\nFor the --roi argument: If 1 value is provided, the ROI will be white,\nif 4 values are provided, the ROI will be colored with the RGB values\nprovided, if 5 values are provided, it is RGBA (values from 0-255).\n\npositional arguments:\n in_bundle Path of the input bundle.\n in_anat Path of the reference file (.nii or nii.gz).\n\noptions:\n -h, --help show this help message and exit\n --target_template TARGET_TEMPLATE\n Path to the target MNI152 template for registration. If in_anat has a skull, select a MNI152 template with a skull and vice-versa.\n --local_coloring Color streamlines using local segments orientation.\n --uniform_coloring R G B\n Color streamlines with uniform coloring.\n --reference_coloring COLORBAR\n Color streamlines with reference coloring (0-255).\n --roi ROI [ROI ...] Path to a ROI file (.nii or nii.gz).\n --right Take screenshot from the right instead of the left for the sagittal plane.\n --anat_opacity ANAT_OPACITY\n Set the opacity for the anatomy, use 0 for complete transparency, 1 for opaque. [0.3]\n --output_suffix OUTPUT_SUFFIX\n Add a suffix to the output, else the axis name is used.\n --out_dir OUT_DIR Put all images in a specific directory.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "anatomy", + "anatomy" + ], + [ + "orientation", + "orientation" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "image", + "images" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "question", + "argument" + ], + [ + "order", + "set" + ], + [ + "left", + "left" + ], + [ + "specific", + "specific" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "space", + "space" + ], + [ + "held", + "in" + ], + [ + "blue", + "colored" + ], + [ + "clear", + "order", + "step", + "work", + "take" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "long", + "with" + ], + [ + "axial", + "axial" + ], + [ + "clear", + "left", + "work", + "put" + ], + [ + "methods", + "using" + ], + [ + "order", + "work", + "instead" + ], + [ + "left", + "right" + ], + [ + "sagittal", + "sagittal" + ], + [ + "streamlines", + "streamlines" + ], + [ + "matter", + "thinking", + "else" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "blue", + "green", + "red", + "white", + "white" + ], + [ + "planes", + "plane" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "coronal", + "coronal" + ], + [ + "bundles", + "bundle" + ] + ], "keywords": [] }, { "name": "scil_viz_bundle_screenshot_mosaic", "docstring": "Visualize bundles from a list. The script will output a mosaic (image) with\nscreenshots, 6 views per bundle in the list.", "help": "usage: scil_viz_bundle_screenshot_mosaic.py [-h] [--uniform_coloring R G B]\n [--random_coloring SEED]\n [--zoom ZOOM] [--ttf TTF]\n [--ttf_size TTF_SIZE]\n [--opacity_background OPACITY_BACKGROUND]\n [--resolution_of_thumbnails RESOLUTION_OF_THUMBNAILS]\n [--light_screenshot]\n [--no_information]\n [--no_bundle_name]\n [--no_streamline_number]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_volume in_bundles\n [in_bundles ...] out_image\n\nVisualize bundles from a list. The script will output a mosaic (image) with\nscreenshots, 6 views per bundle in the list.\n\npositional arguments:\n in_volume Volume used as background (e.g. T1, FA, b0).\n in_bundles List of tractography files supported by nibabel or binary mask files.\n out_image Name of the output image mosaic (e.g. mosaic.jpg, mosaic.png).\n\noptions:\n -h, --help show this help message and exit\n --uniform_coloring R G B\n Assign an uniform color to streamlines (or ROIs).\n --random_coloring SEED\n Assign a random color to streamlines (or ROIs).\n --zoom ZOOM Rendering zoom. 
A value greater than 1 is a zoom-in,\n a value less than 1 is a zoom-out [1.0].\n --ttf TTF Path of the true type font to use for legends.\n --ttf_size TTF_SIZE Font size (int) to use for the legends [35].\n --opacity_background OPACITY_BACKGROUND\n Opacity of background image, between 0 and 1.0 [0.4].\n --resolution_of_thumbnails RESOLUTION_OF_THUMBNAILS\n Resolution of thumbnails used in mosaic [300].\n --light_screenshot Keep only 3 views instead of 6 [False].\n --no_information Don't display axis and bundle information [False].\n --no_bundle_name Don't display bundle name [False].\n --no_streamline_number\n Don't display bundle streamlines number [False].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "anatomy", + "anatomy" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "bundles", + "bundles" + ], + [ + "average", + "per" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "rendered", + "rendering", + "rendering" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "long", + "a" + ], + [ + "clear", + "left", + "out" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "supported", + "support" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "future", + "will" + ], + [ + "random", + "random" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "order", + "work", + "instead" + ], + [ + "long", + "than" + ], + [ + "streamlines", + "streamlines" + ], + [ + "highly", + "less" + ], + [ + "meaning", + "true", + "true" + ], + [ + "supported", + "supported" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "view", + "views", + "views" + ], + [ + "larger", + "size", + "size" + ], + [ + "false", + "false" + ], + [ + "bundles", + "bundle" + ], + [ + "greater", + "greater" + ] + ], "keywords": [] }, { "name": "scil_viz_connectivity", "docstring": "Script to display a connectivity matrix and adjust the desired visualization.\nMade to work with scil_tractogram_segment_bundles_for_connectivity.py and\nscil_connectivity_reorder_rois.py.\n\nThis script can either display the axis labels as:\n- Coordinates (0..N)\n- Labels (using --labels_list)\n- Names (using --labels_list and --lookup_table)\nExamples of labels_list.txt and lookup_table.json can be found in the\nfreesurfer_flow output (https://github.com/scilus/freesurfer_flow)\n\nIf the matrix was made from a bigger matrix using\nscil_connectivity_reorder_rois.py, provide the text file(s), using\n--labels_list and/or --reorder_txt.\n\nThe chord chart is always displaying parting in the order they are defined\n(clockwise), the color is attributed in that order following a 
colormap. The\nthickness of the line represent the 'size/intensity', the greater the value is\nthe thicker the line will be. In order to hide the low values, two options are\navailable:\n- Angle threshold + alpha, any connections with a small angle on the chord\n chart will be slightly transparent to increase the focus on bigger\n connections.\n- Percentile, hide any connections with a value below that percentile", "help": "", - "synonyms": [], + "synonyms": [ + [ + "true", + "always" + ], + [ + "large", + "larger", + "small" + ], + [ + "work", + "and" + ], + [ + "result", + "following" + ], + [ + "considered", + "are" + ], + [ + "order", + "order" + ], + [ + "work", + "working", + "work" + ], + [ + "left", + "from" + ], + [ + "clear", + "long", + "work", + "they" + ], + [ + "long", + "a" + ], + [ + "unique", + "variety", + "examples" + ], + [ + "held", + "on" + ], + [ + "represent", + "represent" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "long", + "with" + ], + [ + "create", + "provide" + ], + [ + "methods", + "using" + ], + [ + "larger", + "bigger" + ], + [ + "defined", + "function", + "defined" + ], + [ + "matrices", + "matrix" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "considered", + "subsequently", + "was" + ], + [ + "high", + "higher", + "level", + "low" + ], + [ + "connect", + "connected", + "connection", + "connections", + "connections" + ], + [ + "larger", + "size", + "size" + ], + [ + "long", + "two" + ], + [ + "clear", + "work", + "made" + ], + [ + "connectivity", + "connectivity" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "higher", + "increase", + "total", + "increase" + ], + [ + "attention", + "future", + "work", + "focus" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "greater", + "greater" + ] + ], "keywords": [] }, { "name": "scil_viz_dti_screenshot", "docstring": "Register DWI to a template for screenshots.\nThe templates are on http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009\n\nFor quick quality control, the MNI template can be downsampled to 2mm iso.\nAxial, coronal and sagittal slices are captured.", "help": "usage: scil_viz_dti_screenshot.py [-h] [--shells SHELLS [SHELLS ...]]\n [--out_suffix OUT_SUFFIX]\n [--out_dir OUT_DIR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_template\n\nRegister DWI to a template for screenshots.\nThe templates are on http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009\n\nFor quick quality control, the MNI template can be downsampled to 2mm iso.\nAxial, coronal and sagittal slices are captured.\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bval file, in FSL format.\n in_bvec Path of the bvec file, in FSL format.\n in_template Path to the target MNI152 template for \n registration, use the one provided online.\n\noptions:\n -h, --help show this help message and exit\n --shells SHELLS [SHELLS ...]\n Shells to use 
for DTI fit (usually below 1200), b0 must be listed.\n --out_suffix OUT_SUFFIX\n Add a suffix to the output, else the axis name is used.\n --out_dir OUT_DIR Put all images in a specific directory.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "volume", + "volumes", + "volume" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "image", + "images" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "specific", + "specific" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "diffusion", + "diffusion" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "axial", + "axial" + ], + [ + "clear", + "left", + "work", + "put" + ], + [ + "methods", + "using" + ], + [ + "sagittal", + "sagittal" + ], + [ + "traditionally", + "usually" + ], + [ + "matter", + "thinking", + "else" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "coronal", + "coronal" + ], + [ + "lack", + "quality" + ] + ], "keywords": [] }, { "name": "scil_viz_fodf", "docstring": "Visualize 2-dimensional fODF slice loaded from disk.\n\nGiven an image of SH coefficients, this script displays a slice in a\ngiven orientation. The user can also add a background on top of which the\nfODF are to be displayed. Using a full SH basis, the script can be used to\nvisualize asymmetric fODF. The user can supply a peaks image to visualize\npeaks on top of fODF.\n\nIf a transparency_mask is given (e.g. a brain mask), all values outside the\nmask non-zero values are set to full transparency in the saved scene.\n\n!!! CAUTION !!! 
The script is memory intensive (about 9kB of allocated RAM per\nvoxel, or 9GB for a 1M voxel volume) with a sphere interpolated to 362 points.", "help": "usage: scil_viz_fodf.py [-h] [--slice_index SLICE_INDEX]\n [--win_dims WIDTH HEIGHT]\n [--interactor {image,trackball}]\n [--axis_name {sagittal,coronal,axial}] [--silent]\n [--in_transparency_mask IN_TRANSPARENCY_MASK]\n [--output OUTPUT] [-f]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--sphere {repulsion200,repulsion100,symmetric724,symmetric642,symmetric362,repulsion724}]\n [--sph_subdivide SPH_SUBDIVIDE] [--mask MASK]\n [--colormap COLORMAP | --color_rgb COLOR_RGB COLOR_RGB COLOR_RGB]\n [--scale SCALE] [--radial_scale_off] [--norm_off]\n [-v [{DEBUG,INFO,WARNING}]] [--background BACKGROUND]\n [--bg_range MIN MAX] [--bg_opacity BG_OPACITY]\n [--bg_offset BG_OFFSET]\n [--bg_interpolation {nearest,linear}]\n [--bg_color BG_COLOR BG_COLOR BG_COLOR]\n [--peaks PEAKS]\n [--peaks_color PEAKS_COLOR PEAKS_COLOR PEAKS_COLOR]\n [--peaks_width PEAKS_WIDTH]\n [--peaks_values PEAKS_VALUES | --peaks_length PEAKS_LENGTH]\n [--variance VARIANCE] [--variance_k VARIANCE_K]\n [--var_color VAR_COLOR VAR_COLOR VAR_COLOR]\n in_fodf\n\nVisualize 2-dimensional fODF slice loaded from disk.\n\nGiven an image of SH coefficients, this script displays a slice in a\ngiven orientation. The user can also add a background on top of which the\nfODF are to be displayed. Using a full SH basis, the script can be used to\nvisualize asymmetric fODF. The user can supply a peaks image to visualize\npeaks on top of fODF.\n\nIf a transparency_mask is given (e.g. a brain mask), all values outside the\nmask non-zero values are set to full transparency in the saved scene.\n\n!!! CAUTION !!! The script is memory intensive (about 9kB of allocated RAM per\nvoxel, or 9GB for a 1M voxel volume) with a sphere interpolated to 362 points.\n\npositional arguments:\n in_fodf Input SH image file.\n\noptions:\n -h, --help show this help message and exit\n --slice_index SLICE_INDEX\n Index of the slice to visualize along a given axis. Defaults to middle of volume.\n --win_dims WIDTH HEIGHT\n The dimensions for the vtk window. [(768, 768)]\n --interactor {image,trackball}\n Specify interactor mode for vtk window. [trackball]\n --axis_name {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n --silent Disable interactive visualization.\n --in_transparency_mask IN_TRANSPARENCY_MASK\n Input mask image file.\n --output OUTPUT Path to output file.\n -f Force overwriting of the output files.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sphere {repulsion200,repulsion100,symmetric724,symmetric642,symmetric362,repulsion724}\n Name of the sphere used to reconstruct SF. [symmetric362]\n --sph_subdivide SPH_SUBDIVIDE\n Number of subdivisions for given sphere. If not supplied, use the given sphere as is.\n --mask MASK Optional mask file. 
Only fODF inside the mask are displayed.\n --colormap COLORMAP Colormap for the ODF slicer. If None, then an RGB colormap will be used. [None]\n --color_rgb COLOR_RGB COLOR_RGB COLOR_RGB\n Uniform color for the ODF slicer given as RGB, scaled between 0 and 1. [None]\n --scale SCALE Scaling factor for FODF. [0.5]\n --radial_scale_off Disable radial scale for ODF slicer.\n --norm_off Disable normalization of ODF slicer.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nBackground arguments:\n --background BACKGROUND\n Background image file. If RGB, values must be between 0 and 255.\n --bg_range MIN MAX The range of values mapped to range [0, 1] for background image. [(bg.min(), bg.max())]\n --bg_opacity BG_OPACITY\n The opacity of the background image. Opacity of 0.0 means transparent and 1.0 is completely visible. [1.0]\n --bg_offset BG_OFFSET\n The offset of the background image. [0.5]\n --bg_interpolation {nearest,linear}\n Interpolation mode for the background image. [nearest]\n --bg_color BG_COLOR BG_COLOR BG_COLOR\n The color of the overall background, behind everything. Must be RGB values scaled between 0 and 1. [(0, 0, 0)]\n\nPeaks arguments:\n --peaks PEAKS Peaks image file.\n --peaks_color PEAKS_COLOR PEAKS_COLOR PEAKS_COLOR\n Color used for peaks, as RGB values scaled between 0 and 1. If None, then an RGB colormap is used. [None]\n --peaks_width PEAKS_WIDTH\n Width of peaks segments. [1.0]\n\nPeaks scaling arguments:\n Choose between peaks values and arbitrary length.\n\n --peaks_values PEAKS_VALUES\n Peaks values file.\n --peaks_length PEAKS_LENGTH\n Length of the peaks segments. [0.65]\n\nVariance arguments:\n For the visualization of fodf uncertainty, the variance is used as follows: mean + k * sqrt(variance), where mean is the input fodf (in_fodf) and k is the scaling factor (variance_k).\n\n --variance VARIANCE FODF variance file.\n --variance_k VARIANCE_K\n Scaling factor (k) for the computation of the fodf uncertainty. [1]\n --var_color VAR_COLOR VAR_COLOR VAR_COLOR\n Color of variance outline. Must be RGB values scaled between 0 and 1. 
[(1, 1, 1)]\n", - "synonyms": [], + "synonyms": [ + [ + "application", + "database", + "user" + ], + [ + "total", + "number" + ], + [ + "memory", + "memory" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "average", + "per" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "orientation", + "orientation" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "considered", + "are" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "papers", + "paper" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "process", + "implementation" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "axial", + "axial" + ], + [ + "methods", + "using" + ], + [ + "thinking", + "everything" + ], + [ + "area", + "main", + "along" + ], + [ + "sagittal", + "sagittal" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "variance", + "variance" + ], + [ + "step", + "follow" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "middle", + "middle" + ], + [ + "area", + "work", + "where" + ], + [ + "left", + "behind" + ], + [ + "difference", + "meaning", + "result", + "mean" + ], + [ + "work", + "all" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "coronal", + "coronal" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "meaning", + "order", + "result", + "step", + "true", + "means" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "long", + "full" + ], + [ + "increase", + "offset" + ] + ], "keywords": [] }, { "name": "scil_viz_gradients_screenshot", "docstring": "Visualization for directions on a sphere, either from a gradient sampling (i.e.\na list of b-vectors) or from a Dipy sphere.", "help": "usage: scil_viz_gradients_screenshot.py [-h]\n (--in_gradient_scheme IN_GRADIENT_SCHEME [IN_GRADIENT_SCHEME ...] | --dipy_sphere {symmetric362,symmetric642,symmetric724,repulsion724,repulsion100,repulsion200})\n [--dis-sym]\n [--out_basename OUT_BASENAME]\n [--res RES] [--dis-sphere]\n [--dis-proj] [--plot_shells]\n [--same-color] [--opacity OPACITY]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n\nVisualization for directions on a sphere, either from a gradient sampling (i.e.\na list of b-vectors) or from a Dipy sphere.\n\noptions:\n -h, --help show this help message and exit\n --in_gradient_scheme IN_GRADIENT_SCHEME [IN_GRADIENT_SCHEME ...]\n Gradient sampling filename. 
(only accepts .bvec and\n .bval together or only .b).\n --dipy_sphere {symmetric362,symmetric642,symmetric724,repulsion724,repulsion100,repulsion200}\n Dipy sphere choice.\n --dis-sym Disable antipodal symmetry.\n --out_basename OUT_BASENAME\n Output file name picture without extension (will be\n png file(s)).\n --res RES Resolution of the output picture(s).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n\nEnable/Disable renderings.:\n --dis-sphere Disable the rendering of the sphere.\n --dis-proj Disable rendering of the projection supershell.\n --plot_shells Enable rendering each shell individually.\n\nRendering options.:\n --same-color Use same color for all shell.\n --opacity OPACITY Opacity for the shells.\n", - "synonyms": [], + "synonyms": [ + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "working", + "together" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "rendered", + "rendering", + "rendering" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "direction", + "directions" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "thinking", + "i" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "methods", + "using" + ], + [ + "projection", + "projection" + ], + [ + "considered", + "is" + ], + [ + "image", + "picture" + ], + [ + "individual", + "each" + ], + [ + "work", + "all" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_viz_tractogram_seeds", "docstring": "Visualize seeds used to generate the tractogram or bundle.\nWhen tractography was run, each streamline produced by the tracking algorithm\nsaved its seeding point (its origin).\n\nThe tractogram must have been generated from scil_tracking_local.py or\nscil_tracking_pft.py with the --save_seeds option.", "help": "usage: scil_viz_tractogram_seeds.py [-h] [--save SAVE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n tractogram\n\nVisualize seeds used to generate the tractogram or bundle.\nWhen tractography was run, each streamline produced by the tracking algorithm\nsaved its seeding point (its origin).\n\nThe tractogram must have been generated from scil_tracking_local.py or\nscil_tracking_pft.py with the --save_seeds option.\n\npositional arguments:\n tractogram Tractogram file (must be trk)\n\noptions:\n -h, --help show this help message and exit\n --save SAVE If set, save a screenshot of the result in the specified filename\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "create", + "generate" + ], + [ + "work", + "and" + ], + [ + "difference", + "point" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "lack", + "loss", + "result", + "result" + ], + [ + "seeding", + "seeding" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "parameters", + "specified" + ], + [ + "considered", + "involved", + "work", + "been" + ], + [ + "algorithm", + "algorithm" + ], + [ + "methods", + "using" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "considered", + "subsequently", + "was" + ], + [ + "individual", + "each" + ], + [ + "streamline", + "streamline" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "bundles", + "bundle" + ], + [ + "data", + "tracking", + "tracking" + ] + ], "keywords": [] }, { "name": "scil_viz_tractogram_seeds_3d", "docstring": "Visualize seeds as 3D points, with heatmaps corresponding to seed density\n\nExample usages:\n\n>>> scil_viz_tractogram_seeds_3d.py seeds.nii.gz --tractogram tractogram.trk", "help": "usage: scil_viz_tractogram_seeds_3d.py [-h] [--tractogram TRACTOGRAM]\n [--colormap COLORMAP]\n [--seed_opacity SEED_OPACITY]\n [--tractogram_shape {line,tube}]\n [--tractogram_opacity TRACTOGRAM_OPACITY]\n [--tractogram_width TRACTOGRAM_WIDTH]\n [--tractogram_color R G B [R G B ...]]\n [--background R G B [R G B ...]]\n [-v [{DEBUG,INFO,WARNING}]]\n in_seed_map\n\nVisualize seeds as 3D points, with heatmaps corresponding to seed density\n\nExample usages:\n\n>>> scil_viz_tractogram_seeds_3d.py seeds.nii.gz --tractogram tractogram.trk\n\npositional arguments:\n in_seed_map Seed density map.\n\noptions:\n -h, --help show this help message and exit\n --tractogram TRACTOGRAM\n Tractogram corresponding to the seeds.\n --colormap COLORMAP Name of the map for the density coloring. Can be any colormap that matplotlib offers.\n [Default: bone]\n --seed_opacity SEED_OPACITY\n Opacity of the contour generated.\n [Default: 0.5]\n --tractogram_shape {line,tube}\n Display streamlines either as lines or tubes.\n [Default: tube]\n --tractogram_opacity TRACTOGRAM_OPACITY\n Opacity of the streamlines.\n [Default: 0.5]\n --tractogram_width TRACTOGRAM_WIDTH\n Width of tubes or lines representing streamlines.\n [Default: 0.05]\n --tractogram_color R G B [R G B ...]\n Color for the tractogram.\n --background R G B [R G B ...]\n RGB values [0, 255] of the color of the background.\n [Default: [0, 0, 0]]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n", - "synonyms": [], + "synonyms": [ + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "meaning", + "name" + ], + [ + "represent", + "representing" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "action", + "clear", + "considered", + "future", + "matter", + "possibility", + "potential", + "question", + "result", + "specific", + "any" + ], + [ + "long", + "with" + ], + [ + "methods", + "using" + ], + [ + "streamlines", + "streamlines" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "maps", + "map" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_viz_volume_histogram", "docstring": "Script to display a histogram of a metric (FA, MD, etc.) from a binary mask\n(wm mask, vascular mask, etc.).\nThese two images must be coregistered with each other.\n\n>>> scil_viz_volume_histogram.py metric.nii.gz mask_bin.nii.gz 8\n out_filename_image.png", "help": "usage: scil_viz_volume_histogram.py [-h] [--title TITLE] [--x_label X_LABEL]\n [--colors COLORS] [--show_only]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_metric in_mask n_bins out_png\n\nScript to display a histogram of a metric (FA, MD, etc.) from a binary mask\n(wm mask, vascular mask, etc.).\nThese two images must be coregistered with each other.\n\n>>> scil_viz_volume_histogram.py metric.nii.gz mask_bin.nii.gz 8\n out_filename_image.png\n\npositional arguments:\n in_metric Metric map, ex: FA, MD, etc.\n in_mask Binary mask data to extract value.\n n_bins Number of bins to use for the histogram.\n out_png Output filename for the figure.\n\noptions:\n -h, --help show this help message and exit\n --show_only Do not save the figure, only display.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nHistogram options:\n --title TITLE Use the provided info for the histogram title. [Histogram]\n --x_label X_LABEL Use the provided info for the x axis name.\n --colors COLORS Use the provided info for the bars color. 
[#0504aa]\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "image", + "images" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "long", + "a" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "long", + "with" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "variety", + "work", + "other" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "individual", + "each" + ], + [ + "long", + "two" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "maps", + "map" + ] + ], "keywords": [] }, { "name": "scil_viz_volume_scatterplot", "docstring": "Script to display a scatter plot between two maps (ex. FA and MD, ihMT and MT).\nBy default, no mask is applied to the data.\nDifferent options are available to mask or threshold data:\n - a binary mask\n - two probability maps, which can be used to threshold maps with\n --in_prob_maps. The same threshold is applied on these two maps (--thr).\n - parcellation, which can be used to plot values for each region of\n an atlas (--in_atlas) or a subset of regions (--specific_label).\n Atlas option requires a json file (--atlas_lut) with indices and\n names of each label corresponding to the atlas as follows:\n \"1\": \"lh_A8m\",\n \"2\": \"rh_A8m\",\n The numbers must correspond to the label indices in the json file.\n\nBe careful, you cannot use all of them at the same time.\n\nFor general scatter plot without mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n\nFor scatter plot with mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_bin_mask mask_wm.nii.gz\n\nFor tissue probability scatter plot:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --prob_maps wm_map.nii.gz gm_map.nii.gz\n\nFor scatter plot using atlas:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n --specific_label 34 67 87", "help": "usage: scil_viz_volume_scatterplot.py [-h] [--out_dir OUT_DIR] [--thr THR]\n [--not_exclude_zero]\n [--in_bin_mask IN_BIN_MASK | --in_prob_maps IN_PROB_MAPS IN_PROB_MAPS | --in_atlas IN_ATLAS]\n [--atlas_lut ATLAS_LUT]\n [--specific_label SPECIFIC_LABEL [SPECIFIC_LABEL ...]]\n [--in_folder] [--title TITLE]\n [--x_label X_LABEL] [--y_label Y_LABEL]\n [--label LABEL]\n [--label_prob LABEL_PROB]\n [--marker MARKER]\n [--marker_size MARKER_SIZE]\n [--transparency TRANSPARENCY]\n [--dpi DPI] [--colors color1 color2]\n [--show_only]\n 
[-v [{DEBUG,INFO,WARNING}]] [-f]\n in_x_map in_y_map out_name\n\nScript to display a scatter plot between two maps (ex. FA and MD, ihMT and MT).\nBy default, no mask is applied to the data.\nDifferent options are available to mask or threshold data:\n - a binary mask\n - two probability maps, which can be used to threshold maps with\n --in_prob_maps. The same threshold is applied on these two maps (--thr).\n - parcellation, which can be used to plot values for each region of\n an atlas (--in_atlas) or a subset of regions (--specific_label).\n Atlas option requires a json file (--atlas_lut) with indices and\n names of each label corresponding to the atlas as follows:\n \"1\": \"lh_A8m\",\n \"2\": \"rh_A8m\",\n The numbers must correspond to the label indices in the json file.\n\nBe careful, you cannot use all of them at the same time.\n\nFor general scatter plot without mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n\nFor scatter plot with mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_bin_mask mask_wm.nii.gz\n\nFor tissue probability scatter plot:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --prob_maps wm_map.nii.gz gm_map.nii.gz\n\nFor scatter plot using atlas:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n --specific_label 34 67 87\n\npositional arguments:\n in_x_map Map in x axis, FA for example.\n in_y_map Map in y axis, MD for example.\n out_name Output filename for the figure without extension.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Output directory to save scatter plot.\n --thr THR Use to apply threshold only on probability maps (same for both maps) with --in_prob_maps option. [0.9]\n --not_exclude_zero Keep zero value in data.\n --in_bin_mask IN_BIN_MASK\n Binary mask. Use this option to extract x and y map values from a specific mask or region: wm_mask or roi_mask for example.\n --in_prob_maps IN_PROB_MAPS IN_PROB_MAPS\n Probability maps, WM and GM for example.\n --in_atlas IN_ATLAS Path to the input atlas image.\n --show_only Do not save the figure, only display. Not available with --in_atlas option.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nAtlas options:\n --atlas_lut ATLAS_LUT\n Path of the LUT file corresponding to atlas used to name the regions of interest.\n --specific_label SPECIFIC_LABEL [SPECIFIC_LABEL ...]\n Label list to use for the scatter plot. Labels must correspond to the atlas LUT file. [None]\n --in_folder Save label plots in subfolder \"Label_plots\".\n\nScatter plot options:\n --title TITLE Use the provided info for the title name. [Scatter Plot]\n --x_label X_LABEL Use the provided info for the x axis name. [x]\n --y_label Y_LABEL Use the provided info for the y axis name. [y]\n --label LABEL Use the provided info for the legend box corresponding to mask or first probability map. [None]\n --label_prob LABEL_PROB\n Use the provided info for the legend box corresponding to the second probability map. [Threshold prob_map 2]\n --marker MARKER Use the provided info for the marker shape. 
[.]\n --marker_size MARKER_SIZE\n Use the provided info for the marker size. [15]\n --transparency TRANSPARENCY\n Use the provided info for the point transparency. [0.4]\n --dpi DPI Use the provided info for the dpi resolution. [300]\n --colors color1 color2\n", - "synonyms": [], + "synonyms": [ + [ + "individual", + "specific", + "unique", + "variety", + "different" + ], + [ + "based", + "form", + "result", + "which" + ], + [ + "order", + "required" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "work", + "and" + ], + [ + "general", + "general" + ], + [ + "brain", + "tissue" + ], + [ + "applied", + "applied" + ], + [ + "result", + "following" + ], + [ + "considered", + "are" + ], + [ + "probability", + "probability" + ], + [ + "difference", + "point" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "areas", + "region", + "regions", + "regions" + ], + [ + "meaning", + "name" + ], + [ + "methods", + "use" + ], + [ + "thinking", + "you" + ], + [ + "specific", + "specific" + ], + [ + "long", + "a" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "applied", + "apply" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "higher", + "interest" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "threshold", + "threshold" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "long", + "result", + "work", + "working", + "time" + ], + [ + "atlas", + "atlas" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "future", + "result", + "specific", + "variety", + "work", + "these" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "individual", + "each" + ], + [ + "larger", + "size", + "size" + ], + [ + "long", + "two" + ], + [ + "work", + "all" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "maps", + "maps" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "area", + "areas", + "region", + "regions", + "region" + ], + [ + "maps", + "map" + ], + [ + "shape", + "structure", + "shape" + ] + ], "keywords": [] }, { "name": "scil_viz_volume_screenshot", "docstring": "Take screenshot(s) of one or more slices in a given image volume along the\nrequested axis. If slice indices are not provided, all slices in the volume\nare used. The name of the output images are suffixed with _slice_{id}, with\nid being the slice number in the volume. If a labelmap image is provided (e.g.\na tissue segmentation map), it is overlaid on the volume slices. 
Same goes if\na mask is provided, with the difference that it can be rendered as a\ntransparency overlay as well as a contour.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plot.\n\nExample:\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_annotated.png\n --display_slice_number --display_lr\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_masked.png\n --transparency brainmask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial.png\n --slices 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_sagittal.png --axis sagittal\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_plasma_cmap.png\n --slices 30 40 50 60 70 80 90 100 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_overlay.png\n --slices 30 40 50 60 70 80 90 100 --overlays brain_mask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_contour.png\n --slices 30 40 50 60 70 80 90 100\n --overlays brain_mask.nii.gz --overlays_as_contours\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_map.png\n --slices 30 40 50 60 70 80 90 100 --labelmap tissue_map.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_viridis_cmap.png\n --slices 30 40 50 60 70 80 90 100\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_peaks.png\n --slices 30 40 50 60 70 80 90 100 --peaks peaks.nii.gz --volume_opacity 0.5", "help": "usage: scil_viz_volume_screenshot.py [-h]\n [--volume_cmap_name VOLUME_CMAP_NAME]\n [--volume_opacity VOLUME_OPACITY]\n [--transparency TRANSPARENCY]\n [--slices SID [SID ...]]\n [--axis {sagittal,coronal,axial}]\n [--size WIDTH HEIGHT]\n [--display_slice_number] [--display_lr]\n [--labelmap LABELMAP]\n [--labelmap_cmap_name LABELMAP_CMAP_NAME]\n [--labelmap_opacity LABELMAP_OPACITY]\n [--overlays OVERLAYS [OVERLAYS ...]]\n [--overlays_as_contours]\n [--overlays_colors R G B [R G B ...]]\n [--overlays_opacity OVERLAYS_OPACITY]\n [--peaks PEAKS [PEAKS ...]]\n [--peaks_width PEAKS_WIDTH]\n [--peaks_opacity PEAKS_OPACITY]\n [-v [{DEBUG,INFO,WARNING}]]\n volume out_fname\n\nTake screenshot(s) of one or more slices in a given image volume along the\nrequested axis. If slice indices are not provided, all slices in the volume\nare used. The name of the output images are suffixed with _slice_{id}, with\nid being the slice number in the volume. If a labelmap image is provided (e.g.\na tissue segmentation map), it is overlaid on the volume slices. 
Same goes if\na mask is provided, with the difference that it can be rendered as a\ntransparency overlay as well as a contour.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plot.\n\nExample:\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_annotated.png\n --display_slice_number --display_lr\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_masked.png\n --transparency brainmask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial.png\n --slices 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_sagittal.png --axis sagittal\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_plasma_cmap.png\n --slices 30 40 50 60 70 80 90 100 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_overlay.png\n --slices 30 40 50 60 70 80 90 100 --overlays brain_mask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_contour.png\n --slices 30 40 50 60 70 80 90 100\n --overlays brain_mask.nii.gz --overlays_as_contours\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_map.png\n --slices 30 40 50 60 70 80 90 100 --labelmap tissue_map.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_viridis_cmap.png\n --slices 30 40 50 60 70 80 90 100\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_peaks.png\n --slices 30 40 50 60 70 80 90 100 --peaks peaks.nii.gz --volume_opacity 0.5\n\npositional arguments:\n volume Input 3D Nifti file (.nii/.nii.gz).\n out_fname Name of the output image(s). If multiple slices are provided (or none), their indices will be appended to the name (e.g. volume.jpg, volume.png becomes volume_slice_0.jpg, volume_slice_0.png).\n\noptions:\n -h, --help show this help message and exit\n --transparency TRANSPARENCY\n Transparency Nifti image (.nii/.nii.gz). Can either be a binary mask or a scalar image in the range [0, 1].\n --size WIDTH HEIGHT Size of the output image. [(768, 768)]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nSlicing:\n --slices SID [SID ...]\n Slice indices to screenshot. If None are supplied, all slices inside the transparency mask are selected.\n --axis {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n\nInput overlays:\n --labelmap LABELMAP Input labelmap file (.nii/.nii.gz).\n --overlays OVERLAYS [OVERLAYS ...]\n 3D Nifti image(s) to overlay (.nii/.nii.gz).\n --peaks PEAKS [PEAKS ...]\n Peaks Nifti image (.nii/.nii.gz).\n\nVolume rendering:\n --volume_cmap_name VOLUME_CMAP_NAME\n Colormap name for the 3D Nifti image data. [None]\n --volume_opacity VOLUME_OPACITY\n Opacity value for the 3D Nifti image data. [1.0]\n --labelmap_cmap_name LABELMAP_CMAP_NAME\n Colormap name for the labelmap image data. [viridis]\n --labelmap_opacity LABELMAP_OPACITY\n Opacity value for the labelmap image data. [0.5]\n\nPeaks rendering:\n --peaks_width PEAKS_WIDTH\n Width of the peaks lines. [3.0]\n --peaks_opacity PEAKS_OPACITY\n Opacity value for the peaks overlay. [1.0]\n\nOverlay rendering:\n --overlays_as_contours\n Display overlays contours and reduce the opacity of their inner region (see the `--overlays_opacity` argument).\n --overlays_colors R G B [R G B ...]\n Colors for the overlays or contours. You may provide a single color, for all overlays/contours, or one color for each. 
Each color is given as three values: R G B\n --overlays_opacity OVERLAYS_OPACITY\n Opacity value for the masks overlays. When combined with `--overlays_as_contours`, this will be the opacity of the region inside the computed contours. [0.5]\n\nAnnotations:\n --display_slice_number\n If true, displays the slice number in the upper left corner.\n --display_lr If true, add left and right annotations to the images.\n", - "synonyms": [], + "synonyms": [ + [ + "total", + "number" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "total", + "90" + ], + [ + "work", + "and" + ], + [ + "brain", + "tissue" + ], + [ + "considered", + "are" + ], + [ + "considered", + "involved", + "being" + ], + [ + "long", + "work", + "more" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "image", + "images" + ], + [ + "meaning", + "name" + ], + [ + "rendered", + "rendering", + "rendering" + ], + [ + "rendered", + "rendering", + "rendered" + ], + [ + "question", + "argument" + ], + [ + "left", + "left" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "total", + "40" + ], + [ + "total", + "70" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "clear", + "order", + "step", + "work", + "take" + ], + [ + "long", + "with" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "total", + "50" + ], + [ + "axial", + "axial" + ], + [ + "create", + "provide" + ], + [ + "difference", + "difference" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "left", + "right" + ], + [ + "area", + "main", + "along" + ], + [ + "sagittal", + "sagittal" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "meaning", + "true", + "true" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "result", + "may" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "larger", + "size", + "size" + ], + [ + "total", + "combined" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "total", + "60" + ], + [ + "work", + "all" + ], + [ + "total", + "100" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "coronal", + "coronal" + ], + [ + "total", + "80" + ], + [ + "area", + "areas", + "region", + "regions", + "region" + ], + [ + "maps", + "map" + ], + [ + "increase", + "reduce" + ], + [ + "considered", + "experience", + "large", + "long", + 
"result", + "variety", + "work", + "working", + "well" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ] + ], "keywords": [] }, { "name": "scil_viz_volume_screenshot_mosaic", "docstring": "Compose a mosaic of screenshots of the given image volume slices along the\nrequested axis. The provided transparency mask (e.g. a brain mask volume) is\nused to set the screenshot values outside the mask non-zero values to full\ntransparency. Additionally, if a labelmap image is provided (e.g. a tissue\nsegmentation map), it is overlaid on the volume slices. Also, a series of\nmasks can be provided and will be used to generate contours overlaid on each\nvolume slice.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plot.\n\nThe screenshots are overlapped according to the given factors.\n\nThe mosaic supports either horizontal, vertical or matrix arrangements.\n\nExample:\n>>> scil_viz_volume_screenshot_mosaic.py 1 8 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial.png 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --overlap_factor 0.6 0.5 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 tissues.nii.gz brain_mask.nii.gz\n mosaic_overlap_tissues_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_viridis_cmap.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_contours.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --overlays wm_mask.nii.gz gm_mask.nii.gz csf_mask.nii.gz", "help": "usage: scil_viz_volume_screenshot_mosaic.py [-h]\n [--volume_cmap_name VOLUME_CMAP_NAME]\n [--volume_opacity VOLUME_OPACITY]\n [--axis {sagittal,coronal,axial}]\n [--size WIDTH HEIGHT]\n [--labelmap LABELMAP]\n [--labelmap_cmap_name LABELMAP_CMAP_NAME]\n [--labelmap_opacity LABELMAP_OPACITY]\n [--overlays OVERLAYS [OVERLAYS ...]]\n [--overlays_as_contours]\n [--overlays_colors R G B [R G B ...]]\n [--overlays_opacity OVERLAYS_OPACITY]\n [--overlap rWIDTH rHEIGHT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n rows cols volume transparency\n out_fname SID [SID ...]\n\nCompose a mosaic of screenshots of the given image volume slices along the\nrequested axis. The provided transparency mask (e.g. a brain mask volume) is\nused to set the screenshot values outside the mask non-zero values to full\ntransparency. Additionally, if a labelmap image is provided (e.g. a tissue\nsegmentation map), it is overlaid on the volume slices. 
Also, a series of\nmasks can be provided and will be used to generate contours overlaid on each\nvolume slice.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plotted.\n\nThe screenshots are overlapped according to the given factors.\n\nThe mosaic supports either horizontal, vertical or matrix arrangements.\n\nExample:\n>>> scil_viz_volume_screenshot_mosaic.py 1 8 t1.nii.gz brain_mask.nii.gz\n    mosaic_overlap_t1_axial.png 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n    mosaic_overlap_t1_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n    --overlap_factor 0.6 0.5 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 tissues.nii.gz brain_mask.nii.gz\n    mosaic_overlap_tissues_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n    --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n    mosaic_overlap_t1_sagittal_tissue_viridis_cmap.png\n    30 40 50 60 70 80 90 100 --axis sagittal\n    --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n    mosaic_overlap_t1_sagittal_tissue_contours.png\n    30 40 50 60 70 80 90 100 --axis sagittal\n    --overlays wm_mask.nii.gz gm_mask.nii.gz csf_mask.nii.gz\n\npositional arguments:\n  rows                  The mosaic row count.\n  cols                  The mosaic column count.\n  volume                Input 3D Nifti file (.nii/.nii.gz).\n  transparency          Transparency Nifti image (.nii/.nii.gz). Can either be a binary mask or a scalar image in the range [0, 1].\n  out_fname             Name of the output image (e.g. img.jpg, img.png).\n  SID                   Slice indices to screenshot.\n\noptions:\n  -h, --help            show this help message and exit\n  --axis {sagittal,coronal,axial}\n                        Name of the axis to visualize. [axial]\n  --size WIDTH HEIGHT   Size of the output image. [(768, 768)]\n  --overlap rWIDTH rHEIGHT\n                        The overlap factor as a ratio of each image dimension. [(0.6, 0.0)]\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nInput overlays:\n  --labelmap LABELMAP   Input labelmap file (.nii/.nii.gz).\n  --overlays OVERLAYS [OVERLAYS ...]\n                        3D Nifti image(s) to overlay (.nii/.nii.gz).\n\nVolume rendering:\n  --volume_cmap_name VOLUME_CMAP_NAME\n                        Colormap name for the 3D Nifti image data. [None]\n  --volume_opacity VOLUME_OPACITY\n                        Opacity value for the 3D Nifti image data. [1.0]\n  --labelmap_cmap_name LABELMAP_CMAP_NAME\n                        Colormap name for the labelmap image data. [viridis]\n  --labelmap_opacity LABELMAP_OPACITY\n                        Opacity value for the labelmap image data. [0.5]\n\nOverlay rendering:\n  --overlays_as_contours\n                        Display overlays contours and reduce the opacity of their inner region (see the `--overlays_opacity` argument).\n  --overlays_colors R G B [R G B ...]\n                        Colors for the overlays or contours. You may provide a single color, for all overlays/contours, or one color for each. Each color is given as three values: R G B\n  --overlays_opacity OVERLAYS_OPACITY\n                        Opacity value for the masks overlays. When combined with `--overlays_as_contours`, this will be the opacity of the region inside the computed contours. 
[0.5]\n", - "synonyms": [], + "synonyms": [ + [ + "volume", + "volumes", + "volume" + ], + [ + "create", + "generate" + ], + [ + "clear", + "considered", + "long", + "result", + "work", + "only" + ], + [ + "total", + "90" + ], + [ + "work", + "and" + ], + [ + "work", + "also" + ], + [ + "brain", + "tissue" + ], + [ + "considered", + "are" + ], + [ + "brain", + "brain" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "considered", + "contrast", + "difference", + "form", + "result", + "specific", + "subject", + "true", + "unique", + "work", + "example" + ], + [ + "meaning", + "name" + ], + [ + "rendered", + "rendering", + "rendering" + ], + [ + "question", + "argument" + ], + [ + "order", + "set" + ], + [ + "thinking", + "you" + ], + [ + "long", + "a" + ], + [ + "total", + "40" + ], + [ + "total", + "70" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "intersected", + "overlapped" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "total", + "50" + ], + [ + "supported", + "supports" + ], + [ + "axial", + "axial" + ], + [ + "create", + "provide" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "using" + ], + [ + "view", + "see" + ], + [ + "area", + "main", + "along" + ], + [ + "sagittal", + "sagittal" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "matrices", + "matrix" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "result", + "may" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "larger", + "size", + "size" + ], + [ + "total", + "combined" + ], + [ + "total", + "60" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "total", + "100" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "coronal", + "coronal" + ], + [ + "based", + "reported", + "according" + ], + [ + "total", + "80" + ], + [ + "area", + "areas", + "region", + "regions", + "region" + ], + [ + "maps", + "map" + ], + [ + "increase", + "reduce" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "long", + "full" + ] + ], "keywords": [] }, { "name": "scil_volume_apply_transform", "docstring": "Transform Nifti (.nii.gz) using an affine/rigid transformation.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_apply_transform_to_image.py.", "help": "usage: scil_volume_apply_transform.py [-h] [--inverse] [--keep_dtype]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file in_target_file in_transfo\n out_name\n\nTransform Nifti (.nii.gz) using an affine/rigid transformation.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: 
scil_apply_transform_to_image.py.\n\npositional arguments:\n  in_file         Path of the file to be transformed (nii or nii.gz).\n  in_target_file  Path of the reference target file (.nii.gz).\n  in_transfo      Path of the file containing the 4x4 \n                  transformation matrix (.txt, .npy or .mat).\n  out_name        Output filename of the transformed data.\n\noptions:\n  -h, --help      show this help message and exit\n  --inverse       Apply the inverse transformation.\n  --keep_dtype    If True, keeps the data_type of the input image (in_file) when saving the output image (out_name).\n  -v [{DEBUG,INFO,WARNING}]\n                  Produces verbose output depending on the provided level. \n                  Default level is warning, default when using -v is info.\n  -f              Force overwriting of the output files.\n", -        "synonyms": [], +        "synonyms": [ +            [ +                "work", +                "and" +            ], +            [ +                "long", +                "work", +                "more" +            ], +            [ +                "high", +                "higher", +                "highest", +                "level", +                "level" +            ], +            [ +                "left", +                "result", +                "when" +            ], +            [ +                "methods", +                "use" +            ], +            [ +                "held", +                "on" +            ], +            [ +                "applied", +                "apply" +            ], +            [ +                "clear", +                "long", +                "main", +                "result", +                "the" +            ], +            [ +                "clear", +                "considered", +                "form", +                "long", +                "result", +                "true", +                "work", +                "this" +            ], +            [ +                "future", +                "work", +                "working", +                "for" +            ], +            [ +                "data", +                "knowledge", +                "information" +            ], +            [ +                "clear", +                "considered", +                "result", +                "work", +                "be" +            ], +            [ +                "create", +                "work", +                "working", +                "help" +            ], +            [ +                "matter", +                "question", +                "thinking", +                "true", +                "work", +                "working", +                "how" +            ], +            [ +                "analysis", +                "data", +                "database", +                "tracking", +                "data" +            ], +            [ +                "methods", +                "using" +            ], +            [ +                "meaning", +                "true", +                "true" +            ], +            [ +                "matrices", +                "matrix" +            ], +            [ +                "step", +                "follow" +            ], +            [ +                "create", +                "order", +                "work", +                "to" +            ], +            [ +                "image", +                "image" +            ], +            [ +                "considered", +                "is" +            ], +            [ +                "connection", +                "connections", +                "link" +            ], +            [ +                "clear", +                "matter", +                "question", +                "result", +                "if" +            ] +        ], "keywords": [] }, { "name": "scil_volume_b0_synthesis", "docstring": "Wrapper for SyNb0 available in Dipy, to run it on a single subject.\nRequires skull-stripped b0 and t1w images as input; the script will normalize the\nt1w's WM to 110, co-register both images, then register it to the appropriate\ntemplate, run SyNb0 and then transform the result back to the original space.\n\nSyNb0 is a deep learning model that predicts a synthetic, distortion-free\nb0 image from a distorted b0 and T1w.\n\nThis script must be used carefully, as it is meant to be used in an\nenvironment with the following dependencies already installed (not installed by\ndefault in Scilpy):\n- tensorflow-addons\n- tensorrt\n- tensorflow", "help": "usage: scil_volume_b0_synthesis.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                   in_b0 in_b0_mask in_t1 in_t1_mask out_b0\n\nWrapper for SyNb0 available in Dipy, to run it on a single subject.\nRequires skull-stripped b0 and t1w images as input; the script will normalize the\nt1w's WM to 110, co-register both images, then register it to the appropriate\ntemplate, run SyNb0 and then transform the result back to the original space.\n\nSyNb0 is a deep learning model that predicts a synthetic, distortion-free\nb0 image from a distorted b0 and T1w.\n\nThis script must be used carefully, as it is meant to be used in an\nenvironment with the following dependencies already installed (not installed by\ndefault in Scilpy):\n- tensorflow-addons\n- tensorrt\n- tensorflow\n\npositional arguments:\n  in_b0       Input b0 image.\n  in_b0_mask  Input b0 mask.\n  in_t1       Input t1w image.\n  in_t1_mask  Input t1w 
mask.\n out_b0 Output b0 image without distortion.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Schilling, Kurt G., et al. \"Synthesized b0 for diffusion distortion\n correction (Synb0-DisCo).\" Magnetic resonance imaging 64 (2019): 62-70.\n", - "synonyms": [], + "synonyms": [ + [ + "work", + "and" + ], + [ + "result", + "following" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "image", + "images" + ], + [ + "lack", + "loss", + "result", + "result" + ], + [ + "long", + "a" + ], + [ + "total", + "70" + ], + [ + "held", + "on" + ], + [ + "clear", + "order", + "step", + "must" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "clear", + "lack", + "result", + "without" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "diffusion", + "diffusion" + ], + [ + "future", + "will" + ], + [ + "experience", + "knowledge", + "learning", + "learning" + ], + [ + "left", + "back" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "space", + "space" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "clear", + "order", + "result", + "meant" + ], + [ + "left", + "subsequently", + "then" + ], + [ + "methods", + "using" + ], + [ + "imaging", + "imaging" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "manner", + "specific", + "appropriate" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "matter", + "question", + "subject", + "subjects", + "subject" + ] + ], "keywords": [] }, { "name": "scil_volume_count_non_zero_voxels", "docstring": "Count the number of non-zero voxels in an image file.\n\nIf you give it an image with more than 3 dimensions, it will summarize the 4th\n(or more) dimension to one voxel, and then find non-zero voxels over this.\nThis means that if there is at least one non-zero voxel in the 4th dimension,\nthis voxel of the 3D volume will be considered as non-zero.\n\nFormerly: scil_count_non_zero_voxels.py", "help": "usage: scil_volume_count_non_zero_voxels.py [-h] [--out OUT_FILE] [--stats]\n [--id VALUE_ID]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n IN_FILE\n\nCount the number of non-zero voxels in an image file.\n\nIf you give it an image with more than 3 dimensions, it will summarize the 4th\n(or more) dimension to one voxel, and then find non-zero voxels over this.\nThis means that if there is at least one non-zero voxel in the 4th dimension,\nthis voxel of the 3D volume will be considered as non-zero.\n\nFormerly: scil_count_non_zero_voxels.py\n\npositional arguments:\n IN_FILE Input file name, in nifti format.\n\noptions:\n -h, --help show this help message and exit\n --out OUT_FILE Name of the output file, which will be saved as a text file.\n --stats If set, output the value using a stats format. 
Using this syntax will append\n                  a line to the output file, instead of creating a file with only one line.\n                  This is useful to create a file to be used as the source of data for a graph.\n                  Can be combined with --id\n  --id VALUE_ID   Id of the current count. If used, the value of this argument will be\n                  output (followed by a \":\") before the count value.\n                  Mostly useful with --stats.\n  -v [{DEBUG,INFO,WARNING}]\n                  Produces verbose output depending on the provided level. \n                  Default level is warning, default when using -v is info.\n  -f              Force overwriting of the output files.\n", -        "synonyms": [], +        "synonyms": [ +            [ +                "total", +                "number" +            ], +            [ +                "volume", +                "volumes", +                "volume" +            ], +            [ +                "based", +                "form", +                "result", +                "which" +            ], +            [ +                "clear", +                "considered", +                "long", +                "result", +                "work", +                "only" +            ], +            [ +                "work", +                "and" +            ], +            [ +                "long", +                "work", +                "more" +            ], +            [ +                "high", +                "higher", +                "highest", +                "level", +                "level" +            ], +            [ +                "left", +                "result", +                "when" +            ], +            [ +                "create", +                "creating" +            ], +            [ +                "meaning", +                "name" +            ], +            [ +                "work", +                "find" +            ], +            [ +                "order", +                "set" +            ], +            [ +                "question", +                "argument" +            ], +            [ +                "long", +                "over" +            ], +            [ +                "thinking", +                "you" +            ], +            [ +                "long", +                "a" +            ], +            [ +                "clear", +                "left", +                "out" +            ], +            [ +                "voxel", +                "voxel" +            ], +            [ +                "tool", +                "useful" +            ], +            [ +                "held", +                "on" +            ], +            [ +                "clear", +                "long", +                "main", +                "result", +                "the" +            ], +            [ +                "clear", +                "considered", +                "form", +                "long", +                "result", +                "true", +                "work", +                "this" +            ], +            [ +                "clear", +                "result", +                "work", +                "that" +            ], +            [ +                "future", +                "work", +                "working", +                "for" +            ], +            [ +                "future", +                "will" +            ], +            [ +                "clear", +                "considered", +                "result", +                "work", +                "be" +            ], +            [ +                "result", +                "followed" +            ], +            [ +                "considered", +                "role", +                "work", +                "as" +            ], +            [ +                "create", +                "create" +            ], +            [ +                "create", +                "work", +                "working", +                "help" +            ], +            [ +                "held", +                "in" +            ], +            [ +                "long", +                "with" +            ], +            [ +                "analysis", +                "data", +                "database", +                "tracking", +                "data" +            ], +            [ +                "considered", +                "highly", +                "considered" +            ], +            [ +                "left", +                "subsequently", +                "then" +            ], +            [ +                "methods", +                "using" +            ], +            [ +                "future", +                "current" +            ], +            [ +                "order", +                "work", +                "instead" +            ], +            [ +                "long", +                "than" +            ], +            [ +                "left", +                "before" +            ], +            [ +                "create", +                "order", +                "work", +                "to" +            ], +            [ +                "image", +                "image" +            ], +            [ +                "considered", +                "is" +            ], +            [ +                "clear", +                "give" +            ], +            [ +                "clear", +                "considered", +                "result", +                "work", +                "it" +            ], +            [ +                "total", +                "combined" +            ], +            [ +                "clear", +                "long", +                "question", +                "result", +                "work", +                "there" +            ], +            [ +                "considered", +                "long", +                "work", +                "one" +            ], +            [ +                "clear", +                "matter", +                "question", +                "result", +                "if" +            ], +            [ +                "meaning", +                "order", +                "result", +                "step", +                "true", +                "means" +            ] +        ], "keywords": [] }, { "name": "scil_volume_crop", "docstring": "Crop a volume using a given or an automatically computed bounding box. If a\npreviously computed bounding box file is given, the cropping will be applied\nand the affine fixed accordingly.\n\nWarning: This works well on masked volumes (like with FSL-Bet) since\nit's looking for non-zero data. Therefore, you should validate the results on\nother types of images that haven't been masked.\n\nFormerly: scil_crop_volume.py", "help": "usage: scil_volume_crop.py [-h] [--ignore_voxel_size]\n                           [-v [{DEBUG,INFO,WARNING}]] [-f]\n                           [--input_bbox INPUT_BBOX | --output_bbox OUTPUT_BBOX]\n                           in_image out_image\n\nCrop a volume using a given or an automatically computed bounding box. 
If a\npreviously computed bounding box file is given, the cropping will be applied\nand the affine fixed accordingly.\n\nWarning: This works well on masked volumes (like with FSL-Bet) since\nit's looking for non-zero data. Therefore, you should validate the results on\nother types of images that haven't been masked.\n\nFormerly: scil_crop_volume.py\n\npositional arguments:\n  in_image             Path of the nifti file to crop.\n  out_image            Path of the cropped nifti file to write.\n\noptions:\n  -h, --help           show this help message and exit\n  --ignore_voxel_size  Ignore voxel size compatibility test between input bounding box and data. Warning, use only if you know what you are doing.\n  -v [{DEBUG,INFO,WARNING}]\n                       Produces verbose output depending on the provided level. \n                       Default level is warning, default when using -v is info.\n  -f                   Force overwriting of the output files.\n  --input_bbox INPUT_BBOX\n                       Path of the pickle file from which to take the bounding box to crop input file.\n  --output_bbox OUTPUT_BBOX\n                       Path of the pickle file where to write the computed bounding box. (.pickle extension)\n", -        "synonyms": [], +        "synonyms": [ +            [ +                "volume", +                "volumes", +                "volume" +            ], +            [ +                "subsequently", +                "previously" +            ], +            [ +                "based", +                "form", +                "result", +                "which" +            ], +            [ +                "clear", +                "considered", +                "long", +                "result", +                "work", +                "only" +            ], +            [ +                "work", +                "and" +            ], +            [ +                "applied", +                "applied" +            ], +            [ +                "considered", +                "are" +            ], +            [ +                "considered", +                "result", +                "therefore" +            ], +            [ +                "high", +                "higher", +                "highest", +                "level", +                "level" +            ], +            [ +                "matter", +                "question", +                "thinking", +                "true", +                "know" +            ], +            [ +                "left", +                "from" +            ], +            [ +                "left", +                "result", +                "when" +            ], +            [ +                "image", +                "images" +            ], +            [ +                "methods", +                "use" +            ], +            [ +                "thinking", +                "you" +            ], +            [ +                "long", +                "a" +            ], +            [ +                "voxel", +                "voxel" +            ], +            [ +                "held", +                "on" +            ], +            [ +                "clear", +                "long", +                "main", +                "result", +                "the" +            ], +            [ +                "clear", +                "considered", +                "form", +                "long", +                "result", +                "true", +                "work", +                "this" +            ], +            [ +                "clear", +                "result", +                "work", +                "that" +            ], +            [ +                "future", +                "work", +                "working", +                "for" +            ], +            [ +                "future", +                "will" +            ], +            [ +                "clear", +                "considered", +                "result", +                "work", +                "be" +            ], +            [ +                "create", +                "work", +                "working", +                "help" +            ], +            [ +                "clear", +                "order", +                "step", +                "work", +                "take" +            ], +            [ +                "clear", +                "matter", +                "order", +                "question", +                "step", +                "work", +                "should" +            ], +            [ +                "long", +                "with" +            ], +            [ +                "analysis", +                "data", +                "database", +                "tracking", +                "data" +            ], +            [ +                "clear", +                "matter", +                "question", +                "thinking", +                "true", +                "view", +                "work", +                "what" +            ], +            [ +                "considered", +                "involved", +                "work", +                "been" +            ], +            [ +                "methods", +                "using" +            ], +            [ +                "variety", +                "work", +                "other" +            ], +            [ +                "thinking", +                "work", +                "working", +                "doing" +            ], +            [ +                "considered", +                "order", +                "result", +                "subject", +                "given" +            ], +            [ +                "work", +                "works" +            ], +            [ +                "create", +                "order", +                "work", +                "to" +            ], +            [ +                "thinking", +                "working", +                "looking" +            ], +            [ +                "considered", +                "is" +            ], +            [ +                "area", +                "work", +                "where" +            ], +            [ +                "clear", +                "considered", +                "result", +                "work", +                "it" +            ], +            [ +                "larger", +                "size", +                "size" +            ], +            [ +                "result", +                "results" +            ], +            [ +                "volume", +                "volumes", +                "volumes" +            ], +            [ +                "clear", +                "matter", +                "question", +                "result", +                "if" +            ], +            [ +                "forms", +                "specific", +                "variety", +                "types" +            ], +            [ +                "considered", +                "experience", +                "large", +                "long", +                "result", +                "variety", +                "work", +                "working", +                "well" +            ], +            [ +                "result", +                "since" +            ] +        ], "keywords": [] 
}, { "name": "scil_volume_flip", "docstring": "Flip the volume according to the specified axis.\n\nFormerly: scil_flip_volume.py", "help": "usage: scil_volume_flip.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n                           in_image out_image dimension [dimension ...]\n\nFlip the volume according to the specified axis.\n\nFormerly: scil_flip_volume.py\n\npositional arguments:\n  in_image    Path of the input volume (nifti).\n  out_image   Path of the output volume (nifti).\n  dimension   The axes you want to flip. e.g.: to flip the x and y axes use: x y.\n\noptions:\n  -h, --help  show this help message and exit\n  -v [{DEBUG,INFO,WARNING}]\n              Produces verbose output depending on the provided level. \n              Default level is warning, default when using -v is info.\n  -f          Force overwriting of the output files.\n", -        "synonyms": [], +        "synonyms": [ +            [ +                "thinking", +                "you" +            ], +            [ +                "volume", +                "volumes", +                "volume" +            ], +            [ +                "work", +                "and" +            ], +            [ +                "held", +                "on" +            ], +            [ +                "clear", +                "long", +                "main", +                "result", +                "the" +            ], +            [ +                "methods", +                "using" +            ], +            [ +                "clear", +                "considered", +                "form", +                "long", +                "result", +                "true", +                "work", +                "this" +            ], +            [ +                "based", +                "reported", +                "according" +            ], +            [ +                "high", +                "higher", +                "highest", +                "level", +                "level" +            ], +            [ +                "create", +                "order", +                "work", +                "to" +            ], +            [ +                "create", +                "work", +                "working", +                "help" +            ], +            [ +                "left", +                "result", +                "when" +            ], +            [ +                "considered", +                "is" +            ], +            [ +                "methods", +                "use" +            ], +            [ +                "parameters", +                "specified" +            ] +        ], "keywords": [] }, { "name": "scil_volume_math", "docstring": "Performs an operation on a list of images. The supported operations are\nlisted below.\n\nThis script loads all images in memory and will often crash after a few\nhundred images.\n\nSome operations such as multiplication or addition accept float values as\nparameters instead of images.\n> scil_volume_math.py multiplication img.nii.gz 10 mult_10.nii.gz\n\nFormerly: scil_image_math.py", "help": "usage: scil_volume_math.py [-h] [--data_type DATA_TYPE] [--exclude_background]\n                           [-v [{DEBUG,INFO,WARNING}]] [-f]\n                           {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference,concatenate,dilation,erosion,closing,opening,blur}\n                           in_args [in_args ...] out_image\n\nPerforms an operation on a list of images. 
The supported operations are\nlisted below.\n\nThis script loads all images in memory and will often crash after a few\nhundred images.\n\nSome operations such as multiplication or addition accept float values as\nparameters instead of images.\n> scil_volume_math.py multiplication img.nii.gz 10 mult_10.nii.gz\n\nFormerly: scil_image_math.py\n\n    lower_threshold: IMG THRESHOLD\n        All values below the threshold will be set to zero.\n        All values above the threshold will be set to one.\n    \n    upper_threshold: IMG THRESHOLD\n        All values below the threshold will be set to one.\n        All values above the threshold will be set to zero.\n        Equivalent to lower_threshold followed by an inversion.\n    \n    lower_threshold_eq: IMG THRESHOLD\n        All values below the threshold will be set to zero.\n        All values above or equal to the threshold will be set to one.\n    \n    upper_threshold_eq: IMG THRESHOLD\n        All values below or equal to the threshold will be set to one.\n        All values above the threshold will be set to zero.\n        Equivalent to lower_threshold_eq followed by an inversion.\n    \n    lower_threshold_otsu: IMG\n        All values below or equal to the Otsu threshold will be set to zero.\n        All values above the Otsu threshold will be set to one.\n        (Otsu's method is an algorithm to perform automatic image thresholding\n        of the background.)\n    \n    upper_threshold_otsu: IMG\n        All values below the Otsu threshold will be set to one.\n        All values above or equal to the Otsu threshold will be set to zero.\n        Equivalent to lower_threshold_otsu followed by an inversion.\n    \n    lower_clip: IMG THRESHOLD\n        All values below the threshold will be set to threshold.\n    \n    upper_clip: IMG THRESHOLD\n        All values above the threshold will be set to threshold.\n    \n    absolute_value: IMG\n        All negative values will become positive.\n    \n    round: IMG\n        Round all decimal values to the closest integer.\n    \n    ceil: IMG\n        Ceil all decimal values to the next integer.\n    \n    floor: IMG\n        Floor all decimal values to the previous integer.\n    \n    normalize_sum: IMG\n        Normalize the image so the sum of all values is one.\n    \n    normalize_max: IMG\n        Normalize the image so the maximum value is one.\n    \n    log_10: IMG\n        Apply a log (base 10) to all non-zero values of an image.\n    \n    log_e: IMG\n        Apply a natural log to all non-zero values of an image.\n    \n    convert: IMG\n        Perform no operation, but simply change the data type.\n    \n    invert: IMG\n        Operation on binary image to interchange 0s and 1s in a binary mask.\n    \n    addition: IMGs\n        Add multiple images together.\n    \n    subtraction: IMG_1 IMG_2\n        Subtract first image by the second (IMG_1 - IMG_2).\n    \n    multiplication: IMGs\n        Multiply multiple images together (danger of underflow and overflow)\n    \n    division: IMG_1 IMG_2\n        Divide first image by the second (danger of underflow and overflow)\n        Ignores zero values, excluded from the operation.\n    \n    mean: IMGs\n        Compute the mean of images.\n        If a single 4D image is provided, average along the last dimension.\n    \n    std: IMGs\n        Compute the standard deviation across multiple images.\n        If a single 4D image is provided, compute the STD along the last\n        dimension.\n    \n    correlation: IMGs\n        Computes the correlation of the 3x3x3 neighborhood of each voxel, for\n        all pairs of input images. The final image is the average correlation\n        (through all pairs).\n        For a given pair of images:\n        - Background is considered as 0. May lead to very high correlations\n        close to the border of the background regions, or very poor ones if the\n        background in both images differs.\n        - Images are zero-padded. 
For the same reason as above, may lead to\n        very high correlations if you have data close to the border of the\n        image.\n        - NaN values (if a voxel's neighborhood is entirely uniform; std 0) are\n        replaced by\n            - 0 if at least one neighborhood entirely contained background.\n            - 1 if the voxel's neighborhoods are uniform in both images\n            - 0 if the voxel's neighborhood is uniform in one image, but not\n            the other.\n\n        UPDATE AS OF VERSION 2.0: Random noise was previously added in the\n        process to help avoid NaN values. Now replaced by either 0 or 1 as\n        explained above.\n    \n    union: IMGs\n        Operation on binary image to keep voxels that are non-zero in at\n        least one file.\n    \n    intersection: IMGs\n        Operation on binary image to keep the voxels that are non-zero and\n        present in all files.\n    \n    difference: IMG_1 IMG_2\n        Operation on binary image to keep voxels from the first file that are\n        not in the second file (non-zeros).\n    \n    concatenate: IMGs\n        Concatenate a list of 3D and 4D images into a single 4D image.\n    \n    dilation: IMG, VALUE\n        Binary morphological operation to spatially extend the values of an\n        image to their neighbors. VALUE is in voxels: an integer > 0.\n    \n    erosion: IMG, VALUE\n        Binary morphological operation to spatially shrink the volume contained\n        in a binary image. VALUE is in voxels: an integer > 0.\n    \n    closing: IMG, VALUE\n        Binary morphological operation, dilation followed by an erosion.\n    \n    opening: IMG, VALUE\n        Binary morphological operation, erosion followed by a dilation.\n    \n    blur: IMG, VALUE\n        Apply a Gaussian blur to a single image. VALUE is sigma, the standard\n        deviation of the Gaussian kernel.\n    \n\npositional arguments:\n  {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference,concatenate,dilation,erosion,closing,opening,blur}\n                        The type of operation to be performed on the images.\n  in_args               The list of image files or parameters. Refer to each operation's documentation of the expected arguments.\n  out_image             Output image path.\n\noptions:\n  -h, --help            show this help message and exit\n  --data_type DATA_TYPE\n                        Data type of the output image. Use the format: \n                        uint8, int16, int/float32, int/float64.\n  --exclude_background  Does not affect the background of the original images.\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "positive", + "negative" + ], + [ + "memory", + "memory" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "subsequently", + "previously" + ], + [ + "methods", + "method" + ], + [ + "clear", + "left", + "long", + "result", + "work", + "but" + ], + [ + "work", + "and" + ], + [ + "considered", + "are" + ], + [ + "clear", + "considered", + "highly", + "long", + "matter", + "true", + "work", + "very" + ], + [ + "left", + "into" + ], + [ + "working", + "together" + ], + [ + "long", + "work", + "working", + "now" + ], + [ + "possibility", + "danger" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "image", + "images" + ], + [ + "areas", + "region", + "regions", + "regions" + ], + [ + "positive", + "positive" + ], + [ + "methods", + "use" + ], + [ + "order", + "set" + ], + [ + "long", + "have" + ], + [ + "thinking", + "you" + ], + [ + "areas", + "neighborhoods" + ], + [ + "high", + "higher", + "increase", + "level", + "higher" + ], + [ + "meaning", + "refer" + ], + [ + "long", + "a" + ], + [ + "action", + "clear", + "future", + "result", + "step", + "change" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "applied", + "apply" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "methods", + "using" + ], + [ + "clear", + "considered", + "form", + "long", + "meaning", + "result", + "true", + "work", + "same" + ], + [ + "clear", + "lack", + "matter", + "possibility", + "question", + "result", + "thinking", + "true", + "reason" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "possibility", + "avoid" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "clear", + "result", + "work", + "that" + ], + [ + "future", + "will" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "result", + "followed" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "random", + "random" + ], + [ + "traditionally", + "often" + ], + [ + "threshold", + "threshold" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "last" + ], + [ + "held", + "in" + ], + [ + "parameter", + "parameters", + "parameters" + ], + [ + "average", + "average" + ], + [ + "clear", + "long", + "matter", + "result", + "work", + "so" + ], + [ + "analysis", + "data", + "database", + "tracking", + "data" + ], + [ + "considered", + "highly", + "considered" + ], + [ + "difference", + "difference" + ], + [ + "algorithm", + "algorithm" + ], + [ + "binary", + "binary" + ], + [ + "thinking", + "simply" + ], + [ + "variety", + "work", + "other" + ], + [ + "order", + "work", + "instead" + ], + [ + "left", + "after" + ], + [ + "process", + "processes", + "step", + "process" + ], + [ + "area", + "main", + "along" + ], + [ + "increase", + "expected" + ], + [ + "high", + "higher", + "level", + "high" + ], + [ + "considered", + "order", + "result", + "subject", + "given" + ], + [ + "supported", + "supported" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "image", + "image" + ], + [ + "considered", + "is" + ], + [ + "result", + "may" + ], + [ + "considered", + "subsequently", + "was" + ], + [ + "long", + "result", + "work", + "both" + ], + [ + "individual", + "each" + ], + [ + "matter", + "question", + "does" + ], + [ + "level", + "above" + ], + [ + 
"difference", + "meaning", + "result", + "mean" + ], + [ + "considered", + "long", + "work", + "one" + ], + [ + "work", + "all" + ], + [ + "large", + "larger", + "variety", + "work", + "addition" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "clear", + "considered", + "long", + "matter", + "order", + "question", + "result", + "work", + "not" + ], + [ + "clear", + "question", + "result", + "true", + "no" + ], + [ + "anatomical", + "similarity", + "morphological" + ], + [ + "future", + "held", + "step", + "next" + ], + [ + "area", + "neighborhood" + ], + [ + "clear", + "considered", + "form", + "result", + "either" + ], + [ + "large", + "work", + "some" + ], + [ + "large", + "long", + "few" + ], + [ + "considered", + "become" + ], + [ + "considered", + "specific", + "variety", + "such" + ] + ], "keywords": [] }, { "name": "scil_volume_remove_outliers_ransac", "docstring": "Remove outliers from image using the RANSAC algorithm.\nThe RANSAC algorithm parameters are sensitive to the input data.\n\nNOTE: Current default parameters are tuned for ad/md/rd images only.\n\nFormerly: scil_remove_outliers_ransac.py", "help": "usage: scil_volume_remove_outliers_ransac.py [-h] [--min_fit MIN_FIT]\n [--max_iter MAX_ITER]\n [--fit_thr FIT_THR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image\n\nRemove outliers from image using the RANSAC algorithm.\nThe RANSAC algorithm parameters are sensitive to the input data.\n\nNOTE: Current default parameters are tuned for ad/md/rd images only.\n\nFormerly: scil_remove_outliers_ransac.py\n\npositional arguments:\n in_image Nifti image.\n out_image Corrected Nifti image.\n\noptions:\n -h, --help show this help message and exit\n --min_fit MIN_FIT The minimum number of data values required to fit the model. [50]\n --max_iter MAX_ITER The maximum number of iterations allowed in the algorithm. [1000]\n --fit_thr FIT_THR Threshold value for determining when a data point fits a model. [0.01]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n                       Default level is warning, default when using -v is info.\n  -f                   Force overwriting of the output files.\n", -        "synonyms": [], +        "synonyms": [ +            [ +                "total", +                "number" +            ], +            [ +                "order", +                "required" +            ], +            [ +                "clear", +                "considered", +                "long", +                "result", +                "work", +                "only" +            ], +            [ +                "work", +                "and" +            ], +            [ +                "considered", +                "are" +            ], +            [ +                "difference", +                "point" +            ], +            [ +                "high", +                "higher", +                "highest", +                "level", +                "level" +            ], +            [ +                "left", +                "from" +            ], +            [ +                "left", +                "result", +                "when" +            ], +            [ +                "image", +                "images" +            ], +            [ +                "long", +                "a" +            ], +            [ +                "held", +                "on" +            ], +            [ +                "clear", +                "long", +                "main", +                "result", +                "the" +            ], +            [ +                "clear", +                "considered", +                "form", +                "long", +                "result", +                "true", +                "work", +                "this" +            ], +            [ +                "future", +                "work", +                "working", +                "for" +            ], +            [ +                "create", +                "work", +                "working", +                "help" +            ], +            [ +                "threshold", +                "threshold" +            ], +            [ +                "held", +                "in" +            ], +            [ +                "parameter", +                "parameters", +                "parameters" +            ], +            [ +                "analysis", +                "data", +                "database", +                "tracking", +                "data" +            ], +            [ +                "total", +                "50" +            ], +            [ +                "algorithm", +                "algorithm" +            ], +            [ +                "methods", +                "using" +            ], +            [ +                "future", +                "current" +            ], +            [ +                "create", +                "order", +                "work", +                "to" +            ], +            [ +                "image", +                "image" +            ], +            [ +                "considered", +                "is" +            ], +            [ +                "order", +                "allowed" +            ] +        ], "keywords": [] }, { "name": "scil_volume_resample", "docstring": "Script to resample a dataset to match the resolution of another\nreference dataset or to the resolution specified as an argument.\n\nFormerly: scil_resample_volume.py", "help": "usage: scil_volume_resample.py [-h]\n                               (--ref REF | --volume_size VOLUME_SIZE [VOLUME_SIZE ...] | --voxel_size VOXEL_SIZE [VOXEL_SIZE ...] | --iso_min)\n                               [--interp {nn,lin,quad,cubic}]\n                               [--enforce_dimensions]\n                               [-v [{DEBUG,INFO,WARNING}]] [-f]\n                               in_image out_image\n\nScript to resample a dataset to match the resolution of another\nreference dataset or to the resolution specified as an argument.\n\nFormerly: scil_resample_volume.py\n\npositional arguments:\n  in_image              Path of the input volume.\n  out_image             Path of the resampled volume.\n\noptions:\n  -h, --help            show this help message and exit\n  --ref REF             Reference volume to resample to.\n  --volume_size VOLUME_SIZE [VOLUME_SIZE ...]\n                        Sets the size for the volume. If the value is set to Y, it will resample to a shape of Y x Y x Y.\n  --voxel_size VOXEL_SIZE [VOXEL_SIZE ...]\n                        Sets the voxel size. If the value is set to Y, it will set a voxel size of Y x Y x Y.\n  --iso_min             Resample the volume to R x R x R with R being the smallest current voxel dimension.\n  --interp {nn,lin,quad,cubic}\n                        Interpolation mode.\n                        nn: nearest neighbour\n                        lin: linear\n                        quad: quadratic\n                        cubic: cubic\n                        Defaults to linear\n  --enforce_dimensions  Enforce the reference volume dimension.\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "volume", + "volumes", + "volume" + ], + [ + "work", + "and" + ], + [ + "considered", + "involved", + "being" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "left", + "result", + "when" + ], + [ + "order", + "set" + ], + [ + "question", + "argument" + ], + [ + "long", + "a" + ], + [ + "voxel", + "voxel" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "future", + "will" + ], + [ + "considered", + "role", + "work", + "as" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "held", + "in" + ], + [ + "long", + "with" + ], + [ + "parameters", + "specified" + ], + [ + "methods", + "using" + ], + [ + "future", + "current" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "larger", + "size", + "size" + ], + [ + "left", + "another" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "shape", + "structure", + "shape" + ] + ], "keywords": [] }, { "name": "scil_volume_reshape_to_reference", "docstring": "Reshape / reslice / resample *.nii or *.nii.gz using a reference.\nThis script can be used to align freesurfer/civet output, as .mgz,\nto the original input image.\n\n>>> scil_volume_reshape_to_reference.py wmparc.mgz t1.nii.gz wmparc_t1.nii.gz\\\n --interpolation nearest\n\nFormerly: scil_reshape_to_reference.py", "help": "usage: scil_volume_reshape_to_reference.py [-h]\n [--interpolation {linear,nearest}]\n [--keep_dtype]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file in_ref_file out_file\n\nReshape / reslice / resample *.nii or *.nii.gz using a reference.\nThis script can be used to align freesurfer/civet output, as .mgz,\nto the original input image.\n\n>>> scil_volume_reshape_to_reference.py wmparc.mgz t1.nii.gz wmparc_t1.nii.gz\\\n --interpolation nearest\n\nFormerly: scil_reshape_to_reference.py\n\npositional arguments:\n in_file Path of the image (.nii or .mgz) to be reshaped.\n in_ref_file Path of the reference image (.nii).\n out_file Output filename of the reshaped image (.nii).\n\noptions:\n -h, --help show this help message and exit\n --interpolation {linear,nearest}\n Interpolation: \"linear\" or \"nearest\". [linear]\n --keep_dtype If True, keeps the data_type of the input image (in_file) when saving the output image (out_file).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n              Default level is warning, default when using -v is info.\n  -f          Force overwriting of the output files.\n", -        "synonyms": [], +        "synonyms": [ +            [ +                "long", +                "a" +            ], +            [ +                "work", +                "and" +            ], +            [ +                "methods", +                "using" +            ], +            [ +                "held", +                "on" +            ], +            [ +                "clear", +                "long", +                "main", +                "result", +                "the" +            ], +            [ +                "clear", +                "matter", +                "question", +                "result", +                "if" +            ], +            [ +                "clear", +                "considered", +                "form", +                "long", +                "result", +                "true", +                "work", +                "this" +            ], +            [ +                "high", +                "higher", +                "highest", +                "level", +                "level" +            ], +            [ +                "meaning", +                "true", +                "true" +            ], +            [ +                "clear", +                "considered", +                "result", +                "work", +                "be" +            ], +            [ +                "considered", +                "role", +                "work", +                "as" +            ], +            [ +                "create", +                "order", +                "work", +                "to" +            ], +            [ +                "image", +                "image" +            ], +            [ +                "create", +                "work", +                "working", +                "help" +            ], +            [ +                "left", +                "result", +                "when" +            ], +            [ +                "considered", +                "is" +            ] +        ], "keywords": [] }, { "name": "scil_volume_stats_in_ROI", "docstring": "Compute the statistics (mean, std) of scalar maps, which can represent\ndiffusion metrics, in a ROI. Prints the results.\n\nThe mask can either be a binary mask, or a weighting mask. If the mask is\na weighting mask, it should either contain floats between 0 and 1 or should be\nnormalized with --normalize_weights. IMPORTANT: if the mask contains weights\n(and not 0 and 1 exclusively), the standard deviation will also be weighted.", "help": "usage: scil_volume_stats_in_ROI.py [-h]\n                                   (--metrics_dir dir | --metrics file [file ...])\n                                   [--bin] [--normalize_weights]\n                                   [--indent INDENT] [--sort_keys]\n                                   [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                   in_mask\n\nCompute the statistics (mean, std) of scalar maps, which can represent\ndiffusion metrics, in a ROI. Prints the results.\n\nThe mask can either be a binary mask, or a weighting mask. If the mask is\na weighting mask, it should either contain floats between 0 and 1 or should be\nnormalized with --normalize_weights. IMPORTANT: if the mask contains weights\n(and not 0 and 1 exclusively), the standard deviation will also be weighted.\n\npositional arguments:\n  in_mask               Mask volume filename.\n                        Can be a binary mask or a weighted mask.\n\noptions:\n  -h, --help            show this help message and exit\n  --bin                 If set, will consider every value of the mask higher than 0 to be \n                        part of the mask (equivalent weighting for every voxel).\n  --normalize_weights   If set, the weights will be normalized to the [0,1] range.\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nMetrics input options:\n  --metrics_dir dir     Name of the directory containing metrics files: we will \n                        load all nifti files.\n  --metrics file [file ...]\n                        Metrics nifti filename. 
List of the names of the metrics file, \n                        in nifti format.\n\nJson options:\n  --indent INDENT       Indent for json pretty print.\n  --sort_keys           Sort keys in output json.\n", -        "synonyms": [], +        "synonyms": [ +            [ +                "volume", +                "volumes", +                "volume" +            ], +            [ +                "based", +                "form", +                "result", +                "which" +            ], +            [ +                "work", +                "and" +            ], +            [ +                "work", +                "also" +            ], +            [ +                "create", +                "experience", +                "matter", +                "thinking", +                "true", +                "sort" +            ], +            [ +                "high", +                "higher", +                "highest", +                "level", +                "level" +            ], +            [ +                "left", +                "result", +                "when" +            ], +            [ +                "meaning", +                "name" +            ], +            [ +                "order", +                "set" +            ], +            [ +                "clear", +                "thinking", +                "work", +                "we" +            ], +            [ +                "long", +                "a" +            ], +            [ +                "voxel", +                "voxel" +            ], +            [ +                "held", +                "on" +            ], +            [ +                "represent", +                "represent" +            ], +            [ +                "clear", +                "long", +                "main", +                "result", +                "the" +            ], +            [ +                "clear", +                "considered", +                "form", +                "long", +                "result", +                "true", +                "work", +                "this" +            ], +            [ +                "future", +                "work", +                "working", +                "for" +            ], +            [ +                "diffusion", +                "diffusion" +            ], +            [ +                "future", +                "will" +            ], +            [ +                "clear", +                "considered", +                "result", +                "work", +                "be" +            ], +            [ +                "create", +                "work", +                "working", +                "help" +            ], +            [ +                "held", +                "in" +            ], +            [ +                "clear", +                "matter", +                "order", +                "question", +                "step", +                "work", +                "should" +            ], +            [ +                "long", +                "with" +            ], +            [ +                "binary", +                "binary" +            ], +            [ +                "methods", +                "using" +            ], +            [ +                "create", +                "order", +                "work", +                "to" +            ], +            [ +                "considered", +                "is" +            ], +            [ +                "clear", +                "considered", +                "result", +                "work", +                "it" +            ], +            [ +                "result", +                "results" +            ], +            [ +                "weighted", +                "weighted" +            ], +            [ +                "difference", +                "meaning", +                "result", +                "mean" +            ], +            [ +                "work", +                "all" +            ], +            [ +                "area", +                "main", +                "work", +                "part" +            ], +            [ +                "clear", +                "matter", +                "question", +                "result", +                "if" +            ], +            [ +                "clear", +                "considered", +                "long", +                "matter", +                "order", +                "question", +                "result", +                "work", +                "not" +            ], +            [ +                "maps", +                "maps" +            ], +            [ +                "clear", +                "considered", +                "form", +                "result", +                "either" +            ], +            [ +                "considered", +                "possibility", +                "question", +                "step", +                "consider" +            ], +            [ +                "considered", +                "key", +                "work", +                "important" +            ] +        ], "keywords": [] }, { "name": "scil_volume_stats_in_labels", "docstring": "Computes the information from the input map for each cortical region\n(corresponding to an atlas).\n\nHint: For instance, this script could be useful if you have a seed map from a\nspecific bundle, to know from which regions it originated.\n\nFormerly: scil_compute_seed_by_labels.py", "help": "usage: scil_volume_stats_in_labels.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                      in_labels in_labels_lut in_map\n\nComputes the information from the input map for each cortical region\n(corresponding to an atlas).\n\nHint: For instance, this script could be useful if you have a seed map from a\nspecific bundle, to know from which regions it originated.\n\nFormerly: scil_compute_seed_by_labels.py\n\npositional arguments:\n  in_labels      Path of the input label file.\n  in_labels_lut  Path of the LUT file corresponding to labels, used to name the regions of interest.\n  in_map         Path of the input map file. Expecting a 3D file.\n\noptions:\n  -h, --help     show this help message and exit\n  -v [{DEBUG,INFO,WARNING}]\n                 Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], + "synonyms": [ + [ + "based", + "form", + "result", + "which" + ], + [ + "work", + "and" + ], + [ + "high", + "higher", + "highest", + "level", + "level" + ], + [ + "matter", + "question", + "thinking", + "true", + "know" + ], + [ + "left", + "from" + ], + [ + "left", + "result", + "when" + ], + [ + "areas", + "region", + "regions", + "regions" + ], + [ + "meaning", + "name" + ], + [ + "long", + "have" + ], + [ + "thinking", + "you" + ], + [ + "specific", + "specific" + ], + [ + "long", + "a" + ], + [ + "tool", + "useful" + ], + [ + "held", + "on" + ], + [ + "clear", + "long", + "main", + "result", + "the" + ], + [ + "higher", + "interest" + ], + [ + "clear", + "considered", + "form", + "long", + "result", + "true", + "work", + "this" + ], + [ + "future", + "work", + "working", + "for" + ], + [ + "data", + "knowledge", + "information" + ], + [ + "clear", + "considered", + "result", + "work", + "be" + ], + [ + "create", + "work", + "working", + "help" + ], + [ + "atlas", + "atlas" + ], + [ + "cortical", + "cortical" + ], + [ + "methods", + "using" + ], + [ + "contrast", + "specific", + "subject", + "instance" + ], + [ + "create", + "order", + "work", + "to" + ], + [ + "considered", + "is" + ], + [ + "clear", + "considered", + "result", + "work", + "it" + ], + [ + "individual", + "each" + ], + [ + "clear", + "matter", + "question", + "result", + "if" + ], + [ + "area", + "areas", + "region", + "regions", + "region" + ], + [ + "maps", + "map" + ], + [ + "bundles", + "bundle" + ], + [ + "clear", + "result", + "work", + "could" + ] + ], "keywords": [] } ] diff --git a/scilpy-bot-scripts/json_files/knowledge_base_word2vec.json b/scilpy-bot-scripts/json_files/knowledge_base_word2vec.json new file mode 100644 index 000000000..b5d1f96bd --- /dev/null +++ b/scilpy-bot-scripts/json_files/knowledge_base_word2vec.json @@ -0,0 +1,9715 @@ +{ + "scripts": [ + { + "name": "scil_NODDI_maps", + "docstring": "Compute NODDI [1] maps using AMICO.\nMulti-shell DWI necessary.\n\nFormerly: scil_compute_NODDI.py", + "help": "usage: scil_NODDI_maps.py [-h] [--mask MASK] [--out_dir OUT_DIR]\n [--tolerance tol] [--skip_b0_check]\n [--para_diff PARA_DIFF] [--iso_diff ISO_DIFF]\n [--lambda1 LAMBDA1] [--lambda2 LAMBDA2]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec\n\nCompute NODDI [1] maps using AMICO.\nMulti-shell DWI necessary.\n\nFormerly: scil_compute_NODDI.py\n\npositional arguments:\n in_dwi DWI file acquired with a NODDI compatible protocol (single-shell data not suited).\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-vectors filename, in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Brain mask filename.\n --out_dir OUT_DIR Output directory for the NODDI results. [results]\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). 
Use this option to \n                        allow continuing even if the minimum b-value is suspiciously high.\n                        Use with care, and only if you understand your data.\n  --processes NBR       Number of sub-processes to start. \n                        Default: [1]\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nModel options:\n  --para_diff PARA_DIFF\n                        Axial diffusivity (AD) in the CC. [0.0017]\n  --iso_diff ISO_DIFF   Mean diffusivity (MD) in ventricles. [0.003]\n  --lambda1 LAMBDA1     First regularization parameter. [0.5]\n  --lambda2 LAMBDA2     Second regularization parameter. [0.001]\n\nKernels options:\n  --save_kernels DIRECTORY\n                        Output directory for the COMMIT kernels.\n  --load_kernels DIRECTORY\n                        Input directory where the COMMIT kernels are located.\n  --compute_only        Compute kernels only, --save_kernels must be used.\n\nReference:\n    [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n        NODDI: practical in vivo neurite orientation dispersion\n        and density imaging of the human brain.\n        NeuroImage. 2012 Jul 16;61:1000-16.\n", +            "synonyms": [ +                [ +                    "human", +                    "human" +                ], +                [ +                    "axial", +                    "axial" +                ], +                [ +                    "orientation", +                    "orientation" +                ], +                [ +                    "imaging", +                    "imaging" +                ], +                [ +                    "high", +                    "high" +                ], +                [ +                    "maps", +                    "maps" +                ], +                [ +                    "parameter", +                    "parameter" +                ], +                [ +                    "Data", +                    "data", +                    "data" +                ], +                [ +                    "processes", +                    "processes" +                ], +                [ +                    "vivo", +                    "vivo" +                ], +                [ +                    "level", +                    "level" +                ], +                [ +                    "brain", +                    "brain" +                ] +            ], +            "keywords": [] +        }, +        { +            "name": "scil_NODDI_priors", +            "docstring": "Compute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py", +            "help": "usage: scil_NODDI_priors.py [-h] [--fa_min_single_fiber FA_MIN_SINGLE_FIBER]\n                            [--fa_max_ventricles FA_MAX_VENTRICLES]\n                            [--md_min_ventricles MD_MIN_VENTRICLES]\n                            [--roi_radius ROI_RADIUS]\n                            [--roi_center pos pos pos]\n                            [--out_txt_1fiber_para FILE]\n                            [--out_txt_1fiber_perp FILE]\n                            [--out_mask_1fiber FILE]\n                            [--out_txt_ventricles FILE]\n                            [--out_mask_ventricles FILE]\n                            [-v [{DEBUG,INFO,WARNING}]] [-f]\n                            in_FA in_AD in_RD in_MD\n\nCompute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py\n\npositional arguments:\n  in_FA                 Path to the FA volume.\n  in_AD                 Path to the axial diffusivity (AD) volume.\n  in_RD                 Path to the radial diffusivity (RD) volume.\n  in_MD                 Path to the mean diffusivity (MD) volume.\n\noptions:\n  -h, --help            show this help message and exit\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nMetrics options:\n  --fa_min_single_fiber FA_MIN_SINGLE_FIBER\n                        Minimal threshold of FA (voxels above that threshold are considered in \n                        the single fiber mask). [0.7]\n  --fa_max_ventricles FA_MAX_VENTRICLES\n                        Maximal threshold of FA (voxels under that threshold are considered in \n                        the ventricles). [0.1]\n  --md_min_ventricles MD_MIN_VENTRICLES\n                        Minimal threshold of MD in mm2/s (voxels above that threshold are considered \n                        in the ventricles). [0.003]\n\nRegions options:\n  --roi_radius ROI_RADIUS\n                        Radius of the region used to estimate the priors. The roi will be a cube spanning \n                        from ROI_CENTER in each direction. 
[20]\n  --roi_center pos pos pos\n                        Center of the roi of size roi_radius used to estimate the priors; a 3-value coordinate. \n                        If not set, uses the center of the 3D volume.\n\nOutputs:\n  --out_txt_1fiber_para FILE\n                        Output path for the text file containing the single fiber average value of AD.\n                        If not set, the file will not be saved.\n  --out_txt_1fiber_perp FILE\n                        Output path for the text file containing the single fiber average value of RD.\n                        If not set, the file will not be saved.\n  --out_mask_1fiber FILE\n                        Output path for single fiber mask. If not set, the mask will not be saved.\n  --out_txt_ventricles FILE\n                        Output path for the text file containing the ventricles average value of MD.\n                        If not set, the file will not be saved.\n  --out_mask_ventricles FILE\n                        Output path for the ventricle mask.\n                        If not set, the mask will not be saved.\n\nReference:\n    [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n        NODDI: practical in vivo neurite orientation dispersion and density\n        imaging of the human brain. NeuroImage. 2012 Jul 16;61:1000-16.\n", +            "synonyms": [ +                [ +                    "region", +                    "regions", +                    "region" +                ], +                [ +                    "human", +                    "human" +                ], +                [ +                    "axial", +                    "axial" +                ], +                [ +                    "direction", +                    "direction" +                ], +                [ +                    "orientation", +                    "orientation" +                ], +                [ +                    "region", +                    "regions", +                    "regions" +                ], +                [ +                    "imaging", +                    "imaging" +                ], +                [ +                    "threshold", +                    "thresholds", +                    "threshold" +                ], +                [ +                    "voxel", +                    "voxels" +                ], +                [ +                    "vivo", +                    "vivo" +                ], +                [ +                    "volume", +                    "volumes", +                    "volume" +                ], +                [ +                    "average", +                    "average" +                ], +                [ +                    "considered", +                    "considered" +                ], +                [ +                    "level", +                    "level" +                ], +                [ +                    "brain", +                    "brain" +                ], +                [ +                    "size", +                    "size" +                ] +            ], +            "keywords": [] +        }, +        { +            "name": "scil_aodf_metrics", +            "docstring": "Script to compute various metrics derived from asymmetric ODF.\n\nThese metrics include the asymmetric peak directions image, a number of fiber\ndirections (nufid) map [1], the asymmetry index (ASI) map [2] and an odd-power\nmap [3].\n\nThe asymmetric peak directions image contains peaks per hemisphere, considering\nantipodal sphere directions as distinct. On a symmetric signal, the number of\nasymmetric peaks extracted is then twice the number of symmetric peaks.\n\nThe nufid map is the asymmetric alternative to NuFO maps. It counts the\nnumber of asymmetric peaks extracted and ranges in [0..N] with N the maximum\nnumber of peaks.\n\nThe asymmetric index is a cosine-based metric in the range [0..1], with 0\ncorresponding to a perfectly symmetric signal and 1 to a perfectly asymmetric\nsignal.\n\nThe odd-power map is also in the range [0..1], with 0 corresponding to a\nperfectly symmetric signal and 1 to a perfectly anti-symmetric signal. 
It is\ngiven as the ratio of the L2-norm of odd SH coefficients to the L2-norm of all\nSH coefficients.\n\nFormerly: scil_compute_asym_odf_metrics.py", + "help": "usage: scil_aodf_metrics.py [-h] [--mask MASK] [--asi_map ASI_MAP]\n [--odd_power_map ODD_POWER_MAP] [--peaks PEAKS]\n [--peak_values PEAK_VALUES]\n [--peak_indices PEAK_INDICES] [--nufid NUFID]\n [--not_all] [--at A_THRESHOLD] [--rt R_THRESHOLD]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--processes NBR]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh\n\nScript to compute various metrics derived from asymmetric ODF.\n\nThese metrics include the asymmetric peak directions image, a number of fiber\ndirections (nufid) map [1], the asymmetry index (ASI) map [2] and an odd-power\nmap [3].\n\nThe asymmetric peak directions image contains peaks per hemisphere, considering\nantipodal sphere directions as distinct. On a symmetric signal, the number of\nasymmetric peaks extracted is then twice the number of symmetric peaks.\n\nThe nufid map is the asymmetric alternative to NuFO maps. It counts the\nnumber of asymmetric peaks extracted and ranges in [0..N] with N the maximum\nnumber of peaks.\n\nThe asymmetric index is a cosine-based metric in the range [0..1], with 0\ncorresponding to a perfectly symmetric signal and 1 to a perfectly asymmetric\nsignal.\n\nThe odd-power map is also in the range [0..1], with 0 corresponding to a\nperfectly symmetric signal and 1 to a perfectly anti-symmetric signal. It is\ngiven as the ratio of the L2-norm of odd SH coefficients to the L2-norm of all\nSH coefficients.\n\nFormerly: scil_compute_asym_odf_metrics.py\n\npositional arguments:\n in_sh Input SH image.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Optional mask.\n --asi_map ASI_MAP Output asymmetry index (ASI) map.\n --odd_power_map ODD_POWER_MAP\n Output odd power map.\n --peaks PEAKS Output filename for the extracted peaks.\n --peak_values PEAK_VALUES\n Output filename for the extracted peaks values.\n --peak_indices PEAK_INDICES\n Output filename for the generated peaks indices on the sphere.\n --nufid NUFID Output filename for the nufid file.\n --not_all If set, only saves the files specified using the file flags [False].\n --at A_THRESHOLD Absolute threshold on fODF amplitude. This value should be set to\n approximately 1.5 to 2 times the maximum fODF amplitude in isotropic voxels\n (i.e. ventricles).\n Use scil_fodf_max_in_ventricles.py to find the maximal value.\n See [Dell'Acqua et al HBM 2013] [0.0].\n --rt R_THRESHOLD Relative threshold on fODF amplitude in percentage [0.1].\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere to use for peak directions estimation [symmetric724].\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. 
MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] C. Poirier and M. Descoteaux, \"Filtering Methods for Asymmetric ODFs:\nWhere and How Asymmetry Occurs in the White Matter.\" bioRxiv. 2022 Jan 1;\n2022.12.18.520881. doi: https://doi.org/10.1101/2022.12.18.520881\n\n[2] S. Cetin Karayumak, E. \u00d6zarslan, and G. Unal,\n\"Asymmetric Orientation Distribution Functions (AODFs) revealing intravoxel\ngeometry in diffusion MRI,\" Magnetic Resonance Imaging, vol. 49, pp. 145-158,\nJun. 2018, doi: https://doi.org/10.1016/j.mri.2018.03.006.\n\n[3] C. Poirier, E. St-Onge, and M. Descoteaux, \"Investigating the Occurence of\nAsymmetric Patterns in White Matter Fiber Orientation Distribution Functions\"\n[Abstract], In: Proc. Intl. Soc. Mag. Reson. Med. 29 (2021), 2021 May 15-20,\nVancouver, BC, Abstract number 0865.\n", + "synonyms": [ + [ + "variety", + "various" + ], + [ + "maps", + "map" + ], + [ + "white", + "white" + ], + [ + "orientation", + "orientation" + ], + [ + "imaging", + "imaging" + ], + [ + "image", + "image" + ], + [ + "diffusion", + "diffusion" + ], + [ + "maps", + "maps" + ], + [ + "signal", + "signal" + ], + [ + "processes", + "processes" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "voxel", + "voxels" + ], + [ + "based", + "based" + ], + [ + "occurrence", + "occurence" + ], + [ + "matter", + "matter" + ], + [ + "hemisphere", + "hemisphere" + ], + [ + "methods", + "methods" + ], + [ + "level", + "level" + ], + [ + "false", + "false" + ], + [ + "function", + "functions", + "functions" + ] + ], + "keywords": [] + }, + { + "name": "scil_bids_validate", + "docstring": "Create a json file from a BIDS dataset detailing all info\nneeded for tractoflow\n- DWI/rev_DWI\n- T1\n- fmap/sbref (based on IntendedFor entity)\n- Freesurfer (optional - could be one per participant\n or one per participant/session)\n\nThe BIDS dataset MUST be homogeneous.\nThe metadata need to be uniform across all participants/sessions/runs\n\nMandatory entity: IntendedFor\nSensitive entities: PhaseEncodingDirection, TotalReadoutTime, direction\n\nFormerly: scil_validate_bids.py", + "help": "usage: scil_bids_validate.py [-h] [--bids_ignore BIDS_IGNORE] [--fs FS]\n [--clean] [--readout READOUT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bids out_json\n\nCreate a json file from a BIDS dataset detailing all info\nneeded for tractoflow\n- DWI/rev_DWI\n- T1\n- fmap/sbref (based on IntendedFor entity)\n- Freesurfer (optional - could be one per participant\n or one per participant/session)\n\nThe BIDS dataset MUST be homogeneous.\nThe metadata need to be uniform across all participants/sessions/runs\n\nMandatory entity: IntendedFor\nSensitive entities: PhaseEncodingDirection, TotalReadoutTime, direction\n\nFormerly: scil_validate_bids.py\n\npositional arguments:\n in_bids Input BIDS folder.\n out_json Output json file.\n\noptions:\n -h, --help show this help message and exit\n --bids_ignore BIDS_IGNORE\n If you want to ignore some subjects or some files, you\n can provide an extra bidsignore file. Check:\n https://github.com/bids-standard/bids-\n validator#bidsignore\n --fs FS Output freesurfer path. 
It will add keys wmparc and\n aparc+aseg.\n --clean If set, it will remove all the participants that are\n missing any information.\n --readout READOUT Default total readout time value [0.062].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "participants", + "participants" + ], + [ + "direction", + "direction" + ], + [ + "create", + "create" + ], + [ + "participant", + "participant" + ], + [ + "subjects", + "subjects" + ], + [ + "based", + "based" + ], + [ + "level", + "level" + ], + [ + "total", + "total" + ] + ], + "keywords": [] + }, + { + "name": "scil_bingham_metrics", + "docstring": "Script to compute fODF lobe-specific metrics derived from a Bingham\ndistribution fit, as described in [1]. Resulting metrics are fiber density\n(FD), fiber spread (FS) and fiber fraction (FF) [2].\n\nThe Bingham coefficients volume comes from scil_fodf_to_bingham.py.\n\nA lobe's FD is the integral of the Bingham function on the sphere. It\nrepresents the density of fibers going through a given voxel for a given\nfODF lobe (fixel). A lobe's FS is the ratio of its FD on its maximum AFD. It\nis at its minimum for a sharp lobe and at its maximum for a wide lobe. A lobe's\nFF is the ratio of its FD on the total FD in the voxel.\n\nUsing 12 threads, the execution takes 10 minutes for FD estimation for a brain\nwith 1mm isotropic resolution. Other metrics take less than a second.\n\nFormerly: scil_compute_lobe_specific_fodf_metrics.py", + "help": "usage: scil_bingham_metrics.py [-h] [--out_fd OUT_FD] [--out_fs OUT_FS]\n [--out_ff OUT_FF] [--not_all] [--mask MASK]\n [--nbr_integration_steps NBR_INTEGRATION_STEPS]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR]\n [-f]\n in_bingham\n\nScript to compute fODF lobe-specific metrics derived from a Bingham\ndistribution fit, as described in [1]. Resulting metrics are fiber density\n(FD), fiber spread (FS) and fiber fraction (FF) [2].\n\nThe Bingham coefficients volume comes from scil_fodf_to_bingham.py.\n\nA lobe's FD is the integral of the Bingham function on the sphere. It\nrepresents the density of fibers going through a given voxel for a given\nfODF lobe (fixel). A lobe's FS is the ratio of its FD on its maximum AFD. It\nis at its minimum for a sharp lobe and at its maximum for a wide lobe. A lobe's\nFF is the ratio of its FD on the total FD in the voxel.\n\nUsing 12 threads, the execution takes 10 minutes for FD estimation for a brain\nwith 1mm isotropic resolution. Other metrics take less than a second.\n\nFormerly: scil_compute_lobe_specific_fodf_metrics.py\n\npositional arguments:\n in_bingham Input Bingham nifti image.\n\noptions:\n -h, --help show this help message and exit\n --out_fd OUT_FD Path to output fiber density. [fd.nii.gz]\n --out_fs OUT_FS Path to output fiber spread. [fs.nii.gz]\n --out_ff OUT_FF Path to fiber fraction file. [ff.nii.gz]\n --not_all Do not compute all metrics. Then, please provide the output paths of the files you need.\n --mask MASK Optional mask image. Only voxels inside the mask are computed.\n --nbr_integration_steps NBR_INTEGRATION_STEPS\n Number of integration steps along the theta axis for fiber density estimation. [50]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. 
\n Default: [1]\n -f Force overwriting of the output files.\n\n[1] T. W. Riffert, J. Schreiber, A. Anwander, and T. R. Kn\u00f6sche, \u201cBeyond\n fractional anisotropy: Extraction of bundle-specific structural metrics\n from crossing fiber models,\u201d NeuroImage, vol. 100, pp. 176-191, Oct. 2014,\n doi: 10.1016/j.neuroimage.2014.06.015.\n\n[2] J. Schreiber, T. Riffert, A. Anwander, and T. R. Kn\u00f6sche, \u201cPlausibility\n Tracking: A method to evaluate anatomical connectivity and microstructural\n properties along fiber pathways,\u201d NeuroImage, vol. 90, pp. 163-178, Apr.\n 2014, doi: 10.1016/j.neuroimage.2014.01.002.\n", + "synonyms": [ + [ + "lobe", + "lobe" + ], + [ + "methods", + "method" + ], + [ + "examine", + "evaluate" + ], + [ + "connectivity", + "connectivity" + ], + [ + "bundles", + "bundle" + ], + [ + "image", + "image" + ], + [ + "pathway", + "pathways", + "pathways" + ], + [ + "processes", + "processes" + ], + [ + "function", + "functions", + "function" + ], + [ + "voxel", + "voxels" + ], + [ + "integrating", + "integration" + ], + [ + "structural", + "structural" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "anatomical", + "anatomy", + "anatomical" + ], + [ + "voxel", + "voxel" + ], + [ + "tracking", + "tracking" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ], + [ + "total", + "total" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_btensor_metrics", + "docstring": "Script to compute microstructure metrics using the DIVIDE method. In order to\noperate, the script needs at least two different types of b-tensor encodings.\nNote that custom encodings are not yet supported, so that only the linear\ntensor encoding (LTE, b_delta = 1), the planar tensor encoding\n(PTE, b_delta = -0.5), the spherical tensor encoding (STE, b_delta = 0) and\nthe cigar shape tensor encoding (b_delta = 0.5) are available. Moreover, all\nof `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names. Thus, this\nscript outputs the results from the DIVIDE fit or direct derivatives:\nmean diffusivity (MD), isotropic mean kurtosis (mk_i), anisotropic mean\nkurtosis (mk_a), total mean kurtosis (mk_t) and finally micro-FA (uFA).\nSpecific names can be specified using the\nfile flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output. The order parameter can also be computed from the uFA and a\nprecomputed FA, using separate input parameters.\n\n>>> scil_btensor_metrics.py --in_dwis LTE.nii.gz PTE.nii.gz STE.nii.gz\n --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs LTE.bvec PTE.bvec STE.bvec\n --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nIMPORTANT: If the script does not converge to a solution, it is probably due to\nnoise outside the brain. Thus, it is strongly recommended to provide a brain\nmask with --mask.\n\nBased on Markus Nilsson, Filip Szczepankiewicz, Bj\u00f6rn Lampinen, Andr\u00e9 Ahlgren,\nJo\u00e3o P. de Almeida Martins, Samo Lasic, Carl-Fredrik Westin,\nand Daniel Topgaard. An open-source framework for analysis of multidimensional\ndiffusion MRI data implemented in MATLAB.\nProc. Intl. Soc. Mag. Reson. Med. 
(26), Paris, France, 2018.\n\nFormerly: scil_compute_divide.py", + "help": "usage: scil_btensor_metrics.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--mask MASK]\n [--tolerance tol] [--skip_b0_check]\n [--fit_iters FIT_ITERS]\n [--random_iters RANDOM_ITERS]\n [--do_weight_bvals] [--do_weight_pa]\n [--do_multiple_s0] [--op OP] [--fa FA]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n [-f] [--not_all] [--md file] [--ufa file]\n [--mk_i file] [--mk_a file] [--mk_t file]\n\nScript to compute microstructure metrics using the DIVIDE method. In order to\noperate, the script needs at least two different types of b-tensor encodings.\nNote that custom encodings are not yet supported, so that only the linear\ntensor encoding (LTE, b_delta = 1), the planar tensor encoding\n(PTE, b_delta = -0.5), the spherical tensor encoding (STE, b_delta = 0) and\nthe cigar shape tensor encoding (b_delta = 0.5) are available. Moreover, all\nof `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names. Thus, this\nscript outputs the results from the DIVIDE fit or direct derivatives:\nmean diffusivity (MD), isotropic mean kurtosis (mk_i), anisotropic mean\nkurtosis (mk_a), total mean kurtosis (mk_t) and finally micro-FA (uFA).\nSpecific names can be specified using the\nfile flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output. The order parameter can also be computed from the uFA and a\nprecomputed FA, using separate input parameters.\n\n>>> scil_btensor_metrics.py --in_dwis LTE.nii.gz PTE.nii.gz STE.nii.gz\n --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs LTE.bvec PTE.bvec STE.bvec\n --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nIMPORTANT: If the script does not converge to a solution, it is probably due to\nnoise outside the brain. Thus, it is strongly recommended to provide a brain\nmask with --mask.\n\nBased on Markus Nilsson, Filip Szczepankiewicz, Bj\u00f6rn Lampinen, Andr\u00e9 Ahlgren,\nJo\u00e3o P. de Almeida Martins, Samo Lasic, Carl-Fredrik Westin,\nand Daniel Topgaard. An open-source framework for analysis of multidimensional\ndiffusion MRI data implemented in MATLAB.\nProc. Intl. Soc. Mag. Reson. Med. (26), Paris, France, 2018.\n\nFormerly: scil_compute_divide.py\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as dwi, bval and bvec inputs.\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. 
To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --fit_iters FIT_ITERS\n The number of times the gamma fit will be done [1]\n --random_iters RANDOM_ITERS\n The number of iterations for the initial parameters search. [50]\n --do_weight_bvals If set, does not do a weighting on the bvalues in the gamma fit.\n --do_weight_pa If set, does not do a powder averaging weighting in the gamma fit.\n --do_multiple_s0 If set, does not take into account multiple baseline signals.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags. (Default: False)\n\nOrder parameter (OP):\n --op OP Output filename for the order parameter. The OP will not be output if this is not given. Computation of the OP also requires a precomputed FA map (given using --fa).\n --fa FA Path to a FA map. Needed for calculating the OP.\n\nFile flags:\n --md file Output filename for the MD.\n --ufa file Output filename for the microscopic FA.\n --mk_i file Output filename for the isotropic mean kurtosis.\n --mk_a file Output filename for the anisotropic mean kurtosis.\n --mk_t file Output filename for the total mean kurtosis.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "methods", + "method" + ], + [ + "signal", + "signals" + ], + [ + "maps", + "map" + ], + [ + "level", + "level" + ], + [ + "supported", + "supported" + ], + [ + "high", + "high" + ], + [ + "diffusion", + "diffusion" + ], + [ + "parameter", + "parameter" + ], + [ + "Data", + "data", + "data" + ], + [ + "processes", + "processes" + ], + [ + "based", + "based" + ], + [ + "shape", + "shape" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "specific", + "specific" + ], + [ + "total", + "total" + ], + [ + "parameters", + "parameters" + ], + [ + "analysis", + "analysis" + ], + [ + "false", + "false" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_clean_qbx_clusters", + "docstring": "Render clusters sequentially to either accept or reject them based on\nvisual inspection. Useful for cleaning bundles for RBx, BST or for figures.\nThe VTK window does not handle streamline opacity well; this is a\nnormal rendering behavior.\nOften used in pair with scil_tractogram_qbx.py.\n\nKey mapping:\n- a/A: accept displayed clusters\n- r/R: reject displayed clusters\n- z/Z: Rewind one element\n- c/C: Stop rendering of the background concatenation of streamlines\n- q/Q: Early window exit, everything remaining will be rejected", + "help": "usage: scil_bundle_clean_qbx_clusters.py [-h]\n [--out_accepted_dir OUT_ACCEPTED_DIR]\n [--out_rejected_dir OUT_REJECTED_DIR]\n [--min_cluster_size MIN_CLUSTER_SIZE]\n [--background_opacity BACKGROUND_OPACITY]\n [--background_linewidth BACKGROUND_LINEWIDTH]\n [--clusters_linewidth CLUSTERS_LINEWIDTH]\n [--reference REFERENCE]\n [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...]\n out_accepted out_rejected\n\n Render clusters sequentially to either accept or reject them based on\n visual inspection. 
Useful for cleaning bundles for RBx, BST or for figures.\n The VTK window does not handle streamline opacity well; this is a\n normal rendering behavior.\n Often used in pair with scil_tractogram_qbx.py.\n\n Key mapping:\n - a/A: accept displayed clusters\n - r/R: reject displayed clusters\n - z/Z: Rewind one element\n - c/C: Stop rendering of the background concatenation of streamlines\n - q/Q: Early window exit, everything remaining will be rejected\n\npositional arguments:\n in_bundles List of the clusters filename.\n out_accepted Filename of the concatenated accepted clusters.\n out_rejected Filename of the concatenated rejected clusters.\n\noptions:\n -h, --help show this help message and exit\n --out_accepted_dir OUT_ACCEPTED_DIR\n Directory to save all accepted clusters separately.\n --out_rejected_dir OUT_REJECTED_DIR\n Directory to save all rejected clusters separately.\n --min_cluster_size MIN_CLUSTER_SIZE\n Minimum cluster size for consideration [1]. Must be at least 1.\n --background_opacity BACKGROUND_OPACITY\n Opacity of the background streamlines. Keep low between 0 and 0.5 [0.1].\n --background_linewidth BACKGROUND_LINEWIDTH\n Linewidth of the background streamlines [1].\n --clusters_linewidth CLUSTERS_LINEWIDTH\n Linewidth of the current cluster [1].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "visual", + "visual" + ], + [ + "streamlines", + "streamlines" + ], + [ + "key", + "key" + ], + [ + "invalid", + "invalid" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "high", + "low" + ], + [ + "rendered", + "rendering", + "rendering" + ], + [ + "based", + "based" + ], + [ + "bundles", + "bundles" + ], + [ + "exist", + "exist" + ], + [ + "level", + "level" + ], + [ + "rendered", + "rendering", + "render" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_compute_centroid", + "docstring": "Compute a single bundle centroid, using an 'infinite' QuickBundles threshold.\n\nFormerly: scil_compute_centroid.py", + "help": "usage: scil_bundle_compute_centroid.py [-h] [--nb_points NB_POINTS]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_centroid\n\nCompute a single bundle centroid, using an 'infinite' QuickBundles threshold.\n\nFormerly: scil_compute_centroid.py\n\npositional arguments:\n in_bundle Fiber bundle file.\n out_centroid Output centroid streamline filename.\n\noptions:\n -h, --help show this help message and exit\n --nb_points NB_POINTS\n Number of points defining the centroid streamline [20].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "level", + "level" + ], + [ + "bundles", + "bundle" + ], + [ + "streamline", + "streamline" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_compute_endpoints_map", + "docstring": "Computes the endpoint map of a bundle. The endpoint map is simply a count of\nthe number of streamlines that start or end in each voxel.\n\nThe idea is to estimate the cortical area affected by the bundle (assuming\nstreamlines start/end in the cortex).\n\nNote: If the streamlines are not ordered the head/tail are random and not\nreally two coherent groups. Use the following script to order streamlines:\nscil_tractogram_uniformize_endpoints.py\n\nFormerly: scil_compute_endpoints_map.py", + "help": "usage: scil_bundle_compute_endpoints_map.py [-h] [--swap] [--binary]\n [--nb_points NB_POINTS]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle endpoints_map_head\n endpoints_map_tail\n\nComputes the endpoint map of a bundle. The endpoint map is simply a count of\nthe number of streamlines that start or end in each voxel.\n\nThe idea is to estimate the cortical area affected by the bundle (assuming\nstreamlines start/end in the cortex).\n\nNote: If the streamlines are not ordered the head/tail are random and not\nreally two coherent groups. Use the following script to order streamlines:\nscil_tractogram_uniformize_endpoints.py\n\nFormerly: scil_compute_endpoints_map.py\n\npositional arguments:\n in_bundle Fiber bundle filename.\n endpoints_map_head Output endpoints map head filename.\n endpoints_map_tail Output endpoints map tail filename.\n\noptions:\n -h, --help show this help message and exit\n --swap Swap head<->tail convention. Can be useful when the reference is not in RAS.\n --binary Save outputs as a binary mask instead of a heat map.\n --nb_points NB_POINTS\n Number of points to consider at the extremities of the streamlines. [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "streamlines", + "streamlines" + ], + [ + "maps", + "map" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "cortex", + "cortical", + "cortex" + ], + [ + "random", + "random" + ], + [ + "area", + "area" + ], + [ + "cortex", + "cortical", + "parietal", + "cortical" + ], + [ + "binary", + "binary" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_diameter", + "docstring": "Script to estimate the diameter of bundle(s) along their length.\nSee also scil_bundle_shape_measures.py, which prints a quick estimate of\nthe diameter (volume / length). 
The computation here is more complex and done\nfor each section of the bundle.\n\nThe script expects:\n- bundles with coherent endpoints from scil_tractogram_uniformize_endpoints.py\n- labels maps with around 5-50 points from scil_bundle_label_map.py\n <5 is not enough, high risk of bad fit\n >50 is too much, high risk of bad fit\n- bundles that are close to a tube\n without major fanning in a single axis\n fanning is in 2 directions (uniform dispersion) good approximation\n\nThe script prints a JSON file with mean/std to be compatible with tractometry.\nWARNING: STD is in fact an ERROR measure from the fit and NOT an STD.\n\nSince the estimation and fit quality is not always intuitive for some bundles\nand the tube with varying diameter is not easy to color/visualize,\nthe script comes with its own VTK rendering to allow exploration of the data\n(optional).\n\nFormerly: scil_estimate_bundles_diameter.py", + "help": "usage: scil_bundle_diameter.py [-h]\n [--fitting_func {lin_up,lin_down,exp,inv,log}]\n [--show_rendering | --save_rendering OUT_FOLDER]\n [--wireframe] [--error_coloring]\n [--width WIDTH] [--opacity OPACITY]\n [--win_dims WIDTH HEIGHT] [--background R G B]\n [--reference REFERENCE] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...] in_labels\n [in_labels ...]\n\nScript to estimate the diameter of bundle(s) along their length.\nSee also scil_bundle_shape_measures.py, which prints a quick estimate of\nthe diameter (volume / length). The computation here is more complex and done\nfor each section of the bundle.\n\nThe script expects:\n- bundles with coherent endpoints from scil_tractogram_uniformize_endpoints.py\n- labels maps with around 5-50 points from scil_bundle_label_map.py\n <5 is not enough, high risk of bad fit\n >50 is too much, high risk of bad fit\n- bundles that are close to a tube\n without major fanning in a single axis\n fanning is in 2 directions (uniform dispersion) good approximation\n\nThe script prints a JSON file with mean/std to be compatible with tractometry.\nWARNING: STD is in fact an ERROR measure from the fit and NOT an STD.\n\nSince the estimation and fit quality is not always intuitive for some bundles\nand the tube with varying diameter is not easy to color/visualize,\nthe script comes with its own VTK rendering to allow exploration of the data\n(optional).\n\nFormerly: scil_estimate_bundles_diameter.py\n\npositional arguments:\n in_bundles List of tractography files.\n in_labels List of labels maps that match the bundles.\n\noptions:\n -h, --help show this help message and exit\n --fitting_func {lin_up,lin_down,exp,inv,log}\n Function to weigh points using their distance.\n [Default: None]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nVisualization options:\n --show_rendering Display VTK window (optional).\n --save_rendering OUT_FOLDER\n Save VTK render in the specified folder (optional)\n --wireframe Use wireframe for the tube rendering.\n --error_coloring Use the fitting error to color the tube.\n --width WIDTH Width of tubes or lines representing streamlines\n [Default: 0.2]\n --opacity OPACITY Opacity for the streamlines rendered with the tube.\n [Default: 0.2]\n --win_dims WIDTH HEIGHT\n The dimensions for the vtk window. 
[(1920, 1080)]\n --background R G B RGB values [0, 255] of the color of the background.\n [Default: [1, 1, 1]]\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "high", + "high" + ], + [ + "rendered", + "rendering", + "rendering" + ], + [ + "maps", + "maps" + ], + [ + "rendered", + "rendering", + "rendered" + ], + [ + "Data", + "data", + "data" + ], + [ + "function", + "functions", + "function" + ], + [ + "tractography", + "tractography" + ], + [ + "bundles", + "bundles" + ], + [ + "complex", + "complex" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "level", + "level" + ], + [ + "error", + "error" + ], + [ + "exploration", + "exploration" + ], + [ + "rendered", + "rendering", + "render" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_filter_by_occurence", + "docstring": "Use multiple versions of the same bundle and detect the most probable voxels by\nusing a threshold on the occurrence, voxel-wise. With threshold 0.5, this is\na majority vote. This is useful to generate an average representation from\nbundles of a given population.\n\nIf streamlines originate from the same tractogram (e.g., to compare various\nbundle clustering techniques), a streamline-wise vote is available to find the\nstreamlines most often included in the bundle.\n\nFormerly: scil_perform_majority_vote.py", + "help": "usage: scil_bundle_filter_by_occurence.py [-h] [--ratio_voxels [RATIO_VOXELS]]\n [--ratio_streamlines [RATIO_STREAMLINES]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...]\n output_prefix\n\nUse multiple versions of the same bundle and detect the most probable voxels by\nusing a threshold on the occurrence, voxel-wise. With threshold 0.5, this is\na majority vote. This is useful to generate an average representation from\nbundles of a given population.\n\nIf streamlines originate from the same tractogram (e.g., to compare various\nbundle clustering techniques), a streamline-wise vote is available to find the\nstreamlines most often included in the bundle.\n\nFormerly: scil_perform_majority_vote.py\n\npositional arguments:\n in_bundles Input bundles filename(s). All tractograms must have identical headers.\n output_prefix Output prefix. Ex: my_path/voting_. The suffixes will be: streamlines.trk and voxels.nii.gz\n\noptions:\n -h, --help show this help message and exit\n --ratio_voxels [RATIO_VOXELS]\n Threshold on the ratio of bundles with at least one streamline in a \n given voxel to consider it as part of the 'gold standard'. Default if set: 0.5.\n --ratio_streamlines [RATIO_STREAMLINES]\n If all bundles come from the same tractogram, use this to generate \n a voting for streamlines too. The associated value is the threshold on the ratio of \n bundles including the streamline to consider it as part of the 'gold standard'. [0.5]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "variety", + "various" + ], + [ + "population", + "population" + ], + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "voxel", + "voxels" + ], + [ + "occurrence", + "occurence" + ], + [ + "bundles", + "bundles" + ], + [ + "methods", + "techniques" + ], + [ + "average", + "average" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_generate_priors", + "docstring": "Generation of priors and enhanced-FOD from an example/template bundle.\nThe bundle must have been cleaned thoroughly before use. The E-FOD can then\nbe used for bundle-specific tractography, but not for FOD metrics.\n\nFormerly: scil_generate_priors_from_bundle.py", + "help": "usage: scil_bundle_generate_priors.py [-h]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--todi_sigma {0,1,2,3,4}]\n [--sf_threshold SF_THRESHOLD]\n [--out_prefix OUT_PREFIX]\n [--out_dir OUT_DIR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle in_fodf in_mask\n\nGeneration of priors and enhanced-FOD from an example/template bundle.\nThe bundle must have been cleaned thoroughly before use. The E-FOD can then\nbe used for bundle-specific tractography, but not for FOD metrics.\n\nFormerly: scil_generate_priors_from_bundle.py\n\npositional arguments:\n in_bundle Input bundle filename.\n in_fodf Input FOD filename.\n in_mask Mask to constrain the TODI spatial smoothing,\n for example a WM mask.\n\noptions:\n -h, --help show this help message and exit\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --todi_sigma {0,1,2,3,4}\n Smooth the orientation histogram.\n --sf_threshold SF_THRESHOLD\n Relative threshold for sf masking (0.0-1.0).\n --out_prefix OUT_PREFIX\n Add a prefix to all output filename, \n default is no prefix.\n --out_dir OUT_DIR Output directory for all generated files,\n default is current directory.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Rheault, Francois, et al. 
\"Bundle-specific tractography with\n incorporated anatomical and orientational priors.\"\n NeuroImage 186 (2019): 382-398\n \n", + "synonyms": [ + [ + "spatial", + "spatial" + ], + [ + "orientation", + "orientation" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "tractography", + "tractography" + ], + [ + "anatomical", + "anatomy", + "anatomical" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_label_map", + "docstring": "Compute the label image (Nifti) from a centroid and tractograms (all\nrepresenting the same bundle). The label image represents the coverage of\nthe bundle, segmented into regions labelled from 0 to --nb_pts, starting from\nthe head, ending in the tail.\n\nEach voxel will have the label of its nearest centroid point.\n\nThe number of labels will be the same as the centroid's number of points.\n\nFormerly: scil_compute_bundle_voxel_label_map.py", + "help": "usage: scil_bundle_label_map.py [-h] [--nb_pts NB_PTS] [--colormap COLORMAP]\n [--new_labelling] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...] in_centroid\n out_dir\n\nCompute the label image (Nifti) from a centroid and tractograms (all\nrepresenting the same bundle). The label image represents the coverage of\nthe bundle, segmented into regions labelled from 0 to --nb_pts, starting from\nthe head, ending in the tail.\n\nEach voxel will have the label of its nearest centroid point.\n\nThe number of labels will be the same as the centroid's number of points.\n\nFormerly: scil_compute_bundle_voxel_label_map.py\n\npositional arguments:\n in_bundles Fiber bundle file.\n in_centroid Centroid streamline corresponding to bundle.\n out_dir Directory to save all mapping and coloring files:\n - correlation_map.nii.gz\n - session_x/labels_map.nii.gz\n - session_x/distance_map.nii.gz\n - session_x/correlation_map.nii.gz\n - session_x/labels.trk\n - session_x/distance.trk\n - session_x/correlation.trk\n Where session_x is numbered with each bundle.\n\noptions:\n -h, --help show this help message and exit\n --nb_pts NB_PTS Number of divisions for the bundles.\n Default is the number of points of the centroid.\n --colormap COLORMAP Select the colormap for colored trk (data_per_point) [jet].\n --new_labelling Use the new labelling method (multi-centroids).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "methods", + "method" + ], + [ + "streamline", + "streamline" + ], + [ + "region", + "regions", + "regions" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "image", + "image" + ], + [ + "bundles", + "bundles" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_mean_fixel_afd", + "docstring": "Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps along a bundle.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof the bundle provided, averaged at every voxel.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_fixel_afd_from_bundles.py", + "help": "usage: scil_bundle_mean_fixel_afd.py [-h] [--length_weighting]\n [--reference REFERENCE]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle in_fodf afd_mean_map\n\nCompute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps along a bundle.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof the bundle provided, averaged at every voxel.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_fixel_afd_from_bundles.py\n\npositional arguments:\n in_bundle Path of the bundle file.\n in_fodf Path of the fODF volume in spherical harmonics (SH).\n afd_mean_map Path of the output mean AFD map.\n\noptions:\n -h, --help show this help message and exit\n --length_weighting If set, will weigh the AFD values according to segment lengths. [False]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReference:\n [1] Raffelt, D., Tournier, JD., Rose, S., Ridgway, GR., Henderson, R.,\n Crozier, S., Salvado, O., & Connelly, A. (2012).\n Apparent Fibre Density: a novel measure for the analysis of\n diffusion-weighted magnetic resonance images. 
NeuroImage, 59(4),\n 3976--3994.\n", + "synonyms": [ + [ + "streamline", + "streamline" + ], + [ + "maps", + "map" + ], + [ + "weighted", + "weighted" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "diffusion", + "diffusion" + ], + [ + "maps", + "maps" + ], + [ + "based", + "based" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "false", + "false" + ], + [ + "analysis", + "analysis" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_mean_fixel_afd_from_hdf5", + "docstring": "Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps for every connection within a hdf5 (.h5) file.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof each connection, averaged at every voxel.\n\nPlease use a hdf5 (.h5) file containing decomposed connections.\n\nFormerly: scil_compute_fixel_afd_from_hdf5.py", + "help": "usage: scil_bundle_mean_fixel_afd_from_hdf5.py [-h] [--length_weighting]\n [--processes NBR]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_hdf5 in_fodf out_hdf5\n\nCompute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps for every connection within a hdf5 (.h5) file.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof each connection, averaged at every voxel.\n\nPlease use a hdf5 (.h5) file containing decomposed connections.\n\nFormerly: scil_compute_fixel_afd_from_hdf5.py\n\npositional arguments:\n in_hdf5 HDF5 filename (.h5) containing decomposed connections.\n in_fodf Path of the fODF volume in spherical harmonics (SH).\n out_hdf5 Path of the output HDF5 filenames (.h5).\n\noptions:\n -h, --help show this help message and exit\n --length_weighting If set, will weigh the AFD values according to segment lengths. [False]\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReference:\n [1] Raffelt, D., Tournier, JD., Rose, S., Ridgway, GR., Henderson, R.,\n Crozier, S., Salvado, O., & Connelly, A. (2012).\n Apparent Fibre Density: a novel measure for the analysis of\n diffusion-weighted magnetic resonance images. 
NeuroImage,\n 59(4), 3976--3994.\n", + "synonyms": [ + [ + "streamline", + "streamline" + ], + [ + "weighted", + "weighted" + ], + [ + "diffusion", + "diffusion" + ], + [ + "maps", + "maps" + ], + [ + "processes", + "processes" + ], + [ + "connections", + "connections" + ], + [ + "based", + "based" + ], + [ + "connection", + "connection" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "false", + "false" + ], + [ + "analysis", + "analysis" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_mean_fixel_bingham_metric", + "docstring": "Given a bundle and Bingham coefficients, compute the average Bingham\nmetric at each voxel intersected by the bundle. Intersected voxels are\nfound by computing the intersection between the voxel grid and each streamline\nin the input tractogram.\n\nThis script behaves like scil_compute_mean_fixel_afd_from_bundles.py for fODFs,\nbut here for Bingham distributions. These add the unique possibility to capture\nfixel-based fiber spread (FS) and fiber fraction (FF). FD from the bingham\nshould be \"equivalent\" to the AFD_fixel we are used to.\n\nBingham coefficients volume must come from scil_fodf_to_bingham.py\nand Bingham metrics come from scil_bingham_metrics.py.\n\nBingham metrics are extracted from Bingham distributions fitted to fODF. There\nare as many values per voxel as there are lobes extracted. The value chosen\nfor a given voxel is the one belonging to the lobe best aligned with the\ncurrent streamline segment.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_mean_fixel_obe_metric_from_bundles.py", + "help": "usage: scil_bundle_mean_fixel_bingham_metric.py [-h] [--length_weighting]\n [--max_theta MAX_THETA]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bundle in_bingham\n in_bingham_metric out_mean_map\n\nGiven a bundle and Bingham coefficients, compute the average Bingham\nmetric at each voxel intersected by the bundle. Intersected voxels are\nfound by computing the intersection between the voxel grid and each streamline\nin the input tractogram.\n\nThis script behaves like scil_compute_mean_fixel_afd_from_bundles.py for fODFs,\nbut here for Bingham distributions. These add the unique possibility to capture\nfixel-based fiber spread (FS) and fiber fraction (FF). FD from the bingham\nshould be \"equivalent\" to the AFD_fixel we are used to.\n\nBingham coefficients volume must come from scil_fodf_to_bingham.py\nand Bingham metrics come from scil_bingham_metrics.py.\n\nBingham metrics are extracted from Bingham distributions fitted to fODF. There\nare as many values per voxel as there are lobes extracted. The value chosen\nfor a given voxel is the one belonging to the lobe best aligned with the\ncurrent streamline segment.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_mean_fixel_obe_metric_from_bundles.py\n\npositional arguments:\n in_bundle Path of the bundle file.\n in_bingham Path of the Bingham volume.\n in_bingham_metric Path of the Bingham metric (FD, FS, or FF) volume.\n out_mean_map Path of the output mean map.\n\noptions:\n -h, --help show this help message and exit\n --length_weighting If set, will weigh the FD values according to segment lengths.\n --max_theta MAX_THETA\n Maximum angle (in degrees) condition on lobe alignment. 
[60]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "lobe", + "lobe" + ], + [ + "streamline", + "streamline" + ], + [ + "maps", + "map" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "possibility", + "possibility" + ], + [ + "unique", + "unique" + ], + [ + "voxel", + "voxels" + ], + [ + "lobes", + "lobes" + ], + [ + "based", + "based" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "average", + "average" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "intersected", + "intersected" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_mean_std", + "docstring": "Compute mean and std for each metric.\n\n- Default: For the whole bundle. This is achieved by averaging the metric\n values of all voxels occupied by the bundle.\n- Option --per_point: For all streamline points in the bundle for each metric\n combination, along the bundle, i.e. for each point.\n **To create label_map and distance_map, see\n scil_bundle_label_map.py\n\nDensity weighting modifies the contribution of voxels with lower/higher\nstreamline count to reduce the influence of spurious streamlines.\n\nFormerly: scil_compute_bundle_mean_std_per_point.py or\nscil_compute_bundle_mean_std.py", + "help": "usage: scil_bundle_mean_std.py [-h] [--per_point in_labels | --include_dps]\n [--density_weighting]\n [--distance_weighting DISTANCE_NII]\n [--correlation_weighting CORRELATION_NII]\n [--out_json OUT_JSON] [--reference REFERENCE]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n in_bundle in_metrics [in_metrics ...]\n\nCompute mean and std for each metric.\n\n- Default: For the whole bundle. This is achieved by averaging the metric\n values of all voxels occupied by the bundle.\n- Option --per_point: For all streamline points in the bundle for each metric\n combination, along the bundle, i.e. for each point.\n **To create label_map and distance_map, see\n scil_bundle_label_map.py\n\nDensity weighting modifies the contribution of voxels with lower/higher\nstreamline count to reduce the influence of spurious streamlines.\n\nFormerly: scil_compute_bundle_mean_std_per_point.py or\nscil_compute_bundle_mean_std.py\n\npositional arguments:\n in_bundle Fiber bundle file to compute statistics on.\n in_metrics Nifti file to compute statistics on. Probably some tractometry measure(s) such as FA, MD, RD, ...\n\noptions:\n -h, --help show this help message and exit\n --per_point in_labels\n If set, computes the metrics per point instead of on the whole bundle.\n You must then give the label map (.nii.gz) of the corresponding fiber bundle.\n --include_dps Save values from data_per_streamline.\n Currently not offered with option --per_point.\n --density_weighting If set, weights statistics by the number of fibers passing through each voxel.\n --distance_weighting DISTANCE_NII\n If set, weights statistics by the inverse of the distance between a streamline and the centroid.\n --correlation_weighting CORRELATION_NII\n If set, weights statistics by the correlation strength between longitudinal data.\n --out_json OUT_JSON Path of the output file. 
If not given, the output is simply printed on screen.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "maps", + "map" + ], + [ + "higher", + "lower" + ], + [ + "create", + "create" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "Data", + "data", + "data" + ], + [ + "voxel", + "voxels" + ], + [ + "longitudinal", + "longitudinal" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "higher", + "higher" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_pairwise_comparison", + "docstring": "Evaluate pair-wise similarity measures of bundles.\nAll tractograms must be in the same space (aligned to one reference).\n\nFor the voxel representation, the computed similarity measures are:\n bundle_adjacency_voxels, dice_voxels, w_dice_voxels, density_correlation,\n volume_overlap, volume_overreach\nThe same measures are also evaluated for the endpoints.\n\nFor the streamline representation, the computed similarity measures are:\n bundle_adjacency_streamlines, dice_streamlines, streamlines_count_overlap,\n streamlines_count_overreach\n\nFormerly: scil_evaluate_bundles_pairwise_agreement_measures.py", + "help": "usage: scil_bundle_pairwise_comparison.py [-h] [--streamline_dice]\n [--bundle_adjency_no_overlap]\n [--disable_streamline_distance]\n [--single_compare SINGLE_COMPARE]\n [--keep_tmp] [--ratio]\n [--processes NBR]\n [--reference REFERENCE]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...] out_json\n\nEvaluate pair-wise similarity measures of bundles.\nAll tractograms must be in the same space (aligned to one reference).\n\nFor the voxel representation, the computed similarity measures are:\n bundle_adjacency_voxels, dice_voxels, w_dice_voxels, density_correlation,\n volume_overlap, volume_overreach\nThe same measures are also evaluated for the endpoints.\n\nFor the streamline representation, the computed similarity measures are:\n bundle_adjacency_streamlines, dice_streamlines, streamlines_count_overlap,\n streamlines_count_overreach\n\nFormerly: scil_evaluate_bundles_pairwise_agreement_measures.py\n\npositional arguments:\n in_bundles Path of the input bundles.\n out_json Path of the output json file.\n\noptions:\n -h, --help show this help message and exit\n --streamline_dice Compute streamline-wise dice coefficient.\n Tractograms must be identical [False].\n --bundle_adjency_no_overlap\n If set, do not count zeros in the average BA.\n --disable_streamline_distance\n Will not compute the streamlines distance \n [False].\n --single_compare SINGLE_COMPARE\n Compare inputs to this single file.\n --keep_tmp Will not delete the tmp folder at the end.\n --ratio Compute overlap and overreach as a ratio over the\n reference tractogram in a Tractometer-style way.\n Can only be used if also using the `single_compare` option.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "similarity", + "similarity" + ], + [ + "examine", + "evaluate" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "space", + "space" + ], + [ + "processes", + "processes" + ], + [ + "bundles", + "bundles" + ], + [ + "average", + "average" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "false", + "false" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_reject_outliers", + "docstring": "Clean a bundle (inliers/outliers) using hierarchical clustering.\nhttp://archive.ismrm.org/2015/2844.html\n\nIf spurious streamlines are dense, it is possible they will not be recognized\nas outliers. Manual cleaning may be required to overcome this limitation.", + "help": "usage: scil_bundle_reject_outliers.py [-h]\n [--remaining_bundle REMAINING_BUNDLE]\n [--alpha ALPHA] [--display_counts]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_bundle\n\nClean a bundle (inliers/outliers) using hierarchical clustering.\nhttp://archive.ismrm.org/2015/2844.html\n\nIf spurious streamlines are dense, it is possible they will not be recognized\nas outliers. Manual cleaning may be required to overcome this limitation.\n\npositional arguments:\n in_bundle Fiber bundle file to remove outliers from.\n out_bundle Fiber bundle without outliers.\n\noptions:\n -h, --help show this help message and exit\n --remaining_bundle REMAINING_BUNDLE\n Removed outliers.\n --alpha ALPHA Percent of the length of the tree that clusters of individual streamlines will be pruned. [0.6]\n --display_counts Print streamline count before and after filtering.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "limitation", + "limitations", + "limitation" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "individual", + "individual" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_score_many_bundles_one_tractogram", + "docstring": "This script is intended to score all bundles from a single tractogram. 
Each\nvalid bundle is compared to its ground truth.\nEx: It was used for the ISMRM 2015 Challenge scoring.\n\nSee also scil_bundle_score_same_bundle_many_segmentations.py to score many\nversions of the same bundle, compared to ONE ground truth / gold standard.\n\nThis script is the second part of script scil_score_tractogram, which also\nsegments the whole-brain tractogram into bundles first.\n\nHere we suppose that the bundles are already segmented and saved as follows:\n main_dir/\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (optional)\n segmented_WPC/*_wpc.trk (optional)\n IS.trk OR NC.trk (if segmented_IB is present)\n\nConfig file\n-----------\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n }\n}\n\nFormerly: scil_score_bundles.py", + "help": "usage: scil_bundle_score_many_bundles_one_tractogram.py [-h] [--json_prefix p]\n [--gt_dir DIR]\n [--indent INDENT]\n [--sort_keys]\n [--reference REFERENCE]\n [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n gt_config bundles_dir\n\nThis script is intended to score all bundles from a single tractogram. Each\nvalid bundle is compared to its ground truth.\nEx: It was used for the ISMRM 2015 Challenge scoring.\n\nSee also scil_bundle_score_same_bundle_many_segmentations.py to score many\nversions of the same bundle, compared to ONE ground truth / gold standard.\n\nThis script is the second part of script scil_score_tractogram, which also\nsegments the whole-brain tractogram into bundles first.\n\nHere we suppose that the bundles are already segmented and saved as follows:\n main_dir/\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (optional)\n segmented_WPC/*_wpc.trk (optional)\n IS.trk OR NC.trk (if segmented_IB is present)\n\nConfig file\n-----------\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n }\n}\n\nFormerly: scil_score_bundles.py\n\nTractometry\n-----------\nGlobal connectivity metrics:\n\n- Computed by default:\n - VS: valid streamlines, belonging to a bundle (i.e. respecting all the\n criteria for that bundle: endpoints, limit_mask, gt_mask).\n - IS: invalid streamlines. All other streamlines. IS = IC + NC.\n\n- Optional:\n - WPC: wrong path connections, streamlines connecting correct ROIs but not\n respecting the other criteria for that bundle. Such streamlines always\n exist but they are only saved separately if specified in the options.\n Else, they are merged back with the IS.\n By definition, WPC are only computed if \"limits masks\" are provided.\n - IC: invalid connections, streamlines joining an incorrect combination of\n ROIs. Use carefully, quality depends on the quality of your ROIs and no\n analysis is done on the shape of the streamlines.\n - NC: no connections. 
Invalid streamlines minus invalid connections.\n\n- Fidelity metrics:\n - OL: Overlap. Percentage of ground truth voxels containing streamline(s)\n for a given bundle.\n - OR: Overreach. Amount of voxels containing streamline(s) when they\n shouldn't, for a given bundle. We compute two versions:\n OR_pct_vs = divided by the total number of voxels covered by the bundle.\n (percentage of the voxels touched by VS).\n Values range between 0 and 100%. Values are not defined when we\n recovered no streamline for a bundle, but we set the OR_pct_vs to 0\n in that case.\n OR_pct_gt = divided by the total size of the ground truth bundle mask.\n Values could be higher than 100%.\n - f1 score, which is the same as the Dice score.\n\npositional arguments:\n gt_config .json dict configured as specified above.\n bundles_dir Directory containing all bundles.\n (Ex: Output directory for scil_score_tractogram).\n It is expected to contain a file IS.trk and \n files segmented_VB/*_VS.trk, with, possibly, files \n segmented_WPC/*_wpc.trk and segmented_IC/\n\noptions:\n -h, --help show this help message and exit\n --json_prefix p Prefix of the output json file. Ex: 'study_x_'.\n Suffix will be results.json. File will be saved inside bundles_dir.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nAdditions to gt_config:\n --gt_dir DIR Root path of the ground truth files listed in the gt_config.\n If not set, filenames in the config file are considered \n as absolute paths.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "connect", + "connecting", + "connects", + "connecting" + ], + [ + "streamline", + "streamline" + ], + [ + "connectivity", + "connectivity" + ], + [ + "invalid", + "invalid" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "global", + "global" + ], + [ + "bundles", + "bundle" + ], + [ + "connections", + "connections" + ], + [ + "voxel", + "voxels" + ], + [ + "result", + "result" + ], + [ + "valid", + "valid" + ], + [ + "bundles", + "bundles" + ], + [ + "exist", + "exist" + ], + [ + "shape", + "shape" + ], + [ + "considered", + "considered" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "total", + "total" + ], + [ + "higher", + "higher" + ], + [ + "analysis", + "analysis" + ], + [ + "defined", + "defined" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_score_same_bundle_many_segmentations", + "docstring": "This script is intended to score many versions of the same bundle, compared to\nONE ground truth / gold standard.\n\nSee also scil_bundle_score_many_bundles_one_tractogram.py to score all bundles\nfrom a single tractogram by comparing each valid bundle to its ground truth.\n\nAll tractograms must be in the same space (aligned to one reference).\nThe measures can be applied to a voxel-wise or streamline-wise representation.\n\nA gold standard must be provided for the desired representation.\nA gold standard would be a segmentation from an expert or a group of experts.\nIf only the streamline-wise representation is 
provided without a voxel-wise\ngold standard, it will be computed from the provided streamlines.\nAt least one of the two representations is required.\n\nThe gold standard tractogram is the tractogram (whole brain most likely) from\nwhich the segmentation is performed.\nThe gold standard tracking mask is the tracking mask used by the tractography\nalgorithm to generate the gold standard tractogram.\n\nThe computed binary classification measures are:\nsensitivity, specificity, precision, accuracy, dice, kappa, youden for both\nthe streamline and voxel representation (if provided).\n\nFormerly: scil_evaluate_bundles_binary_classification_measures.py", + "help": "usage: scil_bundle_score_same_bundle_many_segmentations.py [-h]\n [--streamlines_measures GOLD_STANDARD_STREAMLINES TRACTOGRAM]\n [--voxels_measures GOLD_STANDARD_MASK TRACKING MASK]\n [--processes NBR]\n [--reference REFERENCE]\n [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bundles\n [in_bundles ...]\n out_json\n\nThis script is intended to score many versions of the same bundle, compared to\nONE ground truth / gold standard.\n\nSee also scil_bundle_score_many_bundles_one_tractogram.py to score all bundles\nfrom a single tractogram by comparing each valid bundle to its ground truth.\n\nAll tractograms must be in the same space (aligned to one reference).\nThe measures can be applied to a voxel-wise or streamline-wise representation.\n\nA gold standard must be provided for the desired representation.\nA gold standard would be a segmentation from an expert or a group of experts.\nIf only the streamline-wise representation is provided without a voxel-wise\ngold standard, it will be computed from the provided streamlines.\nAt least one of the two representations is required.\n\nThe gold standard tractogram is the tractogram (whole brain most likely) from\nwhich the segmentation is performed.\nThe gold standard tracking mask is the tracking mask used by the tractography\nalgorithm to generate the gold standard tractogram.\n\nThe computed binary classification measures are:\nsensitivity, specificity, precision, accuracy, dice, kappa, youden for both\nthe streamline and voxel representation (if provided).\n\nFormerly: scil_evaluate_bundles_binary_classification_measures.py\n\npositional arguments:\n in_bundles Path of the input bundles.\n out_json Path of the output json.\n\noptions:\n -h, --help show this help message and exit\n --streamlines_measures GOLD_STANDARD_STREAMLINES TRACTOGRAM\n The gold standard bundle and the original tractogram.\n --voxels_measures GOLD_STANDARD_MASK TRACKING MASK\n The gold standard mask and the original tracking mask.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "precision", + "precision" + ], + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "space", + "space" + ], + [ + "processes", + "processes" + ], + [ + "valid", + "valid" + ], + [ + "tractography", + "tractography" + ], + [ + "bundles", + "bundles" + ], + [ + "applied", + "applied" + ], + [ + "binary", + "binary" + ], + [ + "voxel", + "voxel" + ], + [ + "tracking", + "tracking" + ], + [ + "level", + "level" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_shape_measures", + "docstring": "Evaluate basic measurements of bundle(s).\n\nThe computed measures are:\n - volume_info: volume, volume_endpoints\n - streamlines_info: streamlines_count, avg_length (in mm or in number of\n points), average step size, min_length, max_length.\n ** You may also get this information with scil_tractogram_print_info.py.\n - shape_info: span, curl, diameter, elongation, surface area,\n irregularity, end surface area, radius, end surface irregularity,\n mean_curvature, fractal dimension.\n ** The diameter, here, is a simple estimation using volume / length.\n For a more complex calculation, see scil_bundle_diameter.py.\n\nWith more than one bundle, the measures are averaged over bundles. All\ntractograms must be in the same space.\n\nThe set average contains the average measures of all input bundles. The\nmeasures that are dependent on the streamline count are weighted by the number\nof streamlines of each bundle. Each of these average measures is computed by\nfirst summing the product of a measure and the streamline count of each\nbundle and dividing the sum by the total number of streamlines. Thus, measures\nincluding length and span are essentially averages of all the streamlines.\nOther streamline-related set measures are computed with other set averages,\nwhereas bundle-related measures are computed as an average of all bundles.\nThese measures include volume and surface area.\n\nThe fractal dimension is dependent on the voxel size and the number of voxels.\nIf data comparison is performed, the bundles MUST be in the same resolution.\n\nFormerly: scil_compute_bundle_volume.py or\nscil_evaluate_bundles_individual_measures.py", + "help": "usage: scil_bundle_shape_measures.py [-h] [--out_json OUT_JSON]\n [--group_statistics] [--no_uniformize]\n [--reference REFERENCE] [--processes NBR]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...]\n\nEvaluate basic measurements of bundle(s).\n\nThe computed measures are:\n - volume_info: volume, volume_endpoints\n - streamlines_info: streamlines_count, avg_length (in mm or in number of\n points), average step size, min_length, max_length.\n ** You may also get this information with scil_tractogram_print_info.py.\n - shape_info: span, curl, diameter, elongation, surface area,\n irregularity, end surface area, radius, end surface irregularity,\n mean_curvature, fractal dimension.\n ** The diameter, here, is a simple estimation using volume / length.\n For a more complex calculation, see scil_bundle_diameter.py.\n\nWith more than one bundle, the measures are averaged over bundles. 
All\ntractograms must be in the same space.\n\nThe set average contains the average measures of all input bundles. The\nmeasures that are dependent on the streamline count are weighted by the number\nof streamlines of each bundle. Each of these average measures is computed by\nfirst summing the product of a measure and the streamline count of each\nbundle and dividing the sum by the total number of streamlines. Thus, measures\nincluding length and span are essentially averages of all the streamlines.\nOther streamline-related set measures are computed with other set averages,\nwhereas bundle-related measures are computed as an average of all bundles.\nThese measures include volume and surface area.\n\nThe fractal dimension is dependent on the voxel size and the number of voxels.\nIf data comparison is performed, the bundles MUST be in the same resolution.\n\nFormerly: scil_compute_bundle_volume.py or\nscil_evaluate_bundles_individual_measures.py\n\npositional arguments:\n in_bundles Path of the input bundles.\n\noptions:\n -h, --help show this help message and exit\n --out_json OUT_JSON Path of the output file. If not given, the output is simply printed on screen.\n --group_statistics Show average measures [False].\n --no_uniformize Do NOT automatically uniformize endpoints for the endpoints-related metrics.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\nReferences:\n[1] Fang-Cheng Yeh. 2020.\n Shape analysis of the human association pathways. NeuroImage.\n", + "synonyms": [ + [ + "human", + "human" + ], + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "weighted", + "weighted" + ], + [ + "examine", + "evaluate" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "association", + "association" + ], + [ + "space", + "space" + ], + [ + "pathway", + "pathways", + "pathways" + ], + [ + "Data", + "data", + "data" + ], + [ + "processes", + "processes" + ], + [ + "step", + "step" + ], + [ + "area", + "area" + ], + [ + "voxel", + "voxels" + ], + [ + "bundles", + "bundles" + ], + [ + "shape", + "shape" + ], + [ + "complex", + "complex" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "average", + "average" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "total", + "total" + ], + [ + "false", + "false" + ], + [ + "analysis", + "analysis" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_uniformize_endpoints", + "docstring": "Uniformize streamlines' endpoints according to a defined axis.\nUseful for tractometry or model creation.\n\nThe --auto option will automatically calculate the main orientation.\nIf the input bundle is poorly defined, it is possible the heuristic will be wrong.\n\nThe default is to flip each streamline so its first point's coordinate in the\ndefined axis is smaller than its last point (--swap does the opposite).\n\nThe --target_roi option will use the barycenter of the target mask to define\nthe axis. The target mask can be a binary mask or an atlas. 
If an atlas is\nused, labels are expected in the form of --target_roi atlas.nii.gz 2 3 5:7.\n\nFormerly: scil_uniformize_streamlines_endpoints.py", + "help": "usage: scil_bundle_uniformize_endpoints.py [-h]\n (--axis {x,y,z} | --auto | --centroid tractogram | --target_roi TARGET_ROI [TARGET_ROI ...])\n [--swap] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_bundle\n\nUniformize streamlines' endpoints according to a defined axis.\nUseful for tractometry or model creation.\n\nThe --auto option will automatically calculate the main orientation.\nIf the input bundle is poorly defined, it is possible the heuristic will be wrong.\n\nThe default is to flip each streamline so its first point's coordinate in the\ndefined axis is smaller than its last point (--swap does the opposite).\n\nThe --target_roi option will use the barycenter of the target mask to define\nthe axis. The target mask can be a binary mask or an atlas. If an atlas is\nused, labels are expected in the form of --target_roi atlas.nii.gz 2 3 5:7.\n\nFormerly: scil_uniformize_streamlines_endpoints.py\n\npositional arguments:\n in_bundle Input path of the tractography file.\n out_bundle Output path of the uniformized file.\n\noptions:\n -h, --help show this help message and exit\n --axis {x,y,z} Match endpoints of the streamlines along this axis.\n SUGGESTION: Commissural = x, Association = y, Projection = z\n --auto Match endpoints of the streamlines along an automatically determined axis.\n --centroid tractogram\n Match endpoints of the streamlines to align them to a reference unique streamline (centroid).\n --target_roi TARGET_ROI [TARGET_ROI ...]\n Provide a target ROI: either a binary mask or a label map and the labels to use.\n Will align heads to be closest to the mask barycenter.\n (atlas: if no labels are provided, all labels will be used.)\n --swap Swap head <-> tail convention. Can be useful when the reference is not in RAS.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "maps", + "map" + ], + [ + "main", + "main" + ], + [ + "form", + "form" + ], + [ + "orientation", + "orientation" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "association", + "association" + ], + [ + "larger", + "smaller" + ], + [ + "unique", + "unique" + ], + [ + "projection", + "projection" + ], + [ + "atlas", + "atlas" + ], + [ + "tractography", + "tractography" + ], + [ + "binary", + "binary" + ], + [ + "level", + "level" + ], + [ + "defined", + "defined" + ] + ], + "keywords": [] + }, + { + "name": "scil_bundle_volume_per_label", + "docstring": "Compute bundle volume per label in mm3. This script supports anisotropic voxel\nresolution. 
Volume is estimated by counting the number of voxels occupied by\neach label and multiplying it by the volume of a single voxel.\n\nThe labels can be obtained by scil_bundle_label_map.py.\n\nThis estimation is typically performed at a resolution around 1mm3.\n\nTo get the volume and other measures directly from the (whole) bundle, use\nscil_bundle_shape_measures.py.\n\nFormerly: scil_compute_bundle_volume_per_label.py", + "help": "usage: scil_bundle_volume_per_label.py [-h] [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n voxel_label_map bundle_name\n\nCompute bundle volume per label in mm3. This script supports anisotropic voxel\nresolution. Volume is estimated by counting the number of voxels occupied by\neach label and multiplying it by the volume of a single voxel.\n\nThe labels can be obtained by scil_bundle_label_map.py.\n\nThis estimation is typically performed at a resolution around 1mm3.\n\nTo get the volume and other measures directly from the (whole) bundle, use\nscil_bundle_shape_measures.py.\n\nFormerly: scil_compute_bundle_volume_per_label.py\n\npositional arguments:\n voxel_label_map Voxel label map of the fiber bundle.\n bundle_name Bundle name.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "bundles", + "bundle" + ], + [ + "voxel", + "voxels" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_connectivity_compare_populations", + "docstring": "Performs a network-based statistical comparison for populations g1 and g2. The\noutput is a matrix of the same size as the input connectivity matrices, with\np-values at each edge.\nAll input matrices must have the same shape (NxN). 
For paired t-test, both\ngroups must have the same number of observations.\n\nFor example, if you have streamline count weighted matrices for an MCI and a\ncontrol group and you want to investigate differences in their connectomes:\n >>> scil_connectivity_compare_populations.py pval.npy\n --g1 MCI/*_sc.npy --g2 CTL/*_sc.npy\n\n--filtering_mask will simply multiply all input matrices by the binary\nmask before performing the statistical comparison. Reduces the number of\nstatistical tests, useful when using --fdr or --bonferroni.\n\nFormerly: scil_compare_connectivity.py\n\npositional arguments:\n out_pval_matrix Output matrix (.npy) containing the edges p-value.\n\noptions:\n -h, --help show this help message and exit\n --in_g1 IN_G1 [IN_G1 ...]\n List of matrices for the first population (.npy).\n --in_g2 IN_G2 [IN_G2 ...]\n List of matrices for the second population (.npy).\n --tail {left,right,both}\n Enables specification of an alternative hypothesis:\n left: mean of g1 < mean of g2,\n right: mean of g2 < mean of g1,\n both: both means are not equal (default).\n --paired Use paired sample t-test instead of population t-test.\n --in_g1 and --in_g2 must be ordered the same way.\n --fdr Perform a false discovery rate (FDR) correction for the p-values.\n Uses the number of non-zero edges as number of tests (value between 0.01 and 0.1).\n --bonferroni Perform a Bonferroni correction for the p-values.\n Uses the number of non-zero edges as number of tests.\n --p_threshold THRESH OUT_FILE\n Threshold the final p-value matrix and save the binary matrix (.npy).\n --filtering_mask FILTERING_MASK\n Binary filtering mask (.npy) to apply before computing the measures.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Rubinov, Mikail, and Olaf Sporns. \"Complex network measures of brain\n connectivity: uses and interpretations.\" Neuroimage 52.3 (2010):\n 1059-1069.\n[2] Zalesky, Andrew, Alex Fornito, and Edward T. Bullmore. \"Network-based\n statistic: identifying differences in brain networks.\" Neuroimage 53.4\n (2010): 1197-1207.\n", + "synonyms": [ + [ + "population", + "population" + ], + [ + "network", + "networks", + "network" + ], + [ + "streamline", + "streamline" + ], + [ + "weighted", + "weighted" + ], + [ + "network", + "networks", + "networks" + ], + [ + "differences", + "differences" + ], + [ + "connectivity", + "connectivity" + ], + [ + "matrices", + "matrices" + ], + [ + "applied", + "apply" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "left", + "left" + ], + [ + "based", + "based" + ], + [ + "population", + "populations" + ], + [ + "shape", + "shape" + ], + [ + "complex", + "complex" + ], + [ + "discovery", + "discovery" + ], + [ + "binary", + "binary" + ], + [ + "level", + "level" + ], + [ + "false", + "false" + ], + [ + "brain", + "brain" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_connectivity_compute_matrices", + "docstring": "This script computes a variety of measures in the form of connectivity\nmatrices. This script is made to follow\nscil_tractogram_segment_bundles_for_connectivity.py and\nuses the same labels list as input.\n\nThe script expects a folder containing all relevant bundles following the\nnaming convention LABEL1_LABEL2.trk and a text file containing the list of\nlabels that should be part of the matrices. 
The ordering of labels in the\nmatrices will follow the same order as the list.\nThis script only generates matrices in the form of arrays; it does not visualize\nor reorder the labels (node).\n\nThe parameter --similarity expects a folder with density maps\n(LABEL1_LABEL2.nii.gz) following the same naming convention as the input\ndirectory.\nThe bundles should be averaged versions in the same space. This will\ncompute the weighted-dice between each node and their homologous average\nversion.\n\nThe parameters --metrics can be used more than once and expect a map (t1, fa,\netc.) in the same space and each will generate a matrix. The average value in\nthe volume occupied by the bundle will be reported in the matrices nodes.\n\nThe parameters --maps can be used more than once and expect a folder with\npre-computed maps (LABEL1_LABEL2.nii.gz) following the same naming convention\nas the input directory. Each will generate a matrix. The average non-zero\nvalue in the map will be reported in the matrices nodes.\n\nThe parameters --lesion_load will compute 3 lesion(s) related matrices:\nlesion_count.npy, lesion_vol.npy, lesion_sc.npy and put them inside a\nspecified folder. They represent the number of lesions, the total volume of\nlesion(s) and the total number of streamlines going through the lesion(s) for each\nconnection. Each connection can be seen as a 'bundle' and then something\nsimilar to scil_analyse_lesion_load.py is run for each 'bundle'.\n\nFormerly: scil_compute_connectivity.py", + "help": "usage: scil_connectivity_compute_matrices.py [-h] [--volume OUT_FILE]\n [--streamline_count OUT_FILE]\n [--length OUT_FILE]\n [--similarity IN_FOLDER OUT_FILE]\n [--maps IN_FOLDER OUT_FILE]\n [--metrics IN_FILE OUT_FILE]\n [--lesion_load IN_FILE OUT_DIR]\n [--min_lesion_vol MIN_LESION_VOL]\n [--density_weighting]\n [--no_self_connection]\n [--include_dps OUT_DIR]\n [--force_labels_list FORCE_LABELS_LIST]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_hdf5 in_labels\n\nThis script computes a variety of measures in the form of connectivity\nmatrices. This script is made to follow\nscil_tractogram_segment_bundles_for_connectivity.py and\nuses the same labels list as input.\n\nThe script expects a folder containing all relevant bundles following the\nnaming convention LABEL1_LABEL2.trk and a text file containing the list of\nlabels that should be part of the matrices. The ordering of labels in the\nmatrices will follow the same order as the list.\nThis script only generates matrices in the form of arrays; it does not visualize\nor reorder the labels (node).\n\nThe parameter --similarity expects a folder with density maps\n(LABEL1_LABEL2.nii.gz) following the same naming convention as the input\ndirectory.\nThe bundles should be averaged versions in the same space. This will\ncompute the weighted-dice between each node and their homologous average\nversion.\n\nThe parameters --metrics can be used more than once and expect a map (t1, fa,\netc.) in the same space and each will generate a matrix. The average value in\nthe volume occupied by the bundle will be reported in the matrices nodes.\n\nThe parameters --maps can be used more than once and expect a folder with\npre-computed maps (LABEL1_LABEL2.nii.gz) following the same naming convention\nas the input directory. Each will generate a matrix. 
The average non-zero\nvalue in the map will be reported in the matrices nodes.\n\nThe parameters --lesion_load will compute 3 lesion(s) related matrices:\nlesion_count.npy, lesion_vol.npy, lesion_sc.npy and put them inside a\nspecified folder. They represent the number of lesions, the total volume of\nlesion(s) and the total number of streamlines going through the lesion(s) for each\nconnection. Each connection can be seen as a 'bundle' and then something\nsimilar to scil_analyse_lesion_load.py is run for each 'bundle'.\n\nFormerly: scil_compute_connectivity.py\n\npositional arguments:\n in_hdf5 Input filename for the hdf5 container (.h5).\n Obtained from scil_tractogram_segment_bundles_for_connectivity.py.\n in_labels Labels file name (nifti).\n This generates a NxN connectivity matrix.\n\noptions:\n -h, --help show this help message and exit\n --volume OUT_FILE Output file for the volume weighted matrix (.npy).\n --streamline_count OUT_FILE\n Output file for the streamline count weighted matrix (.npy).\n --length OUT_FILE Output file for the length weighted matrix (.npy).\n --similarity IN_FOLDER OUT_FILE\n Input folder containing the averaged bundle density\n maps (.nii.gz) and output file for the similarity weighted matrix (.npy).\n --maps IN_FOLDER OUT_FILE\n Input folder containing pre-computed maps (.nii.gz)\n and output file for the weighted matrix (.npy).\n --metrics IN_FILE OUT_FILE\n Input (.nii.gz). and output file (.npy) for a metric weighted matrix.\n --lesion_load IN_FILE OUT_DIR\n Input binary mask (.nii.gz) and output directory for all lesion-related matrices.\n --min_lesion_vol MIN_LESION_VOL\n Minimum lesion volume in mm3 [7].\n --density_weighting Use density-weighting for the metric weighted matrix.\n --no_self_connection Eliminate the diagonal from the matrices.\n --include_dps OUT_DIR\n Save matrices from data_per_streamline in the output directory.\n COMMIT-related values will be summed instead of averaged.\n Will always overwrite files.\n --force_labels_list FORCE_LABELS_LIST\n Path to a labels list (.txt) in case of missing labels in the atlas.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "reported", + "reported" + ], + [ + "order", + "order" + ], + [ + "streamlines", + "streamlines" + ], + [ + "represent", + "represent" + ], + [ + "streamline", + "streamline" + ], + [ + "maps", + "map" + ], + [ + "weighted", + "weighted" + ], + [ + "similarity", + "similarity" + ], + [ + "form", + "form" + ], + [ + "connectivity", + "connectivity" + ], + [ + "variety", + "variety" + ], + [ + "matrices", + "matrices" + ], + [ + "bundles", + "bundle" + ], + [ + "maps", + "maps" + ], + [ + "parameter", + "parameter" + ], + [ + "space", + "space" + ], + [ + "processes", + "processes" + ], + [ + "atlas", + "atlas" + ], + [ + "bundles", + "bundles" + ], + [ + "connection", + "connection" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "average", + "average" + ], + [ + "level", + "level" + ], + [ + "total", + "total" + ], + [ + "parameters", + "parameters" + ], + [ + "naming", + "naming" + ] + ], + "keywords": [] + }, + { + "name": "scil_connectivity_compute_pca", + "docstring": "Script to compute PCA analysis on diffusion metrics. Output returned is all\nsignificant principal components (e.g. 
presenting eigenvalues > 1) in a\nconnectivity matrix format. This script can take into account all edges from\nevery subject in a population or only non-zero edges across all subjects.\n\nThe script can take directly as input a connectoflow output folder. Simply use\nthe --input_connectoflow flag. For other types of folder input, the script\nexpects a single folder containing all matrices for all subjects.\nExample:\n [in_folder]\n |--- sub-01_ad.npy\n |--- sub-01_md.npy\n |--- sub-02_ad.npy\n |--- sub-02_md.npy\n |--- ...\n\nThe plots, tables and principal components matrices will be outputted in the\nfolder designated by the argument. If you want to move your\nprincipal components matrices back into your connectoflow output, you can use a\nsimilar bash command for all principal components:\nfor sub in `cat list_id.txt`;\ndo\n cp out_folder/${sub}_PC1.npy connectoflow_output/$sub/Compute_Connectivity/\ndone\n\nInterpretation of resulting principal components can be done by evaluating the\nloading values for each metric. A value near 0 means that this metric doesn't\ncontribute to this specific component, whereas high positive or negative values\nmean a larger contribution. Components can then be labeled based on which\nmetric contributes the most. For example, a principal component showing a\nhigh loading for afd_fixel and near 0 loading for all other metrics can be\ninterpreted as axonal density (see Gagnon et al. 2022 for this specific example\nor ref [3] for an introduction to PCA).\n\nEXAMPLE USAGE:\nscil_connectivity_compute_pca.py input_folder/ output_folder/\n --metrics ad fa md rd [...] --list_ids list_ids.txt", + "help": "usage: scil_connectivity_compute_pca.py [-h] --metrics METRICS [METRICS ...]\n --list_ids FILE [--not_only_common]\n [--input_connectoflow]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_folder out_folder\n\nScript to compute PCA analysis on diffusion metrics. Output returned is all\nsignificant principal components (e.g. presenting eigenvalues > 1) in a\nconnectivity matrix format. This script can take into account all edges from\nevery subject in a population or only non-zero edges across all subjects.\n\nThe script can take directly as input a connectoflow output folder. Simply use\nthe --input_connectoflow flag. For other types of folder input, the script\nexpects a single folder containing all matrices for all subjects.\nExample:\n [in_folder]\n |--- sub-01_ad.npy\n |--- sub-01_md.npy\n |--- sub-02_ad.npy\n |--- sub-02_md.npy\n |--- ...\n\nThe plots, tables and principal components matrices will be outputted in the\nfolder designated by the argument. If you want to move your\nprincipal components matrices back into your connectoflow output, you can use a\nsimilar bash command for all principal components:\nfor sub in `cat list_id.txt`;\ndo\n cp out_folder/${sub}_PC1.npy connectoflow_output/$sub/Compute_Connectivity/\ndone\n\nInterpretation of resulting principal components can be done by evaluating the\nloading values for each metric. A value near 0 means that this metric doesn't\ncontribute to this specific component, whereas high positive or negative values\nmean a larger contribution. Components can then be labeled based on which\nmetric contributes the most. For example, a principal component showing a\nhigh loading for afd_fixel and near 0 loading for all other metrics can be\ninterpreted as axonal density (see Gagnon et al. 
2022 for this specific example\nor ref [3] for an introduction to PCA).\n\nEXAMPLE USAGE:\nscil_connectivity_compute_pca.py input_folder/ output_folder/\n --metrics ad fa md rd [...] --list_ids list_ids.txt\n\npositional arguments:\n in_folder Path to the input folder. See explanation above for its expected organization.\n out_folder Path to the output folder to export graphs, tables and principal \n components matrices.\n\noptions:\n -h, --help show this help message and exit\n --metrics METRICS [METRICS ...]\n Suffixes of all metrics to include in PCA analysis (ex: ad md fa rd). \n They must be immediately followed by the .npy extension.\n --list_ids FILE Path to a .txt file containing a list of all ids.\n --not_only_common If true, will include all edges from all subjects and not only \n common edges (Not recommended)\n --input_connectoflow If true, script will assume the input folder is a Connectoflow output.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Chamberland M, Raven EP, Genc S, Duffy K, Descoteaux M, Parker GD, Tax CMW,\n Jones DK. Dimensionality reduction of diffusion MRI measures for improved\n tractometry of the human brain. Neuroimage. 2019 Oct 15;200:89-100.\n doi: 10.1016/j.neuroimage.2019.06.020. Epub 2019 Jun 20. PMID: 31228638;\n PMCID: PMC6711466.\n[2] Gagnon A., Grenier G., Bocti C., Gillet V., Lepage J.-F., Baccarelli A. A.,\n Posner J., Descoteaux M., Takser L. (2022). White matter microstructural\n variability linked to differential attentional skills and impulsive behavior\n in a pediatric population. Cerebral Cortex.\n https://doi.org/10.1093/cercor/bhac180\n[3] https://towardsdatascience.com/what-are-pca-loadings-and-biplots-9a7897f2e559\n \n", + "synonyms": [ + [ + "human", + "human" + ], + [ + "population", + "population" + ], + [ + "principal", + "principal" + ], + [ + "subject", + "subject" + ], + [ + "connectivity", + "connectivity" + ], + [ + "white", + "white" + ], + [ + "larger", + "larger" + ], + [ + "positive", + "negative" + ], + [ + "matrices", + "matrices" + ], + [ + "high", + "high" + ], + [ + "diffusion", + "diffusion" + ], + [ + "cortex", + "cortical", + "cortex" + ], + [ + "variability", + "variability" + ], + [ + "positive", + "positive" + ], + [ + "true", + "true" + ], + [ + "subjects", + "subjects" + ], + [ + "based", + "based" + ], + [ + "matter", + "matter" + ], + [ + "specific", + "specific" + ], + [ + "level", + "level" + ], + [ + "axonal", + "axonal" + ], + [ + "analysis", + "analysis" + ], + [ + "highest", + "highest" + ], + [ + "brain", + "brain" + ], + [ + "presented", + "presenting" + ] + ], + "keywords": [] + }, + { + "name": "scil_connectivity_filter", + "docstring": "Script to facilitate filtering of connectivity matrices.\nThe same could be achieved through a complex sequence of\nscil_connectivity_math.py.\n\nCan be used with any connectivity matrix from\nscil_connectivity_compute_matrices.py.\n\nFor example, a simple filtering (Jasmeen style) would be:\nscil_connectivity_filter.py out_mask.npy\n --greater_than */sc.npy 1 0.90\n --lower_than */sim.npy 2 0.90\n --greater_than */len.npy 40 0.90 -v;\n\nThis will result in a binary mask where each node with a value of 1 represents\na node with at least 90% of the population having at least 1 streamline,\n90% of the population is similar to the average (2mm) and 90% of the\npopulation having at least 40mm of average streamlines 
length.\n\nAll operations are strictly > or <; there is no >= or <=.\n\n--greater_than or --lower_than expect the same convention:\n MATRICES_LIST VALUE_THR POPULATION_PERC\nIt is strongly recommended (but not enforced) that the same number of\nconnectivity matrices is used for each condition.\n\nThis script performs an intersection of all conditions, meaning that all\nconditions must be met in order not to be filtered.\nIf the user wants to manually handle the requirements, --keep_condition_count\ncan be used and the result manually binarized using scil_connectivity_math.py\n\nFormerly: scil_filter_connectivity.py", + "help": "usage: scil_connectivity_filter.py [-h] [--lower_than [LOWER_THAN ...]]\n [--greater_than [GREATER_THAN ...]]\n [--keep_condition_count] [--inverse_mask]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_matrix_mask\n\nScript to facilitate filtering of connectivity matrices.\nThe same could be achieved through a complex sequence of\nscil_connectivity_math.py.\n\nCan be used with any connectivity matrix from\nscil_connectivity_compute_matrices.py.\n\nFor example, a simple filtering (Jasmeen style) would be:\nscil_connectivity_filter.py out_mask.npy\n --greater_than */sc.npy 1 0.90\n --lower_than */sim.npy 2 0.90\n --greater_than */len.npy 40 0.90 -v;\n\nThis will result in a binary mask where each node with a value of 1 represents\na node with at least 90% of the population having at least 1 streamline,\n90% of the population is similar to the average (2mm) and 90% of the\npopulation having at least 40mm of average streamlines length.\n\nAll operations are strictly > or <; there is no >= or <=.\n\n--greater_than or --lower_than expect the same convention:\n MATRICES_LIST VALUE_THR POPULATION_PERC\nIt is strongly recommended (but not enforced) that the same number of\nconnectivity matrices is used for each condition.\n\nThis script performs an intersection of all conditions, meaning that all\nconditions must be met in order not to be filtered.\nIf the user wants to manually handle the requirements, --keep_condition_count\ncan be used and the result manually binarized using scil_connectivity_math.py\n\nFormerly: scil_filter_connectivity.py\n\npositional arguments:\n out_matrix_mask Output mask (matrix) resulting from the provided conditions (.npy).\n\noptions:\n -h, --help show this help message and exit\n --lower_than [LOWER_THAN ...]\n Lower than condition using the VALUE_THR in at least POPULATION_PERC (from MATRICES_LIST).\n See description for more details.\n --greater_than [GREATER_THAN ...]\n Greater than condition using the VALUE_THR in at least POPULATION_PERC (from MATRICES_LIST).\n See description for more details.\n --keep_condition_count\n Report the number of condition(s) that pass/fail rather than a binary mask.\n --inverse_mask Inverse the final mask. 0 where all conditions are respected and 1 where at least one fails.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "population", + "population" + ], + [ + "greater", + "greater" + ], + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "higher", + "lower" + ], + [ + "meaning", + "meaning" + ], + [ + "connectivity", + "connectivity" + ], + [ + "matrices", + "matrices" + ], + [ + "pass", + "pass" + ], + [ + "conditions", + "conditions" + ], + [ + "result", + "result" + ], + [ + "complex", + "complex" + ], + [ + "binary", + "binary" + ], + [ + "average", + "average" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_connectivity_graph_measures", + "docstring": "Evaluate graph theory measures from connectivity matrices.\nA length weighted and a streamline count weighted matrix are required since\nsome measures require one or the other.\n\nThis script evaluates the measures one subject at a time. To generate a\npopulation dictionary (similarly to other scil_connectivity_*.py scripts), use\nthe --append_json option as well as using the same output filename.\n>>> for i in hcp/*/; do scil_connectivity_graph_measures.py ${i}/sc_prob.npy\n ${i}/len_prob.npy hcp_prob.json --append_json --avg_node_wise; done\n\nSome measures output one value per node; the default behavior is to list\nthem all. To obtain only the average, use the\n--avg_node_wise option.\n\nThe computed connectivity measures are:\ncentrality, modularity, assortativity, participation, clustering,\nnodal_strength, local_efficiency, global_efficiency, density, rich_club,\npath_length, edge_count, omega, sigma\n\nFor more details about the measures, please refer to\n- https://sites.google.com/site/bctnet/measures\n- https://github.com/aestrivex/bctpy/wiki\n\nThis script is under the GNU GPLv3 license, for more detail please refer to\nhttps://www.gnu.org/licenses/gpl-3.0.en.html\n\nFormerly: scil_evaluate_connectivity_graph_measures.py", + "help": "usage: scil_connectivity_graph_measures.py [-h]\n [--filtering_mask FILTERING_MASK]\n [--avg_node_wise] [--append_json]\n [--small_world] [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_conn_matrix in_length_matrix\n out_json\n\nEvaluate graph theory measures from connectivity matrices.\nA length weighted and a streamline count weighted matrix are required since\nsome measures require one or the other.\n\nThis script evaluates the measures one subject at a time. To generate a\npopulation dictionary (similarly to other scil_connectivity_*.py scripts), use\nthe --append_json option as well as using the same output filename.\n>>> for i in hcp/*/; do scil_connectivity_graph_measures.py ${i}/sc_prob.npy\n ${i}/len_prob.npy hcp_prob.json --append_json --avg_node_wise; done\n\nSome measures output one value per node; the default behavior is to list\nthem all. 
To obtain only the average, use the\n--avg_node_wise option.\n\nThe computed connectivity measures are:\ncentrality, modularity, assortativity, participation, clustering,\nnodal_strength, local_efficiency, global_efficiency, density, rich_club,\npath_length, edge_count, omega, sigma\n\nFor more details about the measures, please refer to\n- https://sites.google.com/site/bctnet/measures\n- https://github.com/aestrivex/bctpy/wiki\n\nThis script is under the GNU GPLv3 license, for more detail please refer to\nhttps://www.gnu.org/licenses/gpl-3.0.en.html\n\nFormerly: scil_evaluate_connectivity_graph_measures.py\n\npositional arguments:\n in_conn_matrix Input connectivity matrix (.npy).\n Typically a streamline count weighted matrix.\n in_length_matrix Input length weighted matrix (.npy).\n out_json Path of the output json.\n\noptions:\n -h, --help show this help message and exit\n --filtering_mask FILTERING_MASK\n Binary filtering mask to apply before computing the measures.\n --avg_node_wise Return a single value for node-wise measures.\n --append_json If the file already exists, will append to the dictionary.\n --small_world Compute measures related to small-worldness (omega and sigma).\n This option is much slower.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\n[1] Rubinov, Mikail, and Olaf Sporns. \"Complex network measures of brain\n connectivity: uses and interpretations.\" Neuroimage 52.3 (2010):\n 1059-1069.\n", + "synonyms": [ + [ + "population", + "population" + ], + [ + "streamline", + "streamline" + ], + [ + "network", + "networks", + "network" + ], + [ + "weighted", + "weighted" + ], + [ + "subject", + "subject" + ], + [ + "examine", + "evaluate" + ], + [ + "connectivity", + "connectivity" + ], + [ + "matrices", + "matrices" + ], + [ + "applied", + "apply" + ], + [ + "large", + "small" + ], + [ + "complex", + "complex" + ], + [ + "binary", + "binary" + ], + [ + "average", + "average" + ], + [ + "level", + "level" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_connectivity_hdf5_average_density_map", + "docstring": "Compute a density map for each connection from an hdf5 file.\nTypically used after scil_tractogram_segment_bundles_for_connectivity.py in\norder to obtain the average density map of each connection to allow the use\nof --similarity in scil_connectivity_compute_matrices.py.\n\nThis script is parallelized, but will run much slower on non-SSD if too many\nprocesses are used. 
The output is a directory containing the thousands of\nconnections:\nout_dir/\n |-- LABEL1_LABEL1.nii.gz\n |-- LABEL1_LABEL2.nii.gz\n |-- [...]\n |-- LABEL90_LABEL90.nii.gz\n\nFormerly: scil_compute_hdf5_average_density_map.py", + "help": "usage: scil_connectivity_hdf5_average_density_map.py [-h] [--binary]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_hdf5 [in_hdf5 ...]\n out_dir\n\nCompute a density map for each connection from an hdf5 file.\nTypically used after scil_tractogram_segment_bundles_for_connectivity.py in\norder to obtain the average density map of each connection to allow the use\nof --similarity in scil_connectivity_compute_matrices.py.\n\nThis script is parallelized, but will run much slower on non-SSD if too many\nprocesses are used. The output is a directory containing the thousands of\nconnections:\nout_dir/\n |-- LABEL1_LABEL1.nii.gz\n |-- LABEL1_LABEL2.nii.gz\n |-- [...]\n |-- LABEL90_LABEL90.nii.gz\n\nFormerly: scil_compute_hdf5_average_density_map.py\n\npositional arguments:\n in_hdf5 List of HDF5 filenames (.h5) from scil_tractogram_segment_bundles_for_connectivity.py.\n out_dir Path of the output directory.\n\noptions:\n -h, --help show this help message and exit\n --binary Binarize density maps before the population average.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "population", + "population" + ], + [ + "maps", + "map" + ], + [ + "similarity", + "similarity" + ], + [ + "maps", + "maps" + ], + [ + "processes", + "processes" + ], + [ + "connections", + "connections" + ], + [ + "connection", + "connection" + ], + [ + "binary", + "binary" + ], + [ + "average", + "average" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_connectivity_math", + "docstring": "Performs an operation on a list of matrices. The supported operations are\nlisted below.\n\nSome operations such as multiplication or addition accept float values as\nparameters instead of matrices.\n> scil_connectivity_math.py multiplication mat.npy 10 mult_10.npy", + "help": "usage: scil_connectivity_math.py [-h] [--data_type DATA_TYPE]\n [--exclude_background]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference}\n in_matrices [in_matrices ...] out_matrix\n\nPerforms an operation on a list of matrices. 
The supported operations are\nlisted below.\n\nSome operations such as multiplication or addition accept float values as\nparameters instead of matrices.\n> scil_connectivity_math.py multiplication mat.npy 10 mult_10.npy\n\n lower_threshold: MAT THRESHOLD\n All values below the threshold will be set to zero.\n All values above the threshold will be set to one.\n \n upper_threshold: MAT THRESHOLD\n All values below the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_eq: MAT THRESHOLD\n All values below the threshold will be set to zero.\n All values above or equal the threshold will be set to one.\n \n upper_threshold_eq: MAT THRESHOLD\n All values below or equal the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_otsu: MAT\n All values below or equal to the Otsu threshold will be set to zero.\n All values above the Otsu threshold will be set to one.\n (Otsu's method is an algorithm to perform automatic matrix thresholding\n of the background.)\n \n upper_threshold_otsu: MAT\n All values below the Otsu threshold will be set to one.\n All values above or equal to the Otsu threshold will be set to zero.\n Equivalent to lower_threshold_otsu followed by an inversion.\n \n lower_clip: MAT THRESHOLD\n All values below the threshold will be set to threshold.\n \n upper_clip: MAT THRESHOLD\n All values above the threshold will be set to threshold.\n \n absolute_value: MAT\n All negative values will become positive.\n \n round: MAT\n Round all decimal values to the closest integer.\n \n ceil: MAT\n Ceil all decimal values to the next integer.\n \n floor: MAT\n Floor all decimal values to the previous integer.\n \n normalize_sum: MAT\n Normalize the matrix so the sum of all values is one.\n \n normalize_max: MAT\n Normalize the matrix so the maximum value is one.\n \n log_10: MAT\n Apply a log (base 10) to all non-zero values of a matrix.\n \n log_e: MAT\n Apply a natural log to all non-zero values of a matrix.\n \n convert: MAT\n Perform no operation, but simply change the data type.\n \n invert: MAT\n Operation on binary matrix to interchange 0s and 1s in a binary mask.\n \n addition: MATs\n Add multiple matrices together.\n \n subtraction: MAT_1 MAT_2\n Subtract first matrix by the second (MAT_1 - MAT_2).\n \n multiplication: MATs\n Multiply multiple matrices together (danger of underflow and overflow)\n \n division: MAT_1 MAT_2\n Divide first matrix by the second (danger of underflow and overflow)\n Zero values are ignored and excluded from the operation.\n \n mean: MATs\n Compute the mean of matrices.\n If a single 4D matrix is provided, average along the last dimension.\n \n std: MATs\n Compute the standard deviation of multiple matrices.\n If a single 4D matrix is provided, compute the STD along the last\n dimension.\n \n correlation: MATs\n Computes the correlation of the 3x3x3 neighborhood of each voxel, for\n all pairs of input matrices. The final matrix is the average correlation\n (through all pairs).\n For a given pair of matrices\n - Background is considered as 0. May lead to very high correlations\n close to the border of the background regions, or very poor ones if the\n background in both matrices differs.\n - Images are zero-padded. 
For the same reason as above, this may lead to\n very high correlations if you have data close to the border of the\n matrix.\n - NaN values (if a voxel's neighborhood is entirely uniform; std 0) are\n replaced by\n - 0 if at least one neighborhood entirely contained background.\n - 1 if the voxel's neighborhoods are uniform in both matrices.\n - 0 if the voxel's neighborhood is uniform in one matrix, but not\n the other.\n\n UPDATE AS OF VERSION 2.0: Random noise was previously added in the\n process to help avoid NaN values. Now replaced by either 0 or 1 as\n explained above.\n \n union: MATs\n Operation on binary matrix to keep voxels that are non-zero in at\n least one file.\n \n intersection: MATs\n Operation on binary matrix to keep the voxels that are non-zero and\n present in all files.\n \n difference: MAT_1 MAT_2\n Operation on binary matrix to keep voxels from the first file that are\n not in the second file (non-zero).\n \n\npositional arguments:\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference}\n The type of operation to be performed on the matrices.\n in_matrices The list of matrix files or parameters.\n out_matrix Output matrix path.\n\noptions:\n -h, --help show this help message and exit\n --data_type DATA_TYPE\n Data type of the output image. Use the format: uint8, float16, int32.\n --exclude_background Does not affect the background of the original matrices.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "process", + "process" + ], + [ + "methods", + "method" + ], + [ + "region", + "regions", + "regions" + ], + [ + "positive", + "negative" + ], + [ + "matrices", + "matrices" + ], + [ + "supported", + "supported" + ], + [ + "image", + "image" + ], + [ + "high", + "high" + ], + [ + "algorithm", + "algorithm" + ], + [ + "applied", + "apply" + ], + [ + "positive", + "positive" + ], + [ + "random", + "random" + ], + [ + "Data", + "data", + "data" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "voxel", + "voxels" + ], + [ + "binary", + "binary" + ], + [ + "average", + "average" + ], + [ + "considered", + "considered" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "higher", + "higher" + ], + [ + "parameters", + "parameters" + ], + [ + "difference", + "difference" + ] + ], + "keywords": [] + }, + { + "name": "scil_connectivity_normalize", + "docstring": "Normalize a connectivity matrix coming from\nscil_tractogram_segment_bundles_for_connectivity.py.\n3 categories of normalization are available:\n-- Edge attributes\n - length: Multiply each edge by the average bundle length.\n Compensate for far away connections when using interface seeding.\n Cannot be used with inverse_length.\n\n - inverse_length: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n Cannot be used with length.\n\n - bundle_volume: Divide each edge by the average bundle volume.\n Compensate for big connections when using white matter seeding.\n\n-- Node attributes (Mutually exclusive)\n - parcel_volume: Divide each edge by the sum of node volume.\n Compensate for the 
likelihood of ending in the node.\n Compensate for seeding bias when using interface seeding.\n\n - parcel_surface: Divide each edge by the sum of the node surface.\n Compensate for the likelihood of ending in the node.\n Compensate for seeding bias when using interface seeding.\n\n-- Matrix scaling (Mutually exclusive)\n - max_at_one: Maximum value of the matrix will be set to one.\n - sum_to_one: Ensure the sum of all edge weights is one.\n - log_10: Apply a base 10 logarithm to all edge weights.\n\nThe volume and length matrices should come from the\nscil_tractogram_segment_bundles_for_connectivity.py script.\n\nA review of the types of normalization is available in:\nColon-Perez, Luis M., et al. \"Dimensionless, scale-invariant, edge weight\nmetric for the study of complex structural networks.\" PLOS one 10.7 (2015).\n\nHowever, the edge weighting proposed in this publication is not\nimplemented.\n\nFormerly: scil_normalize_connectivity.py", + "help": "usage: scil_connectivity_normalize.py [-h]\n [--length LENGTH_MATRIX | --inverse_length LENGTH_MATRIX]\n [--bundle_volume VOLUME_MATRIX]\n [--parcel_volume ATLAS LABELS_LIST | --parcel_surface ATLAS LABELS_LIST]\n [--max_at_one | --sum_to_one | --log_10]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_matrix out_matrix\n\nNormalize a connectivity matrix coming from\nscil_tractogram_segment_bundles_for_connectivity.py.\n3 categories of normalization are available:\n-- Edge attributes\n - length: Multiply each edge by the average bundle length.\n Compensate for far away connections when using interface seeding.\n Cannot be used with inverse_length.\n\n - inverse_length: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n Cannot be used with length.\n\n - bundle_volume: Divide each edge by the average bundle volume.\n Compensate for big connections when using white matter seeding.\n\n-- Node attributes (Mutually exclusive)\n - parcel_volume: Divide each edge by the sum of node volume.\n Compensate for the likelihood of ending in the node.\n Compensate for seeding bias when using interface seeding.\n\n - parcel_surface: Divide each edge by the sum of the node surface.\n Compensate for the likelihood of ending in the node.\n Compensate for seeding bias when using interface seeding.\n\n-- Matrix scaling (Mutually exclusive)\n - max_at_one: Maximum value of the matrix will be set to one.\n - sum_to_one: Ensure the sum of all edge weights is one.\n - log_10: Apply a base 10 logarithm to all edge weights.\n\nThe volume and length matrices should come from the\nscil_tractogram_segment_bundles_for_connectivity.py script.\n\nA review of the types of normalization is available in:\nColon-Perez, Luis M., et al. \"Dimensionless, scale-invariant, edge weight\nmetric for the study of complex structural networks.\" PLOS one 10.7 (2015).\n\nHowever, the edge weighting proposed in this publication is not\nimplemented.\n\nFormerly: scil_normalize_connectivity.py\n\npositional arguments:\n in_matrix Input connectivity matrix. This is typically a streamline_count matrix (.npy).\n out_matrix Output normalized matrix (.npy).\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nEdge-wise options:\n --length LENGTH_MATRIX\n Length matrix used for edge-wise multiplication.\n --inverse_length LENGTH_MATRIX\n Length matrix used for edge-wise division.\n --bundle_volume VOLUME_MATRIX\n Volume matrix used for edge-wise division.\n --parcel_volume ATLAS LABELS_LIST\n Atlas and labels list for edge-wise division.\n --parcel_surface ATLAS LABELS_LIST\n Atlas and labels list for edge-wise division.\n\nScaling options:\n --max_at_one Scale matrix with maximum value at one.\n --sum_to_one Scale matrix with sum of all elements at one.\n --log_10 Apply a base 10 logarithm to the matrix.\n", + "synonyms": [ + [ + "seeding", + "seeding" + ], + [ + "network", + "networks", + "networks" + ], + [ + "connectivity", + "connectivity" + ], + [ + "white", + "white" + ], + [ + "presented", + "presented" + ], + [ + "probability", + "likelihood" + ], + [ + "bundles", + "bundle" + ], + [ + "applied", + "apply" + ], + [ + "connections", + "connections" + ], + [ + "atlas", + "atlas" + ], + [ + "matter", + "matter" + ], + [ + "complex", + "complex" + ], + [ + "structural", + "structural" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "average", + "average" + ], + [ + "studies", + "study", + "study" + ], + [ + "proposed", + "proposed" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_connectivity_pairwise_agreement", + "docstring": "Evaluate pair-wise similarity measures of connectivity matrices.\n\nThe computed similarity measures are:\nsum of squared differences and Pearson correlation coefficient\n\nFormerly: scil_evaluate_connectivity_pairwaise_agreement_measures.py", + "help": "usage: scil_connectivity_pairwise_agreement.py [-h] [--single_compare matrix]\n [--normalize] [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_matrices [in_matrices ...]\n out_json\n\nEvaluate pair-wise similarity measures of connectivity matrices.\n\nThe computed similarity measures are:\nsum of squared differences and Pearson correlation coefficient\n\nFormerly: scil_evaluate_connectivity_pairwaise_agreement_measures.py\n\npositional arguments:\n in_matrices Path of the input matrices.\n out_json Path of the output json file.\n\noptions:\n -h, --help show this help message and exit\n --single_compare matrix\n Compare inputs to this single file.\n (Else, compute all pairs in in_matrices).\n --normalize If set, will normalize all matrices from zero to one.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "similarity", + "similarity" + ], + [ + "examine", + "evaluate" + ], + [ + "connectivity", + "connectivity" + ], + [ + "matrices", + "matrices" + ], + [ + "level", + "level" + ], + [ + "difference", + "difference" + ] + ], + "keywords": [] + }, + { + "name": "scil_connectivity_print_filenames", + "docstring": "Output the list of filenames using the coordinates from a binary connectivity\nmatrix. 
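A minimal sketch of the agreement measures named above (sum of squared differences and Pearson correlation), using plain numpy. The function name pairwise_agreement and its normalize flag are hypothetical, chosen to mirror the script's --normalize option; this is not the script's exact implementation.

import numpy as np

def pairwise_agreement(m1, m2, normalize=False):
    # Optionally rescale each matrix to [0, 1], as --normalize does.
    if normalize:
        m1 = (m1 - m1.min()) / (m1.max() - m1.min())
        m2 = (m2 - m2.min()) / (m2.max() - m2.min())
    # Sum of squared differences between the two matrices.
    ssd = float(np.sum((m1 - m2) ** 2))
    # Pearson correlation coefficient over the flattened entries.
    r = float(np.corrcoef(m1.ravel(), m2.ravel())[0, 1])
    return {"ssd": ssd, "pearson": r}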
Typically used to move around files that are considered valid after\nthe scil_connectivity_filter.py script.\n\nExample:\n# Keep connections with more than 1000 streamlines for 100% of a population\nscil_connectivity_filter.py filtering_mask.npy\n --greater_than */streamlines_count.npy 1000 1.0\nscil_connectivity_print_filenames.py filtering_mask.npy\n labels_list.txt pass.txt\nfor file in $(cat pass.txt);\n do mv ${SOMEWHERE}/${file} ${SOMEWHERE_ELSE}/;\ndone\n\nFormerly: scil_print_connectivity_filenames.py", + "help": "usage: scil_connectivity_print_filenames.py [-h] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_matrix labels_list out_txt\n\nOutput the list of filenames using the coordinates from a binary connectivity\nmatrix. Typically used to move around files that are considered valid after\nthe scil_connectivity_filter.py script.\n\nExample:\n# Keep connections with more than 1000 streamlines for 100% of a population\nscil_connectivity_filter.py filtering_mask.npy\n --greater_than */streamlines_count.npy 1000 1.0\nscil_connectivity_print_filenames.py filtering_mask.npy\n labels_list.txt pass.txt\nfor file in $(cat pass.txt);\n do mv ${SOMEWHERE}/${file} ${SOMEWHERE_ELSE}/;\ndone\n\nFormerly: scil_print_connectivity_filenames.py\n\npositional arguments:\n in_matrix Binary matrix in numpy (.npy) format.\n Typically from scil_connectivity_filter.py\n labels_list List saved by the decomposition script.\n out_txt Output text file containing all filenames.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "population", + "population" + ], + [ + "streamlines", + "streamlines" + ], + [ + "connectivity", + "connectivity" + ], + [ + "pass", + "pass" + ], + [ + "connections", + "connections" + ], + [ + "valid", + "valid" + ], + [ + "binary", + "binary" + ], + [ + "considered", + "considered" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_connectivity_reorder_rois", + "docstring": "Re-order one or many connectivity matrices using a text file format.\nThe first row is the (x) and the second row the (y); values must be space separated.\nThe resulting matrix does not have to be square (supports an unequal number of\nx and y).\n\nThe values refer to the coordinates (starting at 0) in the matrix, but if the\n--labels_list parameter is used, the values will refer to the label which will\nbe converted to the appropriate coordinates. This file must be the same as the\none provided to scil_tractogram_segment_bundles_for_connectivity.py.\n\nTo subsequently use scil_visualize_connectivity.py with a lookup table, you\nmust use a label-based reordering json and use --labels_list.\n\nYou can also use the Optimal Leaf Ordering (OLO) algorithm to transform a\nsparse matrix into an ordering that reduces the matrix bandwidth. The output\nfile can then be re-used with --in_ordering. 
Only one input can be used with\nthis option; we recommend an average streamline count or volume matrix.\n\nFormerly: scil_reorder_connectivity.py", + "help": "usage: scil_connectivity_reorder_rois.py [-h]\n (--in_ordering IN_ORDERING | --optimal_leaf_ordering OUT_FILE)\n [--out_suffix OUT_SUFFIX]\n [--out_dir OUT_DIR]\n [--labels_list LABELS_LIST]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_matrices [in_matrices ...]\n\nRe-order one or many connectivity matrices using a text file format.\nThe first row is the (x) and the second row the (y); values must be space separated.\nThe resulting matrix does not have to be square (supports an unequal number of\nx and y).\n\nThe values refer to the coordinates (starting at 0) in the matrix, but if the\n--labels_list parameter is used, the values will refer to the label which will\nbe converted to the appropriate coordinates. This file must be the same as the\none provided to scil_tractogram_segment_bundles_for_connectivity.py.\n\nTo subsequently use scil_visualize_connectivity.py with a lookup table, you\nmust use a label-based reordering json and use --labels_list.\n\nYou can also use the Optimal Leaf Ordering (OLO) algorithm to transform a\nsparse matrix into an ordering that reduces the matrix bandwidth. The output\nfile can then be re-used with --in_ordering. Only one input can be used with\nthis option; we recommend an average streamline count or volume matrix.\n\nFormerly: scil_reorder_connectivity.py\n\npositional arguments:\n in_matrices Connectivity matrices in .npy or .txt format.\n\noptions:\n -h, --help show this help message and exit\n --in_ordering IN_ORDERING\n Txt file with the first row as x and second as y.\n --optimal_leaf_ordering OUT_FILE\n Output a text file with an ordering that aligns structures along the diagonal.\n --out_suffix OUT_SUFFIX\n Suffix for the output matrix filename.\n --out_dir OUT_DIR Output directory for the re-ordered matrices.\n --labels_list LABELS_LIST\n List saved by the decomposition script;\n --in_ordering must contain labels rather than coordinates (.txt).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Rubinov, Mikail, and Olaf Sporns. 
\"Complex network measures of brain\n connectivity: uses and interpretations.\" Neuroimage 52.3 (2010):\n 1059-1069.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "streamline", + "streamline" + ], + [ + "network", + "networks", + "network" + ], + [ + "connectivity", + "connectivity" + ], + [ + "matrices", + "matrices" + ], + [ + "algorithm", + "algorithm" + ], + [ + "space", + "space" + ], + [ + "parameter", + "parameter" + ], + [ + "based", + "based" + ], + [ + "complex", + "complex" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "average", + "average" + ], + [ + "level", + "level" + ], + [ + "subsequently", + "subsequently" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_denoising_nlmeans", + "docstring": "Script to denoise a dataset with the Non Local Means algorithm.\n\nFormerly: scil_run_nlmeans.py", + "help": "usage: scil_denoising_nlmeans.py [-h] [--mask] [--sigma float] [--log LOGFILE]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_image out_image number_coils\n\nScript to denoise a dataset with the Non Local Means algorithm.\n\nFormerly: scil_run_nlmeans.py\n\npositional arguments:\n in_image Path of the image file to denoise.\n out_image Path to save the denoised image file.\n number_coils Number of receiver coils of the scanner.\n Use number_coils=1 in the case of a SENSE (GE, Philips) reconstruction and \n number_coils >= 1 for GRAPPA reconstruction (Siemens). number_coils=4 works well for the 1.5T\n in Sherbrooke. Use number_coils=0 if the noise is considered Gaussian distributed.\n\noptions:\n -h, --help show this help message and exit\n --mask Path to a binary mask. Only the data inside the mask will be used for computations\n --sigma float The standard deviation of the noise to use instead of computing it automatically.\n --log LOGFILE If supplied, name of the text file to store the logs.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "image", + "image" + ], + [ + "algorithm", + "algorithm" + ], + [ + "Data", + "data", + "data" + ], + [ + "processes", + "processes" + ], + [ + "binary", + "binary" + ], + [ + "considered", + "considered" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_dki_metrics", + "docstring": "Script to compute the Diffusion Kurtosis Imaging (DKI) and Mean Signal DKI\n(MSDKI) metrics. DKI is a multi-shell diffusion model. The input DWI needs\nto be multi-shell, i.e. multi-bvalued.\n\nSince the diffusion kurtosis model involves the estimation of a large number\nof parameters and since the non-Gaussian components of the diffusion signal\nare more sensitive to artefacts, you should really denoise your DWI volume\nbefore using this DKI script (e.g. scil_denoising_nlmeans.py). Moreover, to\nremove biases due to fiber dispersion, fiber crossings and other mesoscopic\nproperties of the underlying tissue, MSDKI does a powder-average of DWI for all\ndirections, thus removing the orientational dependencies and creating an\nalternative mean kurtosis map.\n\nDKI is also known to be vulnerable to artefacted voxels induced by the\nlow radial diffusivities of aligned white matter (CC, CST voxels). 
Since it is\nvery hard to capture non-Gaussian information due to the low decays in radial\ndirection, its kurtosis estimates have very low robustness.\nNoisy kurtosis estimates tend to be negative and their absolute values can be\norders of magnitude higher than the typical kurtosis values. Consequently,\nthese negative kurtosis values will heavily propagate to the mean and radial\nkurtosis metrics. This is well-reported in [Rafael Henriques MSc thesis 2012,\nchapter 3]. Two ways to overcome this issue: i) compute the kurtosis values\nfrom powder-averaged MSDKI, and ii) perform 3D Gaussian smoothing. On\npowder-averaged signal decays, you don't have this low diffusivity issue and\nyour kurtosis estimates have much higher precision (additionally they are\nindependent of the fODF).\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section. If --not_all is set, only the metrics specified\nexplicitly by the flags will be output.\n\nThis script directly comes from the DIPY example gallery and references\ntherein.\n[1] examples_built/reconst_dki/#example-reconst-dki\n[2] examples_built/reconst_msdki/#example-reconst-msdki\n\nFormerly: scil_compute_kurtosis_metrics.py", + "help": "usage: scil_dki_metrics.py [-h] [--mask MASK] [--tolerance tol]\n [--skip_b0_check] [--min_k MIN_K] [--max_k MAX_K]\n [--smooth SMOOTH] [--not_all] [--ak file]\n [--mk file] [--rk file] [--msk file]\n [--dki_fa file] [--dki_md file] [--dki_ad file]\n [--dki_rd file] [--dki_residual file] [--msd file]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec\n\nScript to compute the Diffusion Kurtosis Imaging (DKI) and Mean Signal DKI\n(MSDKI) metrics. DKI is a multi-shell diffusion model. The input DWI needs\nto be multi-shell, i.e. multi-bvalued.\n\nSince the diffusion kurtosis model involves the estimation of a large number\nof parameters and since the non-Gaussian components of the diffusion signal\nare more sensitive to artefacts, you should really denoise your DWI volume\nbefore using this DKI script (e.g. scil_denoising_nlmeans.py). Moreover, to\nremove biases due to fiber dispersion, fiber crossings and other mesoscopic\nproperties of the underlying tissue, MSDKI does a powder-average of DWI for all\ndirections, thus removing the orientational dependencies and creating an\nalternative mean kurtosis map.\n\nDKI is also known to be vulnerable to artefacted voxels induced by the\nlow radial diffusivities of aligned white matter (CC, CST voxels). Since it is\nvery hard to capture non-Gaussian information due to the low decays in radial\ndirection, its kurtosis estimates have very low robustness.\nNoisy kurtosis estimates tend to be negative and their absolute values can be\norders of magnitude higher than the typical kurtosis values. Consequently,\nthese negative kurtosis values will heavily propagate to the mean and radial\nkurtosis metrics. This is well-reported in [Rafael Henriques MSc thesis 2012,\nchapter 3]. Two ways to overcome this issue: i) compute the kurtosis values\nfrom powder-averaged MSDKI, and ii) perform 3D Gaussian smoothing. On\npowder-averaged signal decays, you don't have this low diffusivity issue and\nyour kurtosis estimates have much higher precision (additionally they are\nindependent of the fODF).\n\nBy default, will output all available metrics, using default names. 
Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section. If --not_all is set, only the metrics specified\nexplicitly by the flags will be output.\n\nThis script directly comes from the DIPY example gallery and references\ntherein.\n[1] examples_built/reconst_dki/#example-reconst-dki\n[2] examples_built/reconst_msdki/#example-reconst-msdki\n\nFormerly: scil_compute_kurtosis_metrics.py\n\npositional arguments:\n in_dwi Path of the input multi-shell DWI dataset.\n in_bval Path of the b-value file, in FSL format.\n in_bvec Path of the b-vector file, in FSL format.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computations and reconstruction.\n [Default: None]\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --min_k MIN_K Minimum kurtosis value in the output maps \n (ak, mk, rk). In theory, -3/7 is the min kurtosis \n limit for regions that consist of water confined \n to spherical pores (see DIPY example and \n documentation) [Default: 0.0].\n --max_k MAX_K Maximum kurtosis value in the output maps \n (ak, mk, rk). In theory, 10 is the max kurtosis\n limit for regions that consist of water confined\n to spherical pores (see DIPY example and \n documentation) [Default: 3.0].\n --smooth SMOOTH Smooth input DWI with a 3D Gaussian filter with \n full-width-half-max (fwhm). Kurtosis fitting is \n sensitive and outliers occur easily. According to\n tests on HCP, CB_Brain, Penthera3T, this smoothing\n is thus turned ON by default with fwhm=2.5. \n [Default: 2.5].\n --not_all If set, will only save the metrics explicitly \n specified using the other metrics flags. \n [Default: not set].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics files flags:\n --ak file Output filename for the axial kurtosis.\n --mk file Output filename for the mean kurtosis.\n --rk file Output filename for the radial kurtosis.\n --msk file Output filename for the mean signal kurtosis.\n --dki_fa file Output filename for the fractional anisotropy from DKI.\n --dki_md file Output filename for the mean diffusivity from DKI.\n --dki_ad file Output filename for the axial diffusivity from DKI.\n --dki_rd file Output filename for the radial diffusivity from DKI.\n\nQuality control files flags:\n --dki_residual file Output filename for the map of the residual of the tensor fit.\n Note. In previous versions, the resulting map was normalized. 
\n It is not anymore.\n --msd file Output filename for the mean signal diffusion (powder-average).\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "direction", + "direction" + ], + [ + "diffusion", + "diffusion" + ], + [ + "Data", + "data", + "data" + ], + [ + "higher", + "higher" + ], + [ + "axial", + "axial" + ], + [ + "maps", + "map" + ], + [ + "white", + "white" + ], + [ + "region", + "regions", + "regions" + ], + [ + "large", + "large" + ], + [ + "create", + "creating" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ], + [ + "reported", + "reported" + ], + [ + "precision", + "precision" + ], + [ + "comprised", + "consist" + ], + [ + "high", + "high" + ], + [ + "signal", + "signal" + ], + [ + "average", + "average" + ], + [ + "positive", + "negative" + ], + [ + "imaging", + "imaging" + ], + [ + "high", + "low" + ], + [ + "maps", + "maps" + ], + [ + "voxel", + "voxels" + ], + [ + "matter", + "matter" + ], + [ + "binary", + "binary" + ], + [ + "parameters", + "parameters" + ] + ], + "keywords": [] + }, + { + "name": "scil_dti_convert_tensors", + "docstring": "Conversion of tensors (the 6 values from the triangular matrix) between various\nsoftware standards. We cannot discover the input format type, user must know\nhow the tensors were created.", + "help": "usage: scil_dti_convert_tensors.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file out_file in_format out_format\n\nConversion of tensors (the 6 values from the triangular matrix) between various\nsoftware standards. We cannot discover the input format type, user must know\nhow the tensors were created.\n\n Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639\n \n MRTRIX's order is : [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]\n Shape: [i, j , k, 6].\n Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html\n \n ANTS's order ('nifti format') is : [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].\n Shape: [i, j , k, 1, 6] (Careful, file is 5D).\n Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software\n \n FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide\n (Also used for the Fibernavigator)\n \n\npositional arguments:\n in_file Input tensors filename.\n out_file Output tensors filename.\n in_format Input format. Choices: ['fsl', 'nifti', 'mrtrix', 'dipy']\n out_format Output format. Choices: ['fsl', 'nifti', 'mrtrix', 'dipy']\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "variety", + "various" + ], + [ + "diffusion", + "diffusion" + ], + [ + "Data", + "data", + "data" + ], + [ + "shape", + "shape" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_dti_metrics", + "docstring": "Script to compute all of the Diffusion Tensor Imaging (DTI) metrics.\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section.\n\nIf --not_all is set, only the metrics specified explicitly by the flags\nwill be output. 
The available metrics are:\n\nfractional anisotropy (FA), geodesic anisotropy (GA), axial diffusivity (AD),\nradial diffusivity (RD), mean diffusivity (MD), mode, red-green-blue colored\nFA (rgb), principal tensor e-vector and tensor coefficients (dxx, dxy, dxz,\ndyy, dyz, dzz).\n\nFor all the quality control metrics such as residual, physically implausible\nsignals, pulsation and misalignment artifacts, see\n[J-D Tournier, S. Mori, A. Leemans. Diffusion Tensor Imaging and Beyond.\nMRM 2011].\n\nFormerly: scil_compute_dti_metrics.py", + "help": "usage: scil_dti_metrics.py [-h] [-f] [--mask MASK] [--method method_name]\n [--not_all] [--ad file] [--evecs file]\n [--evals file] [--fa file] [--ga file] [--md file]\n [--mode file] [--norm file] [--rgb file]\n [--rd file] [--tensor file]\n [--tensor_format {fsl,nifti,mrtrix,dipy}]\n [--non-physical file] [--pulsation string]\n [--residual file] [--b0_threshold thr]\n [--skip_b0_check] [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nScript to compute all of the Diffusion Tensor Imaging (DTI) metrics.\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section.\n\nIf --not_all is set, only the metrics specified explicitly by the flags\nwill be output. The available metrics are:\n\nfractional anisotropy (FA), geodesic anisotropy (GA), axial diffusivity (AD),\nradial diffusivity (RD), mean diffusivity (MD), mode, red-green-blue colored\nFA (rgb), principal tensor e-vector and tensor coefficients (dxx, dxy, dxz,\ndyy, dyz, dzz).\n\nFor all the quality control metrics such as residual, physically implausible\nsignals, pulsation and misalignment artifacts, see\n[J-D Tournier, S. Mori, A. Leemans. Diffusion Tensor Imaging and Beyond.\nMRM 2011].\n\nFormerly: scil_compute_dti_metrics.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computations and reconstruction. (Default: None)\n --method method_name Tensor fit method.\n WLS for weighted least squares\n LS for ordinary least squares\n NLLS for non-linear least-squares\n restore for RESTORE robust tensor fitting. (Default: WLS)\n --not_all If set, will only save the metrics explicitly specified using the other metrics flags. (Default: not set).\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n\nMetrics files flags:\n --ad file Output filename for the axial diffusivity.\n --evecs file Output filename for the eigenvectors of the tensor.\n --evals file Output filename for the eigenvalues of the tensor.\n --fa file Output filename for the fractional anisotropy.\n --ga file Output filename for the geodesic anisotropy.\n --md file Output filename for the mean diffusivity.\n --mode file Output filename for the mode.\n --norm file Output filename for the tensor norm.\n --rgb file Output filename for the colored fractional anisotropy.\n --rd file Output filename for the radial diffusivity.\n --tensor file Output filename for the tensor coefficients.\n --tensor_format {fsl,nifti,mrtrix,dipy}\n Format used for the tensors saved in --tensor file.(default: fsl)\n \n Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639\n \n MRTRIX's order is : [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]\n Shape: [i, j , k, 6].\n Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html\n \n ANTS's order ('nifti format') is : [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].\n Shape: [i, j , k, 1, 6] (Careful, file is 5D).\n Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software\n \n FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide\n (Also used for the Fibernavigator)\n \n\nQuality control files flags:\n --non-physical file Output filename for the voxels with physically implausible signals \n where the mean of b=0 images is below one or more diffusion-weighted images.\n --pulsation string Standard deviation map across all diffusion-weighted images and across b=0 images if more than one is available.\n Shows pulsation and misalignment artifacts.\n --residual file Output filename for the map of the residual of the tensor fit.\n", + "synonyms": [ + [ + "blue", + "red", + "blue" + ], + [ + "axial", + "axial" + ], + [ + "order", + "order" + ], + [ + "methods", + "method" + ], + [ + "signal", + "signals" + ], + [ + "maps", + "map" + ], + [ + "weighted", + "weighted" + ], + [ + "principal", + "principal" + ], + [ + "imaging", + "imaging" + ], + [ + "high", + "high" + ], + [ + "diffusion", + "diffusion" + ], + [ + "Data", + "data", + "data" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "blue", + "red", + "red" + ], + [ + "voxel", + "voxels" + ], + [ + "shape", + "shape" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "green", + "green" + ], + [ + "considered", + "considered" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ] + ], + "keywords": [] + }, + { + "name": "scil_dwi_apply_bias_field", + "docstring": "Apply bias field correction to DWI. This script doesn't compute the bias\nfield itself. It ONLY applies an existing bias field. Please use the ANTs\nN4BiasFieldCorrection executable to compute the bias field.\n\nFormerly: scil_apply_bias_field_on_dwi.py", + "help": "usage: scil_dwi_apply_bias_field.py [-h] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bias_field out_name\n\nApply bias field correction to DWI. This script doesn't compute the bias\nfield itself. It ONLY applies an existing bias field. 
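The tensor component orders listed above fully determine the conversion between software standards: it is a permutation of the last axis. A small sketch under that assumption (reorder_tensor is a hypothetical helper, not the script's internals):

import numpy as np

# Component orders copied from the conventions listed above.
FSL = ["Dxx", "Dxy", "Dxz", "Dyy", "Dyz", "Dzz"]
MRTRIX = ["Dxx", "Dyy", "Dzz", "Dxy", "Dxz", "Dyz"]

def reorder_tensor(tensor, src, dst):
    # Permute the last axis of a [i, j, k, 6] volume from one
    # component convention to another.
    perm = [src.index(name) for name in dst]
    return tensor[..., perm]

fsl_vol = np.zeros((2, 2, 2, 6))
mrtrix_vol = reorder_tensor(fsl_vol, FSL, MRTRIX)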
Please use the ANTs\nN4BiasFieldCorrection executable to compute the bias field.\n\nFormerly: scil_apply_bias_field_on_dwi.py\n\npositional arguments:\n in_dwi DWI Nifti image.\n in_bias_field Bias field Nifti image.\n out_name Corrected DWI Nifti image.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Apply bias field correction only in the region defined by the mask.\n If this is not given, the bias field is still applied only in non-background data \n (i.e. where the dwi is not 0).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "region", + "regions", + "region" + ], + [ + "image", + "image" + ], + [ + "applied", + "apply" + ], + [ + "Data", + "data", + "data" + ], + [ + "applied", + "applied" + ], + [ + "level", + "level" + ], + [ + "defined", + "defined" + ] + ], + "keywords": [] + }, + { + "name": "scil_dwi_compute_snr", + "docstring": "Script to compute signal to noise ratio (SNR) in a region of interest (ROI)\nof a DWI volume.\n\nIt will compute the SNR for all DWI volumes of the input image separately.\nThe output will contain the SNR which is the ratio of\nmean(signal) / std(noise).\nThe mean of the signal is computed inside the mask.\nThe standard deviation of the noise is estimated inside the noise_mask\nor inside the same mask if a noise_map is provided.\nIf it's not supplied, it will be estimated using the data outside the brain,\ncomputed with Dipy's median_otsu.\n\nIf verbose is True, the SNR for every DWI volume will be output.\n\nThis works best in a well-defined ROI such as the corpus callosum.\nIt is heavily dependent on the ROI and its quality.\n\nWe highly recommend using a noise_map if you can acquire one.\nSee refs [1, 2] that describe the noise map acquisition.\n[1] St-Jean, et al (2016). Non Local Spatial and Angular Matching...\n https://doi.org/10.1016/j.media.2016.02.010\n[2] Reymbaut, et al (2021). Magic DIAMOND...\n https://doi.org/10.1016/j.media.2021.101988\n\nFormerly: scil_snr_in_roi.py", + "help": "usage: scil_dwi_compute_snr.py [-h]\n [--noise_mask NOISE_MASK | --noise_map NOISE_MAP]\n [--b0_thr B0_THR] [--out_basename OUT_BASENAME]\n [--split_shells] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_mask\n\nScript to compute signal to noise ratio (SNR) in a region of interest (ROI)\nof a DWI volume.\n\nIt will compute the SNR for all DWI volumes of the input image separately.\nThe output will contain the SNR which is the ratio of\nmean(signal) / std(noise).\nThe mean of the signal is computed inside the mask.\nThe standard deviation of the noise is estimated inside the noise_mask\nor inside the same mask if a noise_map is provided.\nIf it's not supplied, it will be estimated using the data outside the brain,\ncomputed with Dipy's median_otsu.\n\nIf verbose is True, the SNR for every DWI volume will be output.\n\nThis works best in a well-defined ROI such as the corpus callosum.\nIt is heavily dependent on the ROI and its quality.\n\nWe highly recommend using a noise_map if you can acquire one.\nSee refs [1, 2] that describe the noise map acquisition.\n[1] St-Jean, et al (2016). Non Local Spatial and Angular Matching...\n https://doi.org/10.1016/j.media.2016.02.010\n[2] Reymbaut, et al (2021). 
Magic DIAMOND...\n https://doi.org/10.1016/j.media.2021.101988\n\nFormerly: scil_snr_in_roi.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n in_mask Binary mask of the region used to estimate SNR.\n\noptions:\n -h, --help show this help message and exit\n --b0_thr B0_THR All b-values with values less than or equal to b0_thr are considered as b0s i.e. without diffusion weighting. [0.0]\n --out_basename OUT_BASENAME\n Path and prefix for the various saved file.\n --split_shells SNR will be split into shells.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMasks options:\n --noise_mask NOISE_MASK\n Binary mask used to estimate the noise from the DWI.\n --noise_map NOISE_MAP\n Noise map.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "region", + "regions", + "region" + ], + [ + "variety", + "various" + ], + [ + "spatial", + "spatial" + ], + [ + "maps", + "map" + ], + [ + "corpus", + "corpus" + ], + [ + "image", + "image" + ], + [ + "diffusion", + "diffusion" + ], + [ + "volume", + "volumes", + "volumes" + ], + [ + "highly", + "highly" + ], + [ + "signal", + "signal" + ], + [ + "Data", + "data", + "data" + ], + [ + "true", + "true" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "considered", + "considered" + ], + [ + "level", + "level" + ], + [ + "brain", + "brain" + ], + [ + "defined", + "defined" + ] + ], + "keywords": [] + }, + { + "name": "scil_dwi_concatenate", + "docstring": "Concatenate DWI, bval and bvecs together. File must be specified in matching\norder. Default data type will be the same as the first input DWI.\n\nFormerly: scil_concatenate_dwi.py", + "help": "usage: scil_dwi_concatenate.py [-h] [--in_dwis IN_DWIS [IN_DWIS ...]]\n [--in_bvals IN_BVALS [IN_BVALS ...]]\n [--in_bvecs IN_BVECS [IN_BVECS ...]]\n [--data_type DATA_TYPE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dwi out_bval out_bvec\n\nConcatenate DWI, bval and bvecs together. File must be specified in matching\norder. Default data type will be the same as the first input DWI.\n\nFormerly: scil_concatenate_dwi.py\n\npositional arguments:\n out_dwi The name of the output DWI file.\n out_bval The name of the output b-values file (.bval).\n out_bvec The name of the output b-vectors file (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n The DWI file (.nii) to concatenate.\n --in_bvals IN_BVALS [IN_BVALS ...]\n The b-values files in FSL format (.bval).\n --in_bvecs IN_BVECS [IN_BVECS ...]\n The b-vectors files in FSL format (.bvec).\n --data_type DATA_TYPE\n Data type of the output image. Use the format: uint8, int16, int/float32, int/float64.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "image", + "image" + ], + [ + "Data", + "data", + "data" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_dwi_convert_FDF", + "docstring": "Converts a Varian FDF file or directory to a nifti file.\nIf the procpar contains diffusion information, it will be saved as bval and\nbvec in the same folder as the output file.\n\nex: scil_dwi_convert_FDF.py semsdw/b0_folder/ semsdw/dwi_folder/ dwi.nii.gz --bval dwi.bval --bvec dwi.bvec -f\n\nFormerly: scil_convert_fdf.py", + "help": "usage: scil_dwi_convert_FDF.py [-h] [--bval BVAL] [--bvec BVEC]\n [--flip dimension [dimension ...]]\n [--swap dimension [dimension ...]]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_b0_path in_dwi_path out_path\n\nConverts a Varian FDF file or directory to a nifti file.\nIf the procpar contains diffusion information, it will be saved as bval and\nbvec in the same folder as the output file.\n\nex: scil_dwi_convert_FDF.py semsdw/b0_folder/ semsdw/dwi_folder/ dwi.nii.gz --bval dwi.bval --bvec dwi.bvec -f\n\nFormerly: scil_convert_fdf.py\n\npositional arguments:\n in_b0_path Path to the b0 FDF file or folder to convert.\n in_dwi_path Path to the DWI FDF file or folder to convert.\n out_path Path to the nifti file to write on disk.\n\noptions:\n -h, --help show this help message and exit\n --bval BVAL Path to the bval file to write on disk.\n --bvec BVEC Path to the bvec file to write on disk.\n --flip dimension [dimension ...]\n The axes you want to flip. eg: to flip the x and y axes use: x y. [None]\n --swap dimension [dimension ...]\n The axes you want to swap. eg: to swap the x and y axes use: x y. [None]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "diffusion", + "diffusion" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_dwi_detect_volume_outliers", + "docstring": "This script simply finds the 3 closest angular neighbors of each direction\n(per shell) and computes the voxel-wise correlation.\nIf the angles or correlations to neighbors are below the shell average (by\nargs.std_scale x STD) it will flag the volume as a potential outlier.\n\nThis script supports multi-shell data, but each shell is independent and detected\nusing the --b0_threshold parameter.\n\nThis script can be run before any processing to identify potential problems\nbefore launching pre-processing.", + "help": "usage: scil_dwi_detect_volume_outliers.py [-h] [--std_scale STD_SCALE]\n [--b0_threshold thr]\n [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nThis script simply finds the 3 closest angular neighbors of each direction\n(per shell) and computes the voxel-wise correlation.\nIf the angles or correlations to neighbors are below the shell average (by\nargs.std_scale x STD) it will flag the volume as a potential outlier.\n\nThis script supports multi-shell data, but each shell is independent and detected\nusing the --b0_threshold parameter.\n\nThis script can be run before any processing to identify potential problems\nbefore launching pre-processing.\n\npositional arguments:\n in_dwi The DWI file (.nii).\n in_bval The b-values file in FSL format (.bval).\n in_bvec The b-vectors file in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --std_scale STD_SCALE\n How many deviations from the mean are required to be considered an outlier. [2.0]\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n", + "synonyms": [ + [ + "direction", + "direction" + ], + [ + "high", + "high" + ], + [ + "parameter", + "parameter" + ], + [ + "Data", + "data", + "data" + ], + [ + "potential", + "potential" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "average", + "average" + ], + [ + "considered", + "considered" + ], + [ + "voxel", + "voxel" + ], + [ + "processing", + "processing" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_dwi_extract_b0", + "docstring": "Extract B0s from DWI, based on the bval and bvec information.\n\nThe default behavior is to save the first b0 of the series.\n\nFormerly: scil_extract_b0.py", + "help": "usage: scil_dwi_extract_b0.py [-h]\n [--all | --mean | --cluster-mean | --cluster-first]\n [--block-size INT] [--single-image]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_b0\n\nExtract B0s from DWI, based on the bval and bvec information.\n\nThe default behavior is to save the first b0 of the series.\n\nFormerly: scil_extract_b0.py\n\npositional arguments:\n in_dwi DWI Nifti image.\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-values filename, in FSL format (.bvec).\n out_b0 Output b0 file(s).\n\noptions:\n -h, --help show this help message and exit\n --block-size INT, -s INT\n Load the data using this block size. Useful\n when the data is too large to be loaded in memory.\n --single-image If output b0 volume has multiple time points, only outputs a single \n image instead of a numbered series of images.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nOptions in the case of multiple b0s.:\n --all Extract all b0s. Index number will be appended to the output file.\n --mean Extract mean b0.\n --cluster-mean Extract mean of each continuous cluster of b0s.\n --cluster-first Extract first b0 of each continuous cluster of b0s.\n", + "synonyms": [ + [ + "image", + "image" + ], + [ + "high", + "high" + ], + [ + "Data", + "data", + "data" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "large", + "large" + ], + [ + "based", + "based" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "considered", + "considered" + ], + [ + "memory", + "memory" + ], + [ + "level", + "level" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_dwi_extract_shell", + "docstring": "Extracts the DWI volumes that are on specific b-value shells. Many shells\ncan be extracted at once by specifying multiple b-values. 
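The b0 and shell selection logic described around --b0_threshold and --tolerance reduces to a simple mask on the b-values. A sketch in plain numpy (shell_indices is a hypothetical helper, not the scripts' internals):

import numpy as np

def shell_indices(bvals, target_b, tol=20):
    # Volumes whose b-value lies within target_b +/- tol.
    bvals = np.asarray(bvals)
    return np.flatnonzero(np.abs(bvals - target_b) <= tol)

bvals = [0, 5, 1995, 2005, 1000, 2020]
print(shell_indices(bvals, 0))       # b0s under the default threshold -> [0 1]
print(shell_indices(bvals, 2000))    # the b=2000 shell -> [2 3 5]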
The extracted\nvolumes are in the same order as in the original file.\n\nIf the b-values of a shell are not all identical, use the --tolerance\nargument to adjust the accepted interval. For example, a b-value of 2000\nand a tolerance of 20 will extract all volumes with a b-values from 1980 to\n2020.\n\nFiles that are too large to be loaded in memory can still be processed by\nsetting the --block-size argument. A block size of X means that X DWI volumes\nare loaded at a time for processing.\n\nFormerly: scil_extract_dwi_shell.py", + "help": "usage: scil_dwi_extract_shell.py [-h] [--out_indices OUT_INDICES]\n [--block-size INT] [--tolerance INT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_bvals_to_extract\n [in_bvals_to_extract ...] out_dwi out_bval\n out_bvec\n\nExtracts the DWI volumes that are on specific b-value shells. Many shells\ncan be extracted at once by specifying multiple b-values. The extracted\nvolumes are in the same order as in the original file.\n\nIf the b-values of a shell are not all identical, use the --tolerance\nargument to adjust the accepted interval. For example, a b-value of 2000\nand a tolerance of 20 will extract all volumes with a b-values from 1980 to\n2020.\n\nFiles that are too large to be loaded in memory can still be processed by\nsetting the --block-size argument. A block size of X means that X DWI volumes\nare loaded at a time for processing.\n\nFormerly: scil_extract_dwi_shell.py\n\npositional arguments:\n in_dwi The DW image file to split.\n in_bval The b-values file in FSL format (.bval).\n in_bvec The b-vectors file in FSL format (.bvec).\n in_bvals_to_extract The list of b-values to extract. For example 0 2000.\n out_dwi The name of the output DWI file.\n out_bval The name of the output b-value file (.bval).\n out_bvec The name of the output b-vector file (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --out_indices OUT_INDICES\n Optional filename for valid indices in input dwi volume\n --block-size INT, -s INT\n Loads the data using this block size. Useful\n when the data is too large to be loaded in memory.\n --tolerance INT, -t INT\n The tolerated gap between the b-values to extract\n and the actual b-values. [20]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "image", + "image" + ], + [ + "volume", + "volumes", + "volumes" + ], + [ + "Data", + "data", + "data" + ], + [ + "large", + "large" + ], + [ + "valid", + "valid" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "memory", + "memory" + ], + [ + "processing", + "processing" + ], + [ + "specific", + "specific" + ], + [ + "level", + "level" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_dwi_powder_average", + "docstring": "Script to compute powder average (mean diffusion weighted image) from set of\ndiffusion images.\n\nBy default will output an average image calculated from all images with\nnon-zero bvalue.\n\nSpecify --bvalue to output an image for a single shell\n\nScript currently does not take into account the diffusion gradient directions\nbeing averaged.\n\nFormerly: scil_compute_powder_average.py", + "help": "usage: scil_dwi_powder_average.py [-h] [-f] [--mask file] [--b0_thr B0_THR]\n [--shells SHELLS [SHELLS ...]]\n [--shell_thr SHELL_THR]\n [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval out_avg\n\nScript to compute powder average (mean diffusion weighted image) from set of\ndiffusion images.\n\nBy default will output an average image calculated from all images with\nnon-zero bvalue.\n\nSpecify --bvalue to output an image for a single shell\n\nScript currently does not take into account the diffusion gradient directions\nbeing averaged.\n\nFormerly: scil_compute_powder_average.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n out_avg Path of the output file.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --mask file Path to a binary mask.\n Only data inside the mask will be used for powder avg. (Default: None)\n --b0_thr B0_THR Exclude b0 volumes from powder average with bvalue less than specified threshold.\n (Default: remove volumes with bvalue < 50\n --shells SHELLS [SHELLS ...]\n bvalue (shells) to include in powder average passed as a list \n (e.g. --shells 1000 2000). If not specified will include all volumes with a non-zero bvalue.\n --shell_thr SHELL_THR\n Include volumes with bvalue +- the specified threshold.\n (Default: [50]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [ + [ + "weighted", + "weighted" + ], + [ + "image", + "image" + ], + [ + "diffusion", + "diffusion" + ], + [ + "volume", + "volumes", + "volumes" + ], + [ + "Data", + "data", + "data" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "average", + "average" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_dwi_prepare_eddy_command", + "docstring": "Prepare a typical command for eddy and create the necessary files. When using\nmultiple acquisitions and/or opposite phase directions, images, b-values and\nb-vectors should be merged together using scil_dwi_concatenate.py. 
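The powder average described above is simply the mean over the selected DWI volumes. A sketch under that reading (powder_average is a hypothetical helper; the script itself additionally handles --shells and --shell_thr):

import numpy as np

def powder_average(dwi, bvals, b0_thr=50):
    # Mean image over all volumes whose b-value is at least b0_thr,
    # i.e. excluding b0 volumes as the --b0_thr option does.
    bvals = np.asarray(bvals)
    keep = bvals >= b0_thr
    return dwi[..., keep].mean(axis=-1)

dwi = np.random.rand(2, 2, 2, 4)
print(powder_average(dwi, [0, 1000, 1000, 2000]).shape)   # -> (2, 2, 2)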
If using\ntopup prior to calling this script, images should be concatenated in the same\norder as the b0s used with prepare_topup.\n\nFormerly: scil_prepare_eddy_command.py", + "help": "usage: scil_dwi_prepare_eddy_command.py [-h] [--n_reverse N_REVERSE]\n [--topup TOPUP]\n [--topup_params TOPUP_PARAMS]\n [--eddy_cmd {eddy_openmp,eddy_cuda,eddy_cuda8.0,eddy_cuda9.1,eddy_cuda10.2,eddy,eddy_cpu}]\n [--b0_thr B0_THR]\n [--encoding_direction {x,y,z}]\n [--readout READOUT]\n [--slice_drop_correction]\n [--lsr_resampling]\n [--out_directory OUT_DIRECTORY]\n [--out_prefix OUT_PREFIX]\n [--out_script] [--fix_seed]\n [--eddy_options EDDY_OPTIONS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bvals in_bvecs in_mask\n\nPrepare a typical command for eddy and create the necessary files. When using\nmultiple acquisitions and/or opposite phase directions, images, b-values and\nb-vectors should be merged together using scil_dwi_concatenate.py. If using\ntopup prior to calling this script, images should be concatenated in the same\norder as the b0s used with prepare_topup.\n\nFormerly: scil_prepare_eddy_command.py\n\npositional arguments:\n in_dwi Input DWI Nifti image. If using multiple acquisition and/or opposite phase directions, please merge in the same order as for prepare_topup using scil_dwi_concatenate.py.\n in_bvals Input b-values file in FSL format.\n in_bvecs Input b-vectors file in FSL format.\n in_mask Binary brain mask.\n\noptions:\n -h, --help show this help message and exit\n --n_reverse N_REVERSE\n Number of reverse phase volumes included in the DWI image [0].\n --topup TOPUP Topup output name. If given, apply topup during eddy.\n Should be the same as --out_prefix from scil_dwi_prepare_topup_command.py.\n --topup_params TOPUP_PARAMS\n Parameters file (typically named acqparams) used to run topup.\n --eddy_cmd {eddy_openmp,eddy_cuda,eddy_cuda8.0,eddy_cuda9.1,eddy_cuda10.2,eddy,eddy_cpu}\n Eddy command [eddy_openmp].\n --b0_thr B0_THR All b-values with values less than or equal to b0_thr are considered\n as b0s i.e. without diffusion weighting [20].\n --encoding_direction {x,y,z}\n Acquisition direction, default is AP-PA [y].\n --readout READOUT Total readout time from the DICOM metadata [0.062].\n --slice_drop_correction\n If set, will activate eddy's outlier correction,\n which includes slice drop correction.\n --lsr_resampling Perform least-square resampling, allowing eddy to combine forward and reverse phase acquisitions for better reconstruction. Only works if directions and b-values are identical in both phase direction.\n --out_directory OUT_DIRECTORY\n Output directory for eddy files [.].\n --out_prefix OUT_PREFIX\n Prefix of the eddy-corrected DWI [dwi_eddy_corrected].\n --out_script If set, will output a .sh script (eddy.sh).\n else, will output the lines to the terminal [False].\n --fix_seed If set, will use the fixed seed strategy for eddy.\n Enhances reproducibility.\n --eddy_options EDDY_OPTIONS\n Additional options you want to use to run eddy.\n Add these options using quotes (i.e. \"--ol_nstd=6 --mb=4\").\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "direction", + "direction" + ], + [ + "create", + "create" + ], + [ + "image", + "image" + ], + [ + "diffusion", + "diffusion" + ], + [ + "applied", + "apply" + ], + [ + "volume", + "volumes", + "volumes" + ], + [ + "binary", + "binary" + ], + [ + "considered", + "considered" + ], + [ + "level", + "level" + ], + [ + "total", + "total" + ], + [ + "parameters", + "parameters" + ], + [ + "false", + "false" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_dwi_prepare_topup_command", + "docstring": "Prepare a typical command for topup and create the necessary files.\nThe reversed b0 must be in a different file.\n\nFormerly: scil_prepare_topup_command.py", + "help": "usage: scil_dwi_prepare_topup_command.py [-h] [--config CONFIG] [--synb0]\n [--encoding_direction {x,y,z}]\n [--readout READOUT]\n [--out_b0s OUT_B0S]\n [--out_directory OUT_DIRECTORY]\n [--out_prefix OUT_PREFIX]\n [--out_params OUT_PARAMS]\n [--out_script]\n [--topup_options TOPUP_OPTIONS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_forward_b0 in_reverse_b0\n\nPrepare a typical command for topup and create the necessary files.\nThe reversed b0 must be in a different file.\n\nFormerly: scil_prepare_topup_command.py\n\npositional arguments:\n in_forward_b0 Input b0 Nifti image with forward phase encoding.\n in_reverse_b0 Input b0 Nifti image with reversed phase encoding.\n\noptions:\n -h, --help show this help message and exit\n --config CONFIG Topup config file [b02b0.cnf].\n --synb0 If set, will use SyNb0 custom acqparams file.\n --encoding_direction {x,y,z}\n Acquisition direction of the forward b0 image, default is AP [y].\n --readout READOUT Total readout time from the DICOM metadata [0.062].\n --out_b0s OUT_B0S Output fused b0 file [fused_b0s.nii.gz].\n --out_directory OUT_DIRECTORY\n Output directory for topup files [.].\n --out_prefix OUT_PREFIX\n Prefix of the topup results [topup_results].\n --out_params OUT_PARAMS\n Filename for the acquisition parameters file [acqparams.txt].\n --out_script If set, will output a .sh script (topup.sh).\n else, will output the lines to the terminal [False].\n --topup_options TOPUP_OPTIONS\n Additional options you want to use to run topup.\n Add these options using quotes (i.e. \"--fwhm=6 --miter=4\").\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "direction", + "direction" + ], + [ + "create", + "create" + ], + [ + "image", + "image" + ], + [ + "level", + "level" + ], + [ + "total", + "total" + ], + [ + "parameters", + "parameters" + ], + [ + "false", + "false" + ] + ], + "keywords": [] + }, + { + "name": "scil_dwi_reorder_philips", + "docstring": "Re-order gradients according to the original table (Philips).\nThis script is not needed for version 5.6 and higher.\n\nFormerly: scil_reorder_dwi_philips.py", + "help": "usage: scil_dwi_reorder_philips.py [-h] [--json JSON]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_table\n out_basename\n\nRe-order gradients according to the original table (Philips).\nThis script is not needed for version 5.6 and higher.\n\nFormerly: scil_reorder_dwi_philips.py\n\npositional arguments:\n in_dwi Input dwi file.\n in_bval Input bval FSL format.\n in_bvec Input bvec FSL format.\n in_table Original Philips table - first line is skipped.\n out_basename Basename of the output file.\n\noptions:\n -h, --help show this help message and exit\n --json JSON If you give a json file, it will check if you need to reorder your Philips dwi.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "level", + "level" + ], + [ + "higher", + "higher" + ] + ], + "keywords": [] + }, + { + "name": "scil_dwi_split_by_indices", + "docstring": "Splits the DWI image at certain indices along the last dimension (b-values).\nMany indices can be given at once by specifying multiple values. The split\nvolumes are in the same order as in the original file. Also outputs the\ncorresponding .bval and .bvec files.\n\nThis script can be useful for splitting images at places where a b-value\nextraction does not work. For instance, if one wants to split the x first\nb-1500s from the rest of the b-1500s in an image, simply put x as an index.\n\nFormerly: scil_split_image.py", + "help": "usage: scil_dwi_split_by_indices.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_basename\n split_indices [split_indices ...]\n\nSplits the DWI image at certain indices along the last dimension (b-values).\nMany indices can be given at once by specifying multiple values. The split\nvolumes are in the same order as in the original file. Also outputs the\ncorresponding .bval and .bvec files.\n\nThis script can be useful for splitting images at places where a b-value\nextraction does not work. For instance, if one wants to split the x first\nb-1500s from the rest of the b-1500s in an image, simply put x as an index.\n\nFormerly: scil_split_image.py\n\npositional arguments:\n in_dwi The DW image file to split.\n in_bval The b-values file in FSL format (.bval).\n in_bvec The b-vectors file in FSL format (.bvec).\n out_basename The basename of the output files. Indices will be appended to out_basename. For example, if split_indices were 3 10, the files would be saved as out_basename_0_2, out_basename_3_10, out_basename_11_20, where the size of the last dimension is 21 in this example.\n split_indices The list of indices where to split the image. For example 3 10. This would split the image into three parts, such as [:3], [3:10], [10:]. 
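A minimal sketch of an invocation for scil_dwi_split_by_indices.py (hypothetical filenames; the indices 3 and 10 are illustrative), splitting a DWI into three parts with matching .bval/.bvec files:

>>> scil_dwi_split_by_indices.py dwi.nii.gz dwi.bval dwi.bvec dwi_split 3 10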
Indices must be in increasing order.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "work", + "work" + ], + [ + "image", + "image" + ], + [ + "volume", + "volumes", + "volumes" + ], + [ + "level", + "level" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_dwi_to_sh", + "docstring": "Script to compute the SH coefficient directly on the raw DWI signal.\n\nFormerly: scil_compute_sh_from_signal.py", + "help": "usage: scil_dwi_to_sh.py [-h] [--sh_order SH_ORDER]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--smooth SMOOTH] [--use_attenuation] [--mask MASK]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_sh\n\nScript to compute the SH coefficient directly on the raw DWI signal.\n\nFormerly: scil_compute_sh_from_signal.py\n\npositional arguments:\n in_dwi Path of the dwi volume.\n in_bval Path of the b-value file, in FSL format.\n in_bvec Path of the b-vector file, in FSL format.\n out_sh Name of the output SH file to save.\n\noptions:\n -h, --help show this help message and exit\n --sh_order SH_ORDER SH order to fit (int). [4]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --smooth SMOOTH Lambda-regularization coefficient in the SH fit (float). [0.006]\n --use_attenuation If set, will use signal attenuation before fitting the SH (i.e. divide by the b0).\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computations and reconstruction \n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "high", + "high" + ], + [ + "signal", + "signal" + ], + [ + "Data", + "data", + "data" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "considered", + "considered" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_fodf_max_in_ventricles", + "docstring": "Script to compute the maximum fODF in the ventricles. The ventricles are\nestimated from an MD and FA threshold.\n\nThis allows clipping the noise of the fODF using an absolute threshold.\n\nFormerly: scil_compute_fodf_max_in_ventricles.py", + "help": "usage: scil_fodf_max_in_ventricles.py [-h] [--fa_threshold FA_THRESHOLD]\n [--md_threshold MD_THRESHOLD]\n [--max_value_output file]\n [--mask_output file] [--small_dims]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n fODFs FA MD\n\nScript to compute the maximum fODF in the ventricles. The ventricles are\nestimated from an MD and FA threshold.\n\nThis allows clipping the noise of the fODF using an absolute threshold.\n\nFormerly: scil_compute_fodf_max_in_ventricles.py\n\npositional arguments:\n fODFs Path of the fODF volume in spherical harmonics (SH).\n FA Path to the FA volume.\n MD Path to the mean diffusivity (MD) volume.\n\noptions:\n -h, --help show this help message and exit\n --fa_threshold FA_THRESHOLD\n Maximal threshold of FA (voxels under that threshold are considered \n for evaluation). [0.1]\n --md_threshold MD_THRESHOLD\n Minimal threshold of MD in mm2/s (voxels above that threshold are \n considered for evaluation). [0.003]\n --max_value_output file\n Output path for the text file containing the value. If not set the \n file will not be saved.\n --mask_output file Output path for the ventricle mask. If not set, the mask \n will not be saved.\n --small_dims If set, takes the full range of data to search the max fodf amplitude \n in ventricles. Useful when the data has small dimensions.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Dell'Acqua, Flavio, et al. \"Can spherical deconvolution provide more\n information than fiber orientations? 
Hindrance modulated orientational\n anisotropy, a true-tract specific index to characterize white matter\n diffusion.\" Human brain mapping 34.10 (2013): 2464-2483.\n", + "synonyms": [ + [ + "human", + "human" + ], + [ + "white", + "white" + ], + [ + "diffusion", + "diffusion" + ], + [ + "tract", + "tracts", + "tract" + ], + [ + "orientation", + "orientations" + ], + [ + "Data", + "data", + "data" + ], + [ + "large", + "small" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "true", + "true" + ], + [ + "voxel", + "voxels" + ], + [ + "matter", + "matter" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "considered", + "considered" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_fodf_memsmt", + "docstring": "Script to compute multi-encoding multi-shell multi-tissue (memsmt)\nConstrained Spherical Deconvolution ODFs.\n\nIn order to operate, the script only needs the data from one type of b-tensor\nencoding. However, giving only a spherical one will not produce good fODFs, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest always using at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombination with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable.\n\nAll of `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\n>>> scil_fodf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_fodf.py", + "help": "usage: scil_fodf_memsmt.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--sh_order int] [--mask MASK]\n [--tolerance tol] [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--not_all] [--wm_out_fODF file]\n [--gm_out_fODF file] [--csf_out_fODF file]\n [--vf file] [--vf_rgb file]\n in_wm_frf in_gm_frf in_csf_frf\n\nScript to compute multi-encoding multi-shell multi-tissue (memsmt)\nConstrained Spherical Deconvolution ODFs.\n\nIn order to operate, the script only needs the data from one type of b-tensor\nencoding. However, giving only a spherical one will not produce good fODFs, as\nit only probes spherical shapes. 
As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest always using at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombination with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable.\n\nAll of `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\n>>> scil_fodf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_fodf.py\n\npositional arguments:\n in_wm_frf Text file of WM response function.\n in_gm_frf Text file of GM response function.\n in_csf_frf Text file of CSF response function.\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as dwi, bval and bvec inputs.\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. 
MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags. (Default: False)\n\nFile flags:\n --wm_out_fODF file Output filename for the WM fODF coefficients.\n --gm_out_fODF file Output filename for the GM fODF coefficients.\n --csf_out_fODF file Output filename for the CSF fODF coefficients.\n --vf file Output filename for the volume fractions map.\n --vf_rgb file Output filename for the volume fractions map in rgb.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "variance", + "variance" + ], + [ + "maps", + "map" + ], + [ + "supported", + "supported" + ], + [ + "work", + "work" + ], + [ + "image", + "image" + ], + [ + "high", + "high" + ], + [ + "diffusion", + "diffusion" + ], + [ + "Data", + "data", + "data" + ], + [ + "processes", + "processes" + ], + [ + "function", + "functions", + "function" + ], + [ + "based", + "based" + ], + [ + "shape", + "shape" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "specific", + "specific" + ], + [ + "level", + "level" + ], + [ + "false", + "false" + ], + [ + "analysis", + "analysis" + ] + ], + "keywords": [] + }, + { + "name": "scil_fodf_metrics", + "docstring": "Script to compute the maximum Apparent Fiber Density (AFD), the fiber ODF\norientations, values and indices (peaks, peak_values, peak_indices), the Number\nof Fiber Orientations (NuFO) maps from fiber ODFs and the RGB map.\n\nAFD_max map is the maximal fODF amplitude for each voxel.\n\nNuFO is the number of maxima of the fODF with an ABSOLUTE amplitude above\nthe threshold set using --at, AND an amplitude above the RELATIVE threshold\nset using --rt.\n\nThe --at argument should be set to a value which is 1.5 times the maximal\nvalue of the fODF in the ventricles. This can be obtained with the\nscil_fodf_max_in_ventricles.py script.\n\nIf the --abs_peaks_and_values argument is set, the peaks are all normalized\nand the peak_values are equal to the actual fODF amplitude of the peaks. By\ndefault, the script max-normalizes the peak_values for each voxel and\nmultiplies the peaks by peak_values.\n\nBy default, will output all possible files, using default names. Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Raffelt et al. 
NeuroImage 2012] and [Dell'Acqua et al HBM 2013] for the\ndefinitions.\n\nFormerly: scil_compute_fodf_metrics.py", + "help": "usage: scil_fodf_metrics.py [-h] [--sphere string] [--mask] [--at A_THRESHOLD]\n [--rt R_THRESHOLD] [--abs_peaks_and_values]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR] [-f]\n [--not_all] [--afd_max file] [--afd_total file]\n [--afd_sum file] [--nufo file] [--rgb file]\n [--peaks file] [--peak_values file]\n [--peak_indices file]\n in_fODF\n\nScript to compute the maximum Apparent Fiber Density (AFD), the fiber ODF\norientations, values and indices (peaks, peak_values, peak_indices), the Number\nof Fiber Orientations (NuFO) maps from fiber ODFs and the RGB map.\n\nAFD_max map is the maximal fODF amplitude for each voxel.\n\nNuFO is the number of maxima of the fODF with an ABSOLUTE amplitude above\nthe threshold set using --at, AND an amplitude above the RELATIVE threshold\nset using --rt.\n\nThe --at argument should be set to a value which is 1.5 times the maximal\nvalue of the fODF in the ventricles. This can be obtained with the\nscil_fodf_max_in_ventricles.py script.\n\nIf the --abs_peaks_and_values argument is set, the peaks are all normalized\nand the peak_values are equal to the actual fODF amplitude of the peaks. By\ndefault, the script max-normalizes the peak_values for each voxel and\nmultiplies the peaks by peak_values.\n\nBy default, will output all possible files, using default names. Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Raffelt et al. NeuroImage 2012] and [Dell'Acqua et al HBM 2013] for the\ndefinitions.\n\nFormerly: scil_compute_fodf_metrics.py\n\npositional arguments:\n in_fODF Path of the fODF volume in spherical harmonics (SH).\n\noptions:\n -h, --help show this help message and exit\n --sphere string Discrete sphere to use in the processing [repulsion724].\n --mask Path to a binary mask. Only the data inside the mask\n will be used for computations and reconstruction [None].\n --at A_THRESHOLD Absolute threshold on fODF amplitude. This value should be set to\n approximately 1.5 to 2 times the maximum fODF amplitude in isotropic voxels\n (i.e. ventricles).\n Use scil_fodf_max_in_ventricles.py to find the maximal value.\n See [Dell'Acqua et al HBM 2013] [0.0].\n --rt R_THRESHOLD Relative threshold on fODF amplitude in percentage [0.1].\n --abs_peaks_and_values\n If set, the peak_values are not max-normalized for each voxel, \n but rather they keep the actual fODF amplitude of the peaks. \n Also, the peaks are given as unit directions instead of being proportional to peak_values. [False]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags [False].\n\nFile flags:\n --afd_max file Output filename for the AFD_max map.\n --afd_total file Output filename for the AFD_total map (SH coeff = 0).\n --afd_sum file Output filename for the sum of all peak contributions\n (sum of fODF lobes on the sphere).\n --nufo file Output filename for the NuFO map.\n --rgb file Output filename for the RGB map.\n --peaks file Output filename for the extracted peaks.\n --peak_values file Output filename for the extracted peaks values.\n --peak_indices file Output filename for the generated peaks indices on the sphere.\n", + "synonyms": [ + [ + "maps", + "map" + ], + [ + "maps", + "maps" + ], + [ + "orientation", + "orientations" + ], + [ + "Data", + "data", + "data" + ], + [ + "processes", + "processes" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "voxel", + "voxels" + ], + [ + "lobes", + "lobes" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "voxel", + "voxel" + ], + [ + "processing", + "processing" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ], + [ + "false", + "false" + ] + ], + "keywords": [] + }, + { + "name": "scil_fodf_msmt", + "docstring": "Script to compute Multi-shell Multi-tissue Constrained Spherical Deconvolution\nODFs.\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical\ndeconvolution for improved analysis of multi-shell diffusion\nMRI data. Neuroimage (2014)\n\nFormerly: scil_compute_msmt_fodf.py", + "help": "usage: scil_fodf_msmt.py [-h] [--sh_order int] [--mask] [--tolerance tol]\n [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [--not_all] [--wm_out_fODF file]\n [--gm_out_fODF file] [--csf_out_fODF file]\n [--vf file] [--vf_rgb file]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_wm_frf in_gm_frf in_csf_frf\n\nScript to compute Multi-shell Multi-tissue Constrained Spherical Deconvolution\nODFs.\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical\ndeconvolution for improved analysis of multi-shell diffusion\nMRI data. Neuroimage (2014)\n\nFormerly: scil_compute_msmt_fodf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bval file, in FSL format.\n in_bvec Path of the bvec file, in FSL format.\n in_wm_frf Text file of WM response function.\n in_gm_frf Text file of GM response function.\n in_csf_frf Text file of CSF response function.\n\noptions:\n -h, --help show this help message and exit\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask Path to a binary mask. 
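A minimal sketch of the ventricle-threshold workflow from the two entries above (hypothetical filenames; the --at value is illustrative and should be roughly 1.5 times the maximum reported by the first command):

>>> scil_fodf_max_in_ventricles.py fodf.nii.gz fa.nii.gz md.nii.gz --max_value_output vmax.txt
>>> scil_fodf_metrics.py fodf.nii.gz --at 0.15 --nufo nufo.nii.gz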
Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --not_all If set, only saves the files specified using the file flags. (Default: False)\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nFile flags:\n --wm_out_fODF file Output filename for the WM fODF coefficients.\n --gm_out_fODF file Output filename for the GM fODF coefficients.\n --csf_out_fODF file Output filename for the CSF fODF coefficients.\n --vf file Output filename for the volume fractions map.\n --vf_rgb file Output filename for the volume fractions map in rgb.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "maps", + "map" + ], + [ + "high", + "high" + ], + [ + "diffusion", + "diffusion" + ], + [ + "Data", + "data", + "data" + ], + [ + "processes", + "processes" + ], + [ + "function", + "functions", + "function" + ], + [ + "based", + "based" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ], + [ + "false", + "false" + ], + [ + "analysis", + "analysis" + ] + ], + "keywords": [] + }, + { + "name": "scil_fodf_ssst", + "docstring": "Script to compute Constrained Spherical Deconvolution (CSD) fiber ODFs.\n\nSee [Tournier et al. NeuroImage 2007]\n\nFormerly: scil_compute_ssst_fodf.py", + "help": "usage: scil_fodf_ssst.py [-h] [--sh_order int] [--mask] [--b0_threshold thr]\n [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec frf_file out_fODF\n\nScript to compute Constrained Spherical Deconvolution (CSD) fiber ODFs.\n\nSee [Tournier et al. NeuroImage 2007]\n\nFormerly: scil_compute_ssst_fodf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n frf_file Path of the FRF file\n out_fODF Output path for the fiber ODF coefficients.\n\noptions:\n -h, --help show this help message and exit\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask Path to a binary mask. 
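A minimal sketch of a single-shell single-tissue pipeline (hypothetical filenames), estimating the FRF with scil_frf_ssst.py (documented further below) and then computing the fODF:

>>> scil_frf_ssst.py dwi.nii.gz dwi.bval dwi.bvec frf.txt --mask mask.nii.gz
>>> scil_fodf_ssst.py dwi.nii.gz dwi.bval dwi.bvec frf.txt fodf.nii.gz --processes 4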
Only the data inside the mask will be used \n for computations and reconstruction.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "high", + "high" + ], + [ + "diffusion", + "diffusion" + ], + [ + "Data", + "data", + "data" + ], + [ + "processes", + "processes" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "considered", + "considered" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_fodf_to_bingham", + "docstring": "Script for fitting a Bingham distribution to each fODF lobe, as described\nin [1].\n\nThe Bingham fit is saved, with each Bingham distribution described by 7\ncoefficients (for example, for a maximum number of lobes of 5, the number\nof coefficients is 7 x 5 = 35 -- less than the number of coefficients for\nSH of maximum order 8).\n\nUsing 12 threads, the execution takes approximately 30 minutes for a brain with\n1mm isotropic resolution.\n\nFormerly: scil_fit_bingham_to_fodf.py", + "help": "usage: scil_fodf_to_bingham.py [-h] [--max_lobes MAX_LOBES] [--at AT]\n [--rt RT] [--min_sep_angle MIN_SEP_ANGLE]\n [--max_fit_angle MAX_FIT_ANGLE] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR]\n [-f]\n in_sh out_bingham\n\nScript for fitting a Bingham distribution to each fODF lobe, as described\nin [1].\n\nThe Bingham fit is saved, with each Bingham distribution described by 7\ncoefficients (for example, for a maximum number of lobes of 5, the number\nof coefficients is 7 x 5 = 35 -- less than the number of coefficients for\nSH of maximum order 8).\n\nUsing 12 threads, the execution takes approximately 30 minutes for a brain with\n1mm isotropic resolution.\n\nFormerly: scil_fit_bingham_to_fodf.py\n\npositional arguments:\n in_sh Input SH image.\n out_bingham Output Bingham functions image.\n\noptions:\n -h, --help show this help message and exit\n --max_lobes MAX_LOBES\n Maximum number of lobes per voxel to extract. 
[5]\n --at AT Absolute threshold for peaks extraction. [0.0]\n --rt RT Relative threshold for peaks extraction. [0.1]\n --min_sep_angle MIN_SEP_ANGLE\n Minimum separation angle between two peaks. [25.0]\n --max_fit_angle MAX_FIT_ANGLE\n Maximum distance in degrees around a peak direction for fitting the Bingham function. [15.0]\n --mask MASK Optional mask file. Only SH inside the mask are fitted.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -f Force overwriting of the output files.\n\n[1] T. W. Riffert, J. Schreiber, A. Anwander, and T. R. Kn\u00f6sche, \u201cBeyond\n fractional anisotropy: Extraction of bundle-specific structural metrics\n from crossing fiber models,\u201d NeuroImage, vol. 100, pp. 176-191, Oct. 2014,\n doi: 10.1016/j.neuroimage.2014.06.015.\n\n[2] J. Schreiber, T. Riffert, A. Anwander, and T. R. Kn\u00f6sche, \u201cPlausibility\n Tracking: A method to evaluate anatomical connectivity and microstructural\n properties along fiber pathways,\u201d NeuroImage, vol. 90, pp. 163-178, Apr.\n 2014, doi: 10.1016/j.neuroimage.2014.01.002.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "lobe", + "lobe" + ], + [ + "methods", + "method" + ], + [ + "direction", + "direction" + ], + [ + "examine", + "evaluate" + ], + [ + "connectivity", + "connectivity" + ], + [ + "bundles", + "bundle" + ], + [ + "image", + "image" + ], + [ + "pathway", + "pathways", + "pathways" + ], + [ + "processes", + "processes" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "function", + "functions", + "function" + ], + [ + "lobes", + "lobes" + ], + [ + "structural", + "structural" + ], + [ + "anatomical", + "anatomy", + "anatomical" + ], + [ + "voxel", + "voxel" + ], + [ + "tracking", + "tracking" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ], + [ + "brain", + "brain" + ], + [ + "function", + "functions", + "functions" + ] + ], + "keywords": [] + }, + { + "name": "scil_freewater_maps", + "docstring": "Compute Free Water maps [1] using AMICO.\nThis script supports both single and multi-shell data.\n\nFormerly: scil_compute_freewater.py", + "help": "usage: scil_freewater_maps.py [-h] [--mask MASK] [--out_dir OUT_DIR]\n [--b_thr B_THR] [--para_diff PARA_DIFF]\n [--iso_diff ISO_DIFF]\n [--perp_diff_min PERP_DIFF_MIN]\n [--perp_diff_max PERP_DIFF_MAX]\n [--lambda1 LAMBDA1] [--lambda2 LAMBDA2]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--mouse] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec\n\nCompute Free Water maps [1] using AMICO.\nThis script supports both single and multi-shell data.\n\nFormerly: scil_compute_freewater.py\n\npositional arguments:\n in_dwi DWI file.\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-vectors filename, in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Brain mask filename.\n --out_dir OUT_DIR Output directory for the Free Water results. [results]\n --b_thr B_THR Limit value to consider that a b-value is on an\n existing shell. Above this limit, the b-value is\n placed on a new shell. This includes b0s values.\n --mouse If set, use mouse fitting profile.\n --processes NBR Number of sub-processes to start. Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. 
Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n\nModel options:\n --para_diff PARA_DIFF\n Axial diffusivity (AD) in the CC. [0.0015]\n --iso_diff ISO_DIFF Mean diffusivity (MD) in ventricles. [0.003]\n --perp_diff_min PERP_DIFF_MIN\n Radial diffusivity (RD) minimum. [0.0001]\n --perp_diff_max PERP_DIFF_MAX\n Radial diffusivity (RD) maximum. [0.0007]\n --lambda1 LAMBDA1 First regularization parameter. [0.0]\n --lambda2 LAMBDA2 Second regularization parameter. [0.25]\n\nKernels options:\n --save_kernels DIRECTORY\n Output directory for the COMMIT kernels.\n --load_kernels DIRECTORY\n Input directory where the COMMIT kernels are located.\n --compute_only Compute kernels only, --save_kernels must be used.\n\nReference:\n [1] Pasternak O, Sochen N, Gur Y, Intrator N, Assaf Y.\n Free water elimination and mapping from diffusion MRI.\n Magn Reson Med. 62 (3) (2009) 717-730.\n", + "synonyms": [ + [ + "axial", + "axial" + ], + [ + "diffusion", + "diffusion" + ], + [ + "maps", + "maps" + ], + [ + "parameter", + "parameter" + ], + [ + "Data", + "data", + "data" + ], + [ + "processes", + "processes" + ], + [ + "level", + "level" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_freewater_priors", + "docstring": "Synonym for scil_NODDI_priors.py", + "help": "usage: scil_freewater_priors.py [-h]\n [--fa_min_single_fiber FA_MIN_SINGLE_FIBER]\n [--fa_max_ventricles FA_MAX_VENTRICLES]\n [--md_min_ventricles MD_MIN_VENTRICLES]\n [--roi_radius ROI_RADIUS]\n [--roi_center pos pos pos]\n [--out_txt_1fiber_para FILE]\n [--out_txt_1fiber_perp FILE]\n [--out_mask_1fiber FILE]\n [--out_txt_ventricles FILE]\n [--out_mask_ventricles FILE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_FA in_AD in_RD in_MD\n\nCompute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py\n\npositional arguments:\n in_FA Path to the FA volume.\n in_AD Path to the axial diffusivity (AD) volume.\n in_RD Path to the radial diffusivity (RD) volume.\n in_MD Path to the mean diffusivity (MD) volume.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics options:\n --fa_min_single_fiber FA_MIN_SINGLE_FIBER\n Minimal threshold of FA (voxels above that threshold are considered in \n the single fiber mask). [0.7]\n --fa_max_ventricles FA_MAX_VENTRICLES\n Maximal threshold of FA (voxels under that threshold are considered in \n the ventricles). [0.1]\n --md_min_ventricles MD_MIN_VENTRICLES\n Minimal threshold of MD in mm2/s (voxels above that threshold are considered \n to be in the ventricles). [0.003]\n\nRegions options:\n --roi_radius ROI_RADIUS\n Radius of the region used to estimate the priors. The roi will be a cube spanning \n from ROI_CENTER in each direction. [20]\n --roi_center pos pos pos\n Center of the roi of size roi_radius used to estimate the priors; a 3-value coordinate. 
\n If not set, uses the center of the 3D volume.\n\nOutputs:\n --out_txt_1fiber_para FILE\n Output path for the text file containing the single fiber average value of AD.\n If not set, the file will not be saved.\n --out_txt_1fiber_perp FILE\n Output path for the text file containing the single fiber average value of RD.\n If not set, the file will not be saved.\n --out_mask_1fiber FILE\n Output path for single fiber mask. If not set, the mask will not be saved.\n --out_txt_ventricles FILE\n Output path for the text file containing the ventricles average value of MD.\n If not set, the file will not be saved.\n --out_mask_ventricles FILE\n Output path for the ventricle mask.\n If not set, the mask will not be saved.\n\nReference:\n [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n NODDI: practical in vivo neurite orientation dispersion and density\n imaging of the human brain. NeuroImage. 2012 Jul 16;61:1000-16.\n", + "synonyms": [ + [ + "region", + "regions", + "region" + ], + [ + "human", + "human" + ], + [ + "axial", + "axial" + ], + [ + "direction", + "direction" + ], + [ + "orientation", + "orientation" + ], + [ + "region", + "regions", + "regions" + ], + [ + "imaging", + "imaging" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "voxel", + "voxels" + ], + [ + "vivo", + "vivo" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "average", + "average" + ], + [ + "considered", + "considered" + ], + [ + "level", + "level" + ], + [ + "brain", + "brain" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_frf_mean", + "docstring": "Compute the mean Fiber Response Function from a set of individually\ncomputed Response Functions.\n\nThe FRF files are obtained from scil_frf_ssst.py, scil_frf_msmt.py in the\ncase of multi-shell data or scil_frf_memsmt.py in the case of multi-encoding\nmulti-shell data.\n\nFormerly: scil_compute_mean_frf.py", + "help": "usage: scil_frf_mean.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n list [list ...] file\n\nCompute the mean Fiber Response Function from a set of individually\ncomputed Response Functions.\n\nThe FRF files are obtained from scil_frf_ssst.py, scil_frf_msmt.py in the\ncase of multi-shell data or scil_frf_memsmt.py in the case of multi-encoding\nmulti-shell data.\n\nFormerly: scil_compute_mean_frf.py\n\npositional arguments:\n list List of FRF filepaths.\n file Path of the output mean FRF file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "Data", + "data", + "data" + ], + [ + "level", + "level" + ], + [ + "function", + "functions", + "function" + ], + [ + "function", + "functions", + "functions" + ] + ], + "keywords": [] + }, + { + "name": "scil_frf_memsmt", + "docstring": "Script to estimate response functions for multi-encoding multi-shell\nmulti-tissue (memsmt) constrained spherical deconvolution. In order to operate,\nthe script only needs the data from one type of b-tensor encoding. However,\ngiving only a spherical one will not produce good fiber response functions, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. 
We thus suggest always using at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombination with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable. Moreover, all of `--in_dwis`, `--in_bvals`, `--in_bvecs` and\n`--in_bdeltas` must have the same number of arguments. Be sure to keep the\nsame order of encodings throughout all these inputs and to set `--in_bdeltas`\naccordingly (IMPORTANT).\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n\nIn the wm, we compute the response function in each voxel where\nthe FA is above threshold_fa_wm.\n\nIn the gm (or csf), we compute the response function in each voxel where\nthe FA is below threshold_fa_gm (or threshold_fa_csf) and where\nthe MD is below threshold_md_gm (or threshold_md_csf).\n\n>>> scil_frf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_frf.py", + "help": "usage: scil_frf_memsmt.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--mask MASK]\n [--mask_wm MASK_WM] [--mask_gm MASK_GM]\n [--mask_csf MASK_CSF] [--fa_thr_wm FA_THR_WM]\n [--fa_thr_gm FA_THR_GM] [--fa_thr_csf FA_THR_CSF]\n [--md_thr_gm MD_THR_GM] [--md_thr_csf MD_THR_CSF]\n [--min_nvox MIN_NVOX] [--tolerance tol]\n [--skip_b0_check] [--dti_bval_limit DTI_BVAL_LIMIT]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--wm_frf_mask file] [--gm_frf_mask file]\n [--csf_frf_mask file] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n out_wm_frf out_gm_frf out_csf_frf\n\nScript to estimate response functions for multi-encoding multi-shell\nmulti-tissue (memsmt) constrained spherical deconvolution. In order to operate,\nthe script only needs the data from one type of b-tensor encoding. However,\ngiving only a spherical one will not produce good fiber response functions, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest always using at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombination with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable. Moreover, all of `--in_dwis`, `--in_bvals`, `--in_bvecs` and\n`--in_bdeltas` must have the same number of arguments. 
Be sure to keep the\nsame order of encodings throughout all these inputs and to set `--in_bdeltas`\naccordingly (IMPORTANT).\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n\nIn the wm, we compute the response function in each voxel where\nthe FA is above threshold_fa_wm.\n\nIn the gm (or csf), we compute the response function in each voxel where\nthe FA is below threshold_fa_gm (or threshold_fa_csf) and where\nthe MD is below threshold_md_gm (or threshold_md_csf).\n\n>>> scil_frf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_frf.py\n\npositional arguments:\n out_wm_frf Path to the output WM frf file, in .txt format.\n out_gm_frf Path to the output GM frf file, in .txt format.\n out_csf_frf Path to the output CSF frf file, in .txt format.\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as \n dwi, bval and bvec inputs.\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for\n computations and reconstruction. Useful if no tissue masks are available.\n --mask_wm MASK_WM Path to the input WM mask file, used to improve the final WM frf mask.\n --mask_gm MASK_GM Path to the input GM mask file, used to improve the final GM frf mask.\n --mask_csf MASK_CSF Path to the input CSF mask file, used to improve the final CSF frf mask.\n --fa_thr_wm FA_THR_WM\n If supplied, use this threshold to select single WM fiber voxels from \n the FA inside the WM mask defined by mask_wm. \n Each voxel above this threshold will be selected. [0.7]\n --fa_thr_gm FA_THR_GM\n If supplied, use this threshold to select GM voxels from the FA inside \n the GM mask defined by mask_gm. \n Each voxel below this threshold will be selected. [0.2]\n --fa_thr_csf FA_THR_CSF\n If supplied, use this threshold to select CSF voxels from the FA inside \n the CSF mask defined by mask_csf. \n Each voxel below this threshold will be selected. [0.1]\n --md_thr_gm MD_THR_GM\n If supplied, use this threshold to select GM voxels from the MD inside \n the GM mask defined by mask_gm. \n Each voxel below this threshold will be selected. [0.0007]\n --md_thr_csf MD_THR_CSF\n If supplied, use this threshold to select CSF voxels from the MD inside \n the CSF mask defined by mask_csf. \n Each voxel below this threshold will be selected. [0.003]\n --min_nvox MIN_NVOX Minimal number of voxels needed for each tissue mask in order to \n proceed to frf estimation. [100]\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. 
To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --dti_bval_limit DTI_BVAL_LIMIT\n The highest b-value taken for the DTI model. [1200]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to estimate the \n response functions. The roi will be a cuboid spanning from the middle of \n the volume in each direction with the different radii. The type is either \n an int (e.g. --roi_radii 10) or an array-like (3,) (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the cuboid roi using roi_radii. \n [center of the 3D volume] (e.g. --roi_center 66 79 79)\n --wm_frf_mask file Path to the output WM frf mask file, the voxels used to compute the WM frf.\n --gm_frf_mask file Path to the output GM frf mask file, the voxels used to compute the GM frf.\n --csf_frf_mask file Path to the output CSF frf mask file, the voxels used to compute the CSF frf.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "variance", + "variance" + ], + [ + "superior", + "superior" + ], + [ + "direction", + "direction" + ], + [ + "white", + "white" + ], + [ + "supported", + "supported" + ], + [ + "work", + "work" + ], + [ + "image", + "image" + ], + [ + "high", + "high" + ], + [ + "diffusion", + "diffusion" + ], + [ + "middle", + "middle" + ], + [ + "Data", + "data", + "data" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "function", + "functions", + "function" + ], + [ + "voxel", + "voxels" + ], + [ + "based", + "based" + ], + [ + "shape", + "shape" + ], + [ + "matter", + "matter" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "analysis", + "analysis" + ], + [ + "highest", + "highest" + ], + [ + "function", + "functions", + "functions" + ], + [ + "defined", + "defined" + ] + ], + "keywords": [] + }, + { + "name": "scil_frf_msmt", + "docstring": "Compute response functions for multi-shell multi-tissue (MSMT) constrained\nspherical deconvolution from DWI data.\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n - In the wm, we compute the response function in each voxel where the FA is\n above threshold_fa_wm.\n - In the gm (or csf), we compute the response function in each voxel where\n the FA is below threshold_fa_gm (or threshold_fa_csf) and where the MD\n is below threshold_md_gm (or threshold_md_csf).\n\nWe output one response function file for each tissue, containing the response\nfunction for each b-value (arranged by lines). These are saved as the diagonal\nof the axis-symmetric diffusion tensor (3 e-values) and a mean b0 value.\nFor example, a typical wm_frf is [15e-4, 4e-4, 4e-4, 700], where the tensor\ne-values are (15,4,4)x10^-4 mm^2/s and the mean b0 is 700.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical deconvolution\nfor improved analysis of multi-shell diffusion MRI data. 
Neuroimage (2014)\n\nFormerly: scil_compute_msmt_frf.py", + "help": "usage: scil_frf_msmt.py [-h] [--mask MASK] [--mask_wm MASK_WM]\n [--mask_gm MASK_GM] [--mask_csf MASK_CSF]\n [--fa_thr_wm FA_THR_WM] [--fa_thr_gm FA_THR_GM]\n [--fa_thr_csf FA_THR_CSF] [--md_thr_gm MD_THR_GM]\n [--md_thr_csf MD_THR_CSF] [--min_nvox MIN_NVOX]\n [--tolerance TOLERANCE] [--skip_b0_check]\n [--dti_bval_limit DTI_BVAL_LIMIT]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--wm_frf_mask file] [--gm_frf_mask file]\n [--csf_frf_mask file] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_wm_frf out_gm_frf\n out_csf_frf\n\nCompute response functions for multi-shell multi-tissue (MSMT) constrained\nspherical deconvolution from DWI data.\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n - In the wm, we compute the response function in each voxel where the FA is\n above threshold_fa_wm.\n - In the gm (or csf), we compute the response function in each voxel where\n the FA is below threshold_fa_gm (or threshold_fa_csf) and where the MD\n is below threshold_md_gm (or threshold_md_csf).\n\nWe output one response function file for each tissue, containing the response\nfunction for each b-value (arranged by lines). These are saved as the diagonal\nof the axis-symmetric diffusion tensor (3 e-values) and a mean b0 value.\nFor example, a typical wm_frf is [15e-4, 4e-4, 4e-4, 700], where the tensor\ne-values are (15,4,4)x10^-4 mm^2/s and the mean b0 is 700.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical deconvolution\nfor improved analysis of multi-shell diffusion MRI data. Neuroimage (2014)\n\nFormerly: scil_compute_msmt_frf.py\n\npositional arguments:\n in_dwi Path to the input diffusion volume.\n in_bval Path to the bval file, in FSL format.\n in_bvec Path to the bvec file, in FSL format.\n out_wm_frf Path to the output WM frf file, in .txt format.\n out_gm_frf Path to the output GM frf file, in .txt format.\n out_csf_frf Path to the output CSF frf file, in .txt format.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to a binary mask. Only the data inside the mask\n will be used for computations and reconstruction.\n Useful if no tissue masks are available.\n --mask_wm MASK_WM Path to the input WM mask file, used to improve the\n final WM frf mask.\n --mask_gm MASK_GM Path to the input GM mask file, used to improve the\n final GM frf mask.\n --mask_csf MASK_CSF Path to the input CSF mask file, used to improve the\n final CSF frf mask.\n --fa_thr_wm FA_THR_WM\n If supplied, use this threshold to select single WM\n fiber voxels from the FA inside the WM mask defined by\n mask_wm. Each voxel above this threshold will be\n selected. 
[0.7]\n --fa_thr_gm FA_THR_GM\n If supplied, use this threshold to select GM voxels\n from the FA inside the GM mask defined by mask_gm.\n Each voxel below this threshold will be selected.\n [0.2]\n --fa_thr_csf FA_THR_CSF\n If supplied, use this threshold to select CSF voxels\n from the FA inside the CSF mask defined by mask_csf.\n Each voxel below this threshold will be selected.\n [0.1]\n --md_thr_gm MD_THR_GM\n If supplied, use this threshold to select GM voxels\n from the MD inside the GM mask defined by mask_gm.\n Each voxel below this threshold will be selected.\n [0.0007]\n --md_thr_csf MD_THR_CSF\n If supplied, use this threshold to select CSF voxels\n from the MD inside the CSF mask defined by mask_csf.\n Each voxel below this threshold will be selected.\n [0.003]\n --min_nvox MIN_NVOX Minimal number of voxels needed for each tissue mask\n in order to proceed to frf estimation. [100]\n --tolerance TOLERANCE\n The tolerated gap between the b-values to extract and\n the current b-value. [20]\n --skip_b0_check By default, we supervise that at least one b0 exists\n in your data (i.e. b-values below the default\n --tolerance). Use this option to allow continuing even\n if the minimum b-value is suspiciously high. Use with\n care, and only if you understand your data.\n --dti_bval_limit DTI_BVAL_LIMIT\n The highest b-value taken for the DTI model. [1200]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to\n estimate the response functions. The roi will be a\n cuboid spanning from the middle of the volume in each\n direction with the different radii. The type is either\n an int (e.g. --roi_radii 10) or an array-like (3,)\n (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the cuboid roi\n using roi_radii. [center of the 3D volume] (e.g.\n --roi_center 66 79 79)\n --wm_frf_mask file Path to the output WM frf mask file, the voxels used\n to compute the WM frf.\n --gm_frf_mask file Path to the output GM frf mask file, the voxels used\n to compute the GM frf.\n --csf_frf_mask file Path to the output CSF frf mask file, the voxels used\n to compute the CSF frf.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. 
Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "superior", + "superior" + ], + [ + "direction", + "direction" + ], + [ + "white", + "white" + ], + [ + "high", + "high" + ], + [ + "diffusion", + "diffusion" + ], + [ + "middle", + "middle" + ], + [ + "Data", + "data", + "data" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "function", + "functions", + "function" + ], + [ + "voxel", + "voxels" + ], + [ + "based", + "based" + ], + [ + "matter", + "matter" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "analysis", + "analysis" + ], + [ + "highest", + "highest" + ], + [ + "function", + "functions", + "functions" + ], + [ + "defined", + "defined" + ] + ], + "keywords": [] + }, + { + "name": "scil_frf_set_diffusivities", + "docstring": "Replace the fiber response function in the FRF file.\nUse this script when you want to use a fixed response function\nand keep the mean b0.\n\nThe FRF file is obtained from scil_frf_ssst.py or scil_frf_msmt.py in the case\nof multi-shell data.\n\nFormerly: scil_set_response_function.py", + "help": "usage: scil_frf_set_diffusivities.py [-h] [--no_factor]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n input new_frf output\n\nReplace the fiber response function in the FRF file.\nUse this script when you want to use a fixed response function\nand keep the mean b0.\n\nThe FRF file is obtained from scil_frf_ssst.py or scil_frf_msmt.py in the case\nof multi-shell data.\n\nFormerly: scil_set_response_function.py\n\npositional arguments:\n input Path of the FRF file.\n new_frf New response function given as a tuple. We will replace the \n response function in frf_file with this fiber response \n function x 10**-4 (e.g. 15,4,4). \n If multi-shell, write the first shell, then the second shell, \n and the third, etc. (e.g. 15,4,4,13,5,5,12,5,5).\n output Path of the new FRF file.\n\noptions:\n -h, --help show this help message and exit\n --no_factor If supplied, the fiber response function is\n evaluated without the x 10**-4 factor. [False].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "Data", + "data", + "data" + ], + [ + "function", + "functions", + "function" + ], + [ + "level", + "level" + ], + [ + "false", + "false" + ], + [ + "examined", + "evaluated" + ] + ], + "keywords": [] + }, + { + "name": "scil_frf_ssst", + "docstring": "Compute a single Fiber Response Function from a DWI.\n\nA DTI fit is made, and voxels containing a single fiber population are\nfound using a threshold on the FA.\n\nFormerly: scil_compute_ssst_frf.py", + "help": "usage: scil_frf_ssst.py [-h] [--mask MASK] [--mask_wm MASK_WM]\n [--fa_thresh FA_THRESH]\n [--min_fa_thresh MIN_FA_THRESH] [--min_nvox MIN_NVOX]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec frf_file\n\nCompute a single Fiber Response Function from a DWI.\n\nA DTI fit is made, and voxels containing a single fiber population are\nfound using a threshold on the FA.\n\nFormerly: scil_compute_ssst_frf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n frf_file Path to the output FRF file, in .txt format, saved by Numpy.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to a binary mask. Only the data inside the mask will be used \n for computations and reconstruction. Useful if no white matter mask \n is available.\n --mask_wm MASK_WM Path to a binary white matter mask. Only the data inside this mask \n and above the threshold defined by --fa_thresh will be used to estimate the \n fiber response function.\n --fa_thresh FA_THRESH\n If supplied, use this threshold as the initial threshold to select \n single fiber voxels. [0.7]\n --min_fa_thresh MIN_FA_THRESH\n If supplied, this is the minimal value that will be tried when looking \n for single fiber voxels. [0.5]\n --min_nvox MIN_NVOX Minimal number of voxels needing to be identified as single fiber voxels \n in the automatic estimation. [300]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to estimate the \n response functions. The roi will be a cuboid spanning from the middle of \n the volume in each direction with the different radii. The type is either \n an int (e.g. --roi_radii 10) or an array-like (3,) (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the roi of size roi_radii. [center of the 3D volume]\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences: [1] Tournier et al. 
NeuroImage 2007\n", + "synonyms": [ + [ + "population", + "population" + ], + [ + "direction", + "direction" + ], + [ + "white", + "white" + ], + [ + "high", + "high" + ], + [ + "diffusion", + "diffusion" + ], + [ + "middle", + "middle" + ], + [ + "Data", + "data", + "data" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "function", + "functions", + "function" + ], + [ + "voxel", + "voxels" + ], + [ + "matter", + "matter" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "considered", + "considered" + ], + [ + "level", + "level" + ], + [ + "function", + "functions", + "functions" + ], + [ + "defined", + "defined" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_get_version", + "docstring": "Gives you information about your current scilpy installation.\nThis is useful for non-developers to give you the information\nneeded to reproduce your results, or to help debugging.\n\nIf you are experiencing a bug, please run this script and\nsend the output to the scilpy developers.", + "help": "usage: scil_get_version.py [-h] [--show_dependencies]\n [-v [{DEBUG,INFO,WARNING}]]\n\nGives you information about your current scilpy installation.\nThis is useful for non-developers to give you the information\nneeded to reproduce your results, or to help debugging.\n\nIf you are experiencing a bug, please run this script and\nsend the output to the scilpy developers.\n\noptions:\n -h, --help show this help message and exit\n --show_dependencies Show the dependencies of scilpy.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [ + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_gradients_apply_transform", + "docstring": "Transform bvecs using an affine/rigid transformation.\n\nFormerly: scil_apply_transform_to_bvecs.py.", + "help": "usage: scil_gradients_apply_transform.py [-h] [--inverse]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bvecs in_transfo out_bvecs\n\nTransform bvecs using an affine/rigid transformation.\n\nFormerly: scil_apply_transform_to_bvecs.py.\n\npositional arguments:\n in_bvecs Path of the bvec file, in FSL format.\n in_transfo Path of the file containing the 4x4 \n transformation matrix (.txt, .npy or .mat).\n out_bvecs Output filename of the transformed bvecs.\n\noptions:\n -h, --help show this help message and exit\n --inverse Apply the inverse transformation.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "applied", + "apply" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_gradients_convert", + "docstring": "Script to convert gradient tables between FSL and MRtrix formats.\n\nFormerly: scil_convert_gradients_mrtrix_to_fsl.py or\nscil_convert_gradients_fsl_to_mrtrix.py", + "help": "usage: scil_gradients_convert.py [-h] (--input_fsl | --input_mrtrix) [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n GRADIENT_FILE(S) [GRADIENT_FILE(S) ...]\n output\n\nScript to convert gradient tables between FSL and MRtrix formats.\n\nFormerly: scil_convert_gradients_mrtrix_to_fsl.py or\nscil_convert_gradients_fsl_to_mrtrix.py\n\npositional arguments:\n GRADIENT_FILE(S) Path(s) to the gradient file(s). Either FSL (.bval, .bvec) or MRtrix (.b).\n output Basename of output without extension. 
Extension(s) will be added automatically (.b for MRtrix, .bval/.bvec for FSL).\n\noptions:\n -h, --help show this help message and exit\n --input_fsl FSL format.\n --input_mrtrix MRtrix format.\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [ + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_gradients_generate_sampling", + "docstring": "Generate multi-shell gradient sampling with various processing options. Helps\naccelerate gradients, optimize duty cycle and avoid artefacts.\n\nMulti-shell gradient sampling is generated as in [1]. The bvecs are then\nflipped to maximize spread for eddy current correction, b0s are interleaved at\nequal spacing and the non-b0 samples are finally shuffled to minimize the total\ndiffusion gradient amplitude over a few TR.\n\nFormerly: scil_generate_gradient_sampling.py", + "help": "usage: scil_gradients_generate_sampling.py [-h] [--eddy] [--duty]\n [--no_b0_start NO_B0_START | --b0_every B0_EVERY]\n [--b0_end] [--b0_value B0_VALUE]\n [--b0_philips]\n (--bvals bvals [bvals ...] | --b_lin_max B_LIN_MAX | --q_lin_max Q_LIN_MAX)\n (--fsl | --mrtrix)\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n nb_samples_per_shell\n [nb_samples_per_shell ...]\n out_basename\n\nGenerate multi-shell gradient sampling with various processing options. Helps\naccelerate gradients, optimize duty cycle and avoid artefacts.\n\nMulti-shell gradient sampling is generated as in [1]. The bvecs are then\nflipped to maximize spread for eddy current correction, b0s are interleaved at\nequal spacing and the non-b0 samples are finally shuffled to minimize the total\ndiffusion gradient amplitude over a few TR.\n\nFormerly: scil_generate_gradient_sampling.py\n\npositional arguments:\n nb_samples_per_shell Number of samples on each non b0 shell. \n If multishell, provide a number per shell.\n out_basename Gradient sampling output basename (don't include extension).\n Please add options --fsl and/or --mrtrix below.\n\noptions:\n -h, --help show this help message and exit\n --eddy If set, we apply eddy optimization.\n B-vectors are flipped to be well spread without symmetry.\n --duty If set, we apply duty cycle optimization. \n B-vectors are shuffled to reduce consecutive colinearity in the samples. [False]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nb0 acquisitions:\n Default if you add no option is to have a b0 at the start.\n\n --no_b0_start NO_B0_START\n If set, do not add a b0 at the beginning. \n --b0_every B0_EVERY Interleave a b0 every n=b0_every values. Starts after the first b0 \n (cannot be used with --no_b0_start). Must be an integer >= 1.\n --b0_end If set, adds a b0 as last sample.\n --b0_value B0_VALUE b-value of the b0s. 
[0.0]\n --b0_philips If set, replaces values of b0s bvecs by existing bvecs for Philips handling.\n\nNon-b0 acquisitions:\n --bvals bvals [bvals ...]\n bval of each non-b0 shell.\n --b_lin_max B_LIN_MAX\n b-max for linear bval distribution in *b*.\n --q_lin_max Q_LIN_MAX\n b-max for linear bval distribution in *q*; \n the square root of b-values will be linearly distributed.\n\nSave as:\n --fsl Save in FSL format (.bvec/.bval).\n --mrtrix Save in MRtrix format (.b).\n\nReferences: [1] Emmanuel Caruyer, Christophe Lenglet, Guillermo Sapiro,\nRachid Deriche. Design of multishell gradient sampling with uniform coverage\nin diffusion MRI. Magnetic Resonance in Medicine, Wiley, 2013, 69 (6),\npp. 1534-1540. \n \n", + "synonyms": [ + [ + "variety", + "various" + ], + [ + "diffusion", + "diffusion" + ], + [ + "applied", + "apply" + ], + [ + "processing", + "processing" + ], + [ + "level", + "level" + ], + [ + "total", + "total" + ], + [ + "false", + "false" + ] + ], + "keywords": [] + }, + { + "name": "scil_gradients_modify_axes", + "docstring": "Flip (ex, x --> -x) or swap (ex, x <-> y) chosen axes of the gradient sampling\nmatrix. Result will be saved in the same format as input gradient sampling\nfile.\n\nFormerly: scil_flip_gradients.py or scil_swap_gradient_axis.py", + "help": "usage: scil_gradients_modify_axes.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_gradient_sampling_file\n out_gradient_sampling_file\n {1,2,3,-1,-2,-3} {1,2,3,-1,-2,-3}\n {1,2,3,-1,-2,-3}\n\nFlip (ex, x --> -x) or swap (ex, x <-> y) chosen axes of the gradient sampling\nmatrix. Result will be saved in the same format as input gradient sampling\nfile.\n\nFormerly: scil_flip_gradients.py or scil_swap_gradient_axis.py\n\npositional arguments:\n in_gradient_sampling_file\n Path to gradient sampling file. (.bvec or .b)\n out_gradient_sampling_file\n Where to save the flipped gradient sampling file. Extension (.bvec or .b) must be the same as in_gradient_sampling_file\n {1,2,3,-1,-2,-3} The final order of the axes, compared to original order: x=1 y=2 z=3.\n Ex: to only flip y: 1 -2 3.\n Ex: to only swap x and y: 2 1 3.\n Ex: to first flip x, then permute all three axes: 3 -1 2.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "result", + "result" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_gradients_round_bvals", + "docstring": "Select b-values on specific b-value shells.\n\nWith the --tolerance argument, this is useful for sampling schemes where\nb-values of a shell are not all identical. Adjust the tolerance to vary the\naccepted interval around the targeted b-value.\n\nFor example, a b-value of 2000 and a tolerance of 20 will select all b-values\nbetween [1980, 2020] and round them to the value of 2000.\n\n>> scil_gradients_round_bvals.py bvals 0 1000 2000 newbvals --tolerance 20\n\nFormerly: scil_resample_bvals.py", + "help": "usage: scil_gradients_round_bvals.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bval shells [shells ...] out_bval\n tolerance\n\nSelect b-values on specific b-value shells.\n\nWith the --tolerance argument, this is useful for sampling schemes where\nb-values of a shell are not all identical. 
Adjust the tolerance to vary the\naccepted interval around the targeted b-value.\n\nFor example, a b-value of 2000 and a tolerance of 20 will select all b-values\nbetween [1980, 2020] and round them to the value of 2000.\n\n>> scil_gradients_round_bvals.py bvals 0 1000 2000 newbvals --tolerance 20\n\nFormerly: scil_resample_bvals.py\n\npositional arguments:\n in_bval The b-values in FSL format.\n shells The list of expected shells. For example 0 1000 2000.\n All b-values in the b_val file should correspond to one given shell (up to the tolerance).\n out_bval The name of the output b-values.\n tolerance The tolerated gap between the b-values to extract and the \n actual b-values. Expecting an integer value. Comparison is \n strict: a b-value of 1010 with a tolerance of 10 is NOT \n included in shell 1000. Suggestion: 20.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "level", + "level" + ], + [ + "specific", + "specific" + ] + ], + "keywords": [] + }, + { + "name": "scil_gradients_validate_correct", + "docstring": "Detect sign flips and/or axes swaps in the gradients table from a fiber\ncoherence index [1]. The script takes as input the principal direction(s)\nat each voxel, the b-vectors and the fractional anisotropy map and outputs\na corrected b-vectors file.\n\nA typical pipeline could be:\n>>> scil_dti_metrics.py dwi.nii.gz bval bvec --not_all --fa fa.nii.gz\n --evecs peaks.nii.gz\n>>> scil_gradients_validate_correct.py bvec peaks_v1.nii.gz fa.nii.gz bvec_corr\n\nNote that peaks_v1.nii.gz is the file containing the direction associated\nwith the highest eigenvalue at each voxel.\n\nIt is also possible to use a file containing multiple principal directions per\nvoxel, given that they are sorted by decreasing amplitude. In that case, the\nfirst direction (with the highest amplitude) will be chosen for validation.\nOnly 4D data is supported, so the directions must be stored in a single\ndimension. For example, peaks.nii.gz from scil_fodf_metrics.py could be used.\n\nFormerly: scil_validate_and_correct_bvecs.py", + "help": "usage: scil_gradients_validate_correct.py [-h] [--mask MASK]\n [--fa_threshold FA_THRESHOLD]\n [--column_wise]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bvec in_peaks in_FA out_bvec\n\nDetect sign flips and/or axes swaps in the gradients table from a fiber\ncoherence index [1]. The script takes as input the principal direction(s)\nat each voxel, the b-vectors and the fractional anisotropy map and outputs\na corrected b-vectors file.\n\nA typical pipeline could be:\n>>> scil_dti_metrics.py dwi.nii.gz bval bvec --not_all --fa fa.nii.gz\n --evecs peaks.nii.gz\n>>> scil_gradients_validate_correct.py bvec peaks_v1.nii.gz fa.nii.gz bvec_corr\n\nNote that peaks_v1.nii.gz is the file containing the direction associated\nwith the highest eigenvalue at each voxel.\n\nIt is also possible to use a file containing multiple principal directions per\nvoxel, given that they are sorted by decreasing amplitude. In that case, the\nfirst direction (with the highest amplitude) will be chosen for validation.\nOnly 4D data is supported, so the directions must be stored in a single\ndimension. 
For example, peaks.nii.gz from scil_fodf_metrics.py could be used.\n\nFormerly: scil_validate_and_correct_bvecs.py\n\npositional arguments:\n in_bvec Path to bvec file.\n in_peaks Path to peaks file.\n in_FA Path to the fractional anisotropy file.\n out_bvec Path to corrected bvec file (FSL format).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to an optional mask. If set, FA and Peaks will only be used inside the mask.\n --fa_threshold FA_THRESHOLD\n FA threshold. Only voxels with FA higher than fa_threshold will be considered. [0.2]\n --column_wise Specify if input peaks are column-wise (..., 3, N) instead of row-wise (..., N, 3).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Schilling KG, Yeh FC, Nath V, Hansen C, Williams O, Resnick S, Anderson AW,\nLandman BA. A fiber coherence index for quality control of B-table orientation\nin diffusion MRI scans. Magn Reson Imaging. 2019 May;58:82-89.\ndoi: 10.1016/j.mri.2019.01.018.\n", + "synonyms": [ + [ + "maps", + "map" + ], + [ + "principal", + "principal" + ], + [ + "direction", + "direction" + ], + [ + "orientation", + "orientation" + ], + [ + "imaging", + "imaging" + ], + [ + "supported", + "supported" + ], + [ + "diffusion", + "diffusion" + ], + [ + "Data", + "data", + "data" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "voxel", + "voxels" + ], + [ + "considered", + "considered" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "higher", + "higher" + ], + [ + "highest", + "highest" + ], + [ + "validation", + "validation" + ] + ], + "keywords": [] + }, + { + "name": "scil_gradients_validate_correct_eddy", + "docstring": "Validate and correct gradients from eddy outputs.\nWith full AP-PA, eddy outputs a full bvec bval (2x nb of dirs and bval)\nthat doesn't fit with the output dwi (1x nb of dirs).\n\nFormerly: scil_validate_and_correct_eddy_gradients.py", + "help": "usage: scil_gradients_validate_correct_eddy.py [-h]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bvec in_bval nb_dirs\n out_bvec out_bval\n\nValidate and correct gradients from eddy outputs.\nWith full AP-PA, eddy outputs a full bvec bval (2x nb of dirs and bval)\nthat doesn't fit with the output dwi (1x nb of dirs).\n\nFormerly: scil_validate_and_correct_eddy_gradients.py\n\npositional arguments:\n in_bvec In bvec file.\n in_bval In bval file.\n nb_dirs Number of directions per DWI.\n out_bvec Out bvec file.\n out_bval Out bval file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_header_print_info", + "docstring": "Print the raw header from the provided file or only the specified keys.\nSupports trk, nii and mgz files.\n\nFormerly: scil_print_header.py", + "help": "usage: scil_header_print_info.py [-h] [--keys KEYS [KEYS ...]]\n [--print_affine] [-v [{DEBUG,INFO,WARNING}]]\n in_file\n\nPrint the raw header from the provided file or only the specified keys.\nSupports trk, nii and mgz files.\n\nFormerly: scil_print_header.py\n\npositional arguments:\n in_file Input file (trk, nii and mgz).\n\noptions:\n -h, --help show this help message and exit\n --keys KEYS [KEYS ...]\n Print only the specified keys.\n --print_affine Print nibabel's affine.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [ + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_header_validate_compatibility", + "docstring": "Will compare all input files against the first one for the compatibility\nof their spatial attributes.\n\nSpatial attributes are: affine, dimensions, voxel sizes and voxel order.\n\nFormerly: scil_verify_space_attributes_compatibility.py", + "help": "usage: scil_header_validate_compatibility.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n in_files [in_files ...]\n\nWill compare all input files against the first one for the compatibility\nof their spatial attributes.\n\nSpatial attributes are: affine, dimensions, voxel sizes and voxel order.\n\nFormerly: scil_verify_space_attributes_compatibility.py\n\npositional arguments:\n in_files List of files to compare (trk, tck and nii/nii.gz).\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "spatial", + "spatial" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_json_convert_entries_to_xlsx", + "docstring": "Convert a final aggregated json file to an Excel spreadsheet.\nTypically used during the tractometry pipeline.\n\nFormerly: scil_convert_json_to_xlsx.py", + "help": "usage: scil_json_convert_entries_to_xlsx.py [-h] [--no_sort_subs]\n [--no_sort_bundles]\n [--ignore_bundles FILE]\n [--stats_over_population]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_json out_xlsx\n\nConvert a final aggregated json file to an Excel spreadsheet.\nTypically used during the tractometry pipeline.\n\nFormerly: scil_convert_json_to_xlsx.py\n\npositional arguments:\n in_json File containing the json stats (.json).\n out_xlsx Output Excel file for the stats (.xlsx).\n\noptions:\n -h, --help show this help message and exit\n --no_sort_subs If set, subjects won't be sorted alphabetically.\n --no_sort_bundles If set, bundles won't be sorted alphabetically.\n --ignore_bundles FILE\n Path to a text file containing a list of bundles to ignore (.txt).\n One bundle, corresponding to keys in the json, per line.\n --stats_over_population\n If set, consider the input stats to be over an entire population and not subject-based.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "population", + "population" + ], + [ + "subject", + "subject" + ], + [ + "bundles", + "bundle" + ], + [ + "subjects", + "subjects" + ], + [ + "based", + "based" + ], + [ + "bundles", + "bundles" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_json_harmonize_entries", + "docstring": "This script will harmonize a json file by adding missing keys and values\nthat differ between the different layers of the dictionary.\n\nThis is used only (for now) in the Aggregate_All_* portion of tractometry-flow,\nto counter the problem of missing bundles/metrics/lesions between subjects.\n\nThe most common use case is when specific subjects have missing bundles\nwhich will cause a pandas array to be incomplete, and thus crash. Finding out\nthe union of all bundles/metrics/lesions will allow creating a complete json\n(but with NaN for missing values).\n\nFormerly: scil_harmonize_json.py", + "help": "usage: scil_json_harmonize_entries.py [-h] [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file out_file\n\n This script will harmonize a json file by adding missing keys and values\nthat differ between the different layers of the dictionary.\n\nThis is used only (for now) in the Aggregate_All_* portion of tractometry-flow,\nto counter the problem of missing bundles/metrics/lesions between subjects.\n\nThe most common use case is when specific subjects have missing bundles\nwhich will cause a pandas array to be incomplete, and thus crash. Finding out\nthe union of all bundles/metrics/lesions will allow creating a complete json\n(but with NaN for missing values).\n\nFormerly: scil_harmonize_json.py\n\npositional arguments:\n in_file Input file (json).\n out_file Output file (json).\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "create", + "create" + ], + [ + "subjects", + "subjects" + ], + [ + "bundles", + "bundles" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ] + ], + "keywords": [] + }, + { + "name": "scil_json_merge_entries", + "docstring": "Merge multiple json files into a single one.\nTypically used during the tractometry pipeline.\n\nWithout option it will simply merge all entries at the top level; the top\nlevel must not have any conflicting keys.\n\n--keep_separate option will add a parent for each file; its basename will\nbecome the key.\n\n--no_list option will merge all entries at the top level; if there is a\nconflict, the lowest level will be extended with the new values (if list) or\nadded (if value).\n\n--add_parent_key option will add a parent key before merging all entries.\n\n--remove_parent_key option will remove the parent key before merging all\nentries.\n\n--recursive option will merge all entries (scalar) at the lowest layers as a\nlist.\n\n--average_last_layer option will average all entries (scalar) at the lowest\nlayers, but instead of creating a list it creates a mean/std level.\n\nFormerly: scil_merge_json.py", + "help": "usage: scil_json_merge_entries.py [-h] [--keep_separate] [--no_list]\n [--add_parent_key ADD_PARENT_KEY]\n [--remove_parent_key] [--recursive]\n [--average_last_layer] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_json [in_json ...] out_json\n\n Merge multiple json files into a single one.\nTypically used during the tractometry pipeline.\n\nWithout option it will simply merge all entries at the top level; the top\nlevel must not have any conflicting keys.\n\n--keep_separate option will add a parent for each file; its basename will\nbecome the key.\n\n--no_list option will merge all entries at the top level; if there is a\nconflict, the lowest level will be extended with the new values (if list) or\nadded (if value).\n\n--add_parent_key option will add a parent key before merging all entries.\n\n--remove_parent_key option will remove the parent key before merging all\nentries.\n\n--recursive option will merge all entries (scalar) at the lowest layers as a\nlist.\n\n--average_last_layer option will average all entries (scalar) at the lowest\nlayers, but instead of creating a list it creates a mean/std level.\n\nFormerly: scil_merge_json.py\n\npositional arguments:\n in_json List of json files to merge (.json).\n out_json Output json file (.json).\n\noptions:\n -h, --help show this help message and exit\n --keep_separate Merge entries as separate keys based on filename.\n --no_list Merge entries knowing there is no conflict.\n --add_parent_key ADD_PARENT_KEY\n Merge all entries under a single parent.\n --remove_parent_key Merge ignoring parent key (e.g. for population).\n --recursive Merge all entries at the lowest layers.\n --average_last_layer Average all entries at the lowest layers.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "population", + "population" + ], + [ + "key", + "key" + ], + [ + "create", + "creating" + ], + [ + "based", + "based" + ], + [ + "highest", + "lowest" + ], + [ + "average", + "average" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_labels_combine", + "docstring": "Script to combine labels from multiple volumes. If there is overlap, it will\noverwrite them based on the input order.\n\n >>> scil_labels_combine.py out_labels.nii.gz\n --volume_ids animal_labels.nii 20\n --volume_ids DKT_labels.nii.gz 44 53\n --out_labels_ids 20 44 53\n >>> scil_labels_combine.py slf_labels.nii.gz\n --volume_ids a2009s_aseg.nii.gz all\n --volume_ids clean/s1__DKT.nii.gz 1028 2028\n\nFormerly: scil_combine_labels.py.", + "help": "usage: scil_labels_combine.py [-h] --volume_ids VOLUME_IDS [VOLUME_IDS ...]\n [--out_labels_ids OUT_LABELS_IDS [OUT_LABELS_IDS ...]\n | --unique | --group_in_m]\n [--background BACKGROUND] [--merge_groups]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n output\n\nScript to combine labels from multiple volumes. If there is overlap, it will\noverwrite them based on the input order.\n\n >>> scil_labels_combine.py out_labels.nii.gz\n --volume_ids animal_labels.nii 20\n --volume_ids DKT_labels.nii.gz 44 53\n --out_labels_ids 20 44 53\n >>> scil_labels_combine.py slf_labels.nii.gz\n --volume_ids a2009s_aseg.nii.gz all\n --volume_ids clean/s1__DKT.nii.gz 1028 2028\n\nFormerly: scil_combine_labels.py.\n\npositional arguments:\n output Combined labels volume output.\n\noptions:\n -h, --help show this help message and exit\n --volume_ids VOLUME_IDS [VOLUME_IDS ...]\n List of volumes directly followed by their labels:\n --volume_ids atlasA id1a id2a \n --volume_ids atlasB id1b id2b ... \n \"all\" can be used instead of id numbers.\n --out_labels_ids OUT_LABELS_IDS [OUT_LABELS_IDS ...]\n List of labels indices for output images.\n --unique If set, output id with unique labels, excluding first background value.\n --group_in_m Add (x * 10 000) to each volume labels, where x is the input volume order number.\n --background BACKGROUND\n Background id, excluded from output [0],\n the value is used as output background value.\n --merge_groups Each group from the --volume_ids option will be merged as a single label.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. 
OHBM 2019.\n Surface integration for connectome analysis in age prediction.\n \n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "connectome", + "connectome" + ], + [ + "unique", + "unique" + ], + [ + "volume", + "volumes", + "volumes" + ], + [ + "integrating", + "integration" + ], + [ + "based", + "based" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "level", + "level" + ], + [ + "analysis", + "analysis" + ] + ], + "keywords": [] + }, + { + "name": "scil_labels_dilate", + "docstring": "Dilate regions (with or without masking) from a labeled volume:\n- \"label_to_dilate\" are regions that will dilate over\n \"label_to_fill\" if close enough to it (\"distance\").\n- \"label_to_dilate\", by default (None) will be all\n non-\"label_to_fill\" and non-\"label_not_to_dilate\".\n- \"label_not_to_dilate\" will not be changed, but will not dilate.\n- \"mask\" is where the dilation is allowed (constrained)\n in addition to \"background_label\" (logical AND)\n\n>>> scil_labels_dilate.py wmparc_t1.nii.gz wmparc_dil.nii.gz \\\n --labels_to_fill 0 5001 5002 \\\n --labels_not_to_dilate 4 43 10 11 12 49 50 51\n\nFormerly: scil_dilate_labels.py", + "help": "usage: scil_labels_dilate.py [-h] [--distance DISTANCE]\n [--labels_to_dilate LABELS_TO_DILATE [LABELS_TO_DILATE ...]]\n [--labels_to_fill LABELS_TO_FILL [LABELS_TO_FILL ...]]\n [--labels_not_to_dilate LABELS_NOT_TO_DILATE [LABELS_NOT_TO_DILATE ...]]\n [--mask MASK] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file out_file\n\nDilate regions (with or without masking) from a labeled volume:\n- \"label_to_dilate\" are regions that will dilate over\n \"label_to_fill\" if close enough to it (\"distance\").\n- \"label_to_dilate\", by default (None) will be all\n non-\"label_to_fill\" and non-\"label_not_to_dilate\".\n- \"label_not_to_dilate\" will not be changed, but will not dilate.\n- \"mask\" is where the dilation is allowed (constrained)\n in addition to \"background_label\" (logical AND)\n\n>>> scil_labels_dilate.py wmparc_t1.nii.gz wmparc_dil.nii.gz \\\n --labels_to_fill 0 5001 5002 \\\n --labels_not_to_dilate 4 43 10 11 12 49 50 51\n\nFormerly: scil_dilate_labels.py\n\npositional arguments:\n in_file Path of the volume (nii or nii.gz).\n out_file Output filename of the dilated labels.\n\noptions:\n -h, --help show this help message and exit\n --distance DISTANCE Maximal distance to dilate (in mm) [2.0].\n --labels_to_dilate LABELS_TO_DILATE [LABELS_TO_DILATE ...]\n Label list to dilate. By default it dilates all \n labels not in labels_to_fill nor in labels_not_to_dilate.\n --labels_to_fill LABELS_TO_FILL [LABELS_TO_FILL ...]\n Background id / labels to be filled [[0]],\n the first one is given as output background value.\n --labels_not_to_dilate LABELS_NOT_TO_DILATE [LABELS_NOT_TO_DILATE ...]\n Label list not to dilate.\n --mask MASK Only dilate values inside the mask.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. 
OHBM 2019.\n Surface integration for connectome analysis in age prediction.\n \n", + "synonyms": [ + [ + "connectome", + "connectome" + ], + [ + "region", + "regions", + "regions" + ], + [ + "processes", + "processes" + ], + [ + "integrating", + "integration" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "level", + "level" + ], + [ + "analysis", + "analysis" + ] + ], + "keywords": [] + }, + { + "name": "scil_labels_remove", + "docstring": "Script to remove specific labels from an atlas volume.\n\n >>> scil_labels_remove.py DKT_labels.nii out_labels.nii.gz -i 5001 5002\n\nFormerly: scil_remove_labels.py", + "help": "usage: scil_labels_remove.py [-h] -i INDICES [INDICES ...]\n [--background BACKGROUND]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_labels out_labels\n\nScript to remove specific labels from an atlas volume.\n\n >>> scil_labels_remove.py DKT_labels.nii out_labels.nii.gz -i 5001 5002\n\nFormerly: scil_remove_labels.py\n\npositional arguments:\n in_labels Input labels volume.\n out_labels Output labels volume.\n\noptions:\n -h, --help show this help message and exit\n -i INDICES [INDICES ...], --indices INDICES [INDICES ...]\n List of labels indices to remove.\n --background BACKGROUND\n Integer used for removed labels [0].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. OHBM 2019.\n Surface integration for connectome analysis in age prediction.\n \n", + "synonyms": [ + [ + "connectome", + "connectome" + ], + [ + "atlas", + "atlas" + ], + [ + "integrating", + "integration" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ], + [ + "analysis", + "analysis" + ] + ], + "keywords": [] + }, + { + "name": "scil_labels_split_volume_by_ids", + "docstring": "Split a label image into multiple images where the name of the output images\nis the id of the label (ex. 35.nii.gz, 36.nii.gz, ...). If the --range option\nis not provided, all labels of the image are extracted. The label 0 is\nconsidered as the background and is ignored.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_ids.py", + "help": "usage: scil_labels_split_volume_by_ids.py [-h] [--out_dir OUT_DIR]\n [--out_prefix OUT_PREFIX]\n [-r min max min max]\n [--background BACKGROUND]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_labels\n\nSplit a label image into multiple images where the name of the output images\nis the id of the label (ex. 35.nii.gz, 36.nii.gz, ...). If the --range option\nis not provided, all labels of the image are extracted. The label 0 is\nconsidered as the background and is ignored.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_ids.py\n\npositional arguments:\n in_labels Path of the input label file, in a format supported by Nibabel.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Put all output images in a specific directory.\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n -r min max min max, --range min max min max\n Specifies a subset of labels to split, formatted as min max. Ex: -r 3 5 will give files _3, _4, _5.\n --background BACKGROUND\n Background value. Will not be saved as a separate label. 
Default: 0.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "supported", + "supported" + ], + [ + "image", + "image" + ], + [ + "considered", + "considered" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ] + ], + "keywords": [] + }, + { + "name": "scil_labels_split_volume_from_lut", + "docstring": "Split a label image into multiple images where the name of the output images\nis taken from a lookup table (ex: left-lateral-occipital.nii.gz,\nright-thalamus.nii.gz, ...). Only the labels included in the lookup table\nare extracted.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_labels.py", + "help": "usage: scil_labels_split_volume_from_lut.py [-h] [--out_dir OUT_DIR]\n [--out_prefix OUT_PREFIX]\n (--scilpy_lut {freesurfer_subcortical,dk_aggregate_structures,freesurfer_desikan_killiany} | --custom_lut CUSTOM_LUT)\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_label\n\nSplit a label image into multiple images where the name of the output images\nis taken from a lookup table (ex: left-lateral-occipital.nii.gz,\nright-thalamus.nii.gz, ...). Only the labels included in the lookup table\nare extracted.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_labels.py\n\npositional arguments:\n in_label Path of the input label file, in a format supported by Nibabel.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Put all output images in a specific directory.\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n --scilpy_lut {freesurfer_subcortical,dk_aggregate_structures,freesurfer_desikan_killiany}\n Lookup table, in the file scilpy/data/LUT, used to name the output files.\n --custom_lut CUSTOM_LUT\n Path of the lookup table file, used to name the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "lateral", + "lateral" + ], + [ + "supported", + "supported" + ], + [ + "image", + "image" + ], + [ + "gyrus", + "occipital", + "occipital" + ], + [ + "Data", + "data", + "data" + ], + [ + "left", + "left" + ], + [ + "thalamus", + "thalamus" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ] + ], + "keywords": [] + }, + { + "name": "scil_lesions_info", + "docstring": "This script will output information about lesion load in bundle(s).\nThe input can either be streamlines, binary bundle map, or a bundle voxel\nlabel map.\n\nTo be considered a valid lesion, the lesion volume must be at least\nmin_lesion_vol mm3. 
This avoids the detection of thousands of single voxel\nlesions if an automatic lesion segmentation tool is used.\n\nFormerly: scil_analyse_lesions_load.py", + "help": "usage: scil_lesions_info.py [-h]\n [--bundle BUNDLE | --bundle_mask BUNDLE_MASK | --bundle_labels_map BUNDLE_LABELS_MAP]\n [--min_lesion_vol MIN_LESION_VOL]\n [--out_lesion_atlas FILE]\n [--out_lesion_stats FILE]\n [--out_streamlines_stats FILE] [--indent INDENT]\n [--sort_keys] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_lesion out_json\n\nThis script will output information about lesion load in bundle(s).\nThe input can either be streamlines, binary bundle map, or a bundle voxel\nlabel map.\n\nTo be considered a valid lesion, the lesion volume must be at least\nmin_lesion_vol mm3. This avoids the detection of thousands of single voxel\nlesions if an automatic lesion segmentation tool is used.\n\nFormerly: scil_analyse_lesions_load.py\n\npositional arguments:\n in_lesion Binary mask of the lesion(s) (.nii.gz).\n out_json Output file for lesion information (.json).\n\noptions:\n -h, --help show this help message and exit\n --bundle BUNDLE Path of the bundle file (.trk).\n --bundle_mask BUNDLE_MASK\n Path of the bundle binary mask (.nii.gz).\n --bundle_labels_map BUNDLE_LABELS_MAP\n Path of the bundle labels map (.nii.gz).\n --min_lesion_vol MIN_LESION_VOL\n Minimum lesion volume in mm3 [7].\n --out_lesion_atlas FILE\n Save the labeled lesion(s) map (.nii.gz).\n --out_lesion_stats FILE\n Save the lesion-wise volume measure (.json).\n --out_streamlines_stats FILE\n Save the lesion-wise streamline count (.json).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "maps", + "map" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "tool", + "tool" + ], + [ + "valid", + "valid" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "considered", + "considered" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_mti_adjust_B1_header", + "docstring": "Correct B1 map header problem by applying the scaling (slope) and setting\nthe slope to 1.", + "help": "usage: scil_mti_adjust_B1_header.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_B1_map out_B1_map in_B1_json\n\nCorrect B1 map header problem by applying the scaling (slope) and setting\nthe slope to 1.\n\npositional arguments:\n in_B1_map Path to input B1 map file.\n out_B1_map Path to output B1 map file.\n in_B1_json Json file of the B1 map.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "level", + "level" + ], + [ + "applied", + "applying" + ], + [ + "maps", + "map" + ] + ], + "keywords": [] + }, + { + "name": "scil_mti_maps_MT", + "docstring": "This script computes two myelin indices maps from the Magnetization Transfer\n(MT) images.\nMagnetization Transfer is a contrast mechanism in tissue resulting from the\nproton exchange between non-aqueous protons (from macromolecules and their\nclosely associated water molecules, the \"bound\" pool) and protons in the free\nwater pool called aqueous protons. This exchange attenuates the MRI signal,\nintroducing microstructure-dependent contrast. MT's effect reflects the\nrelative density of macromolecules such as proteins and lipids; it has been\nassociated with myelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse saturating the\nprotons on non-aqueous molecules by applying a frequency irradiation. The MT maps are\nobtained using three or four contrasts: a single positive frequency image\nand/or a single negative frequency image, and two unsaturated contrasts as\nreference. These two references should be acquired with predominant PD\n(proton density) and T1 weighting at different excitation flip angles\n(a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all input must have a matching json file with the same filename\n - all contrasts must have the same number of echoes and be coregistered\n between them before running the script.\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\n\nThe output consists of a MT_native_maps folder containing the 2 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n The MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n The MT saturation is a pseudo-quantitative map representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\n\nThe final maps from MT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 1 or 2 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation.\nFor both methods, 
the nominal value of the B1 map can be set with\n --B1_nominal value\n\n\n>>> scil_mti_maps_MT.py path/to/output/directory\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_positive path/to/echo*pos.nii.gz --in_negative path/to/echo*neg.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.", + "help": "usage: scil_mti_maps_MT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n [--in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]]\n [--extended] [--filtering]\n [--in_jsons PD_json T1_json | --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time]\n [--in_B1_map IN_B1_MAP]\n [--B1_correction_method {empiric,model_based}]\n [--B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]]\n [--B1_nominal B1_NOMINAL]\n [--B1_smooth_dims B1_SMOOTH_DIMS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dir\n\nThis script computes two myelin indices maps from the Magnetization Transfer\n(MT) images.\nMagnetization Transfer is a contrast mechanism in tissue resulting from the\nproton exchange between non-aqueous protons (from macromolecules and their\nclosely associated water molecules, the \"bound\" pool) and protons in the free\nwater pool called aqueous protons. This exchange attenuates the MRI signal,\nintroducing microstructure-dependent contrast. MT's effect reflects the\nrelative density of macromolecules such as proteins and lipids; it has been\nassociated with myelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse saturating the\nprotons on non-aqueous molecules by applying a frequency irradiation. The MT maps are\nobtained using three or four contrasts: a single positive frequency image\nand/or a single negative frequency image, and two unsaturated contrasts as\nreference. 
These two references should be acquired with predominant PD\n(proton density) and T1 weighting at different excitation flip angles\n(a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all input must have a matching json file with the same filename\n - all contrasts must have the same number of echoes and be coregistered\n between them before running the script.\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\nThe output consists of a MT_native_maps folder containing the 2 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n The MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n The MT saturation is a pseudo-quantitative map representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\nThe final maps from MT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 1 or 2 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n>>> scil_mti_maps_MT.py path/to/output/directory\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_positive path/to/echo*pos.nii.gz --in_negative path/to/echo*neg.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.\n\npositional arguments:\n out_dir Path to output folder.\n\noptions:\n -h, --help show this help message and exit\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n --mask MASK Path to the binary brain mask.\n --extended If set, outputs the folder Complementary_maps.\n --filtering Gaussian filtering to remove Gibbs ringing. Not recommended.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nContrast maps:\n Path to echoes corresponding to contrast images. 
All contrasts must have \n the same number of echoes and be coregistered between them. Use * to include all echoes. \n The in_mtoff_pd input and at least one of in_positive or in_negative are required.\n\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n Path to all echoes corresponding to the positive frequency \n saturation pulse.\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n Path to all echoes corresponding to the negative frequency \n saturation pulse.\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n Path to all echoes corresponding to the predominant PD \n (proton density) weighting images with no saturation pulse.\n --in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]\n Path to all echoes corresponding to the predominant T1 \n weighting images with no saturation pulse. This one is optional, \n since it is only needed for the calculation of MTsat. \n Acquisition parameters should also be set with this image.\n\nAcquisition parameters:\n Acquisition parameters required for MTsat and ihMTsat calculation. \n These are the excitation flip angles (a_PD, a_T1), in DEGREES, and \n repetition times (TR_PD, TR_T1) of the PD and T1 images, in SECONDS. \n Can be given through json files (--in_jsons) or directly (--in_acq_parameters).\n\n --in_jsons PD_json T1_json\n Path to MToff PD json file and MToff T1 json file, in that order. \n The acquisition parameters will be extracted from these files. \n Must come from a Philips acquisition; otherwise, use in_acq_parameters.\n --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time\n Acquisition parameters in that order: flip angle of mtoff_PD, \n flip angle of mtoff_T1, repetition time of mtoff_PD, \n repetition time of mtoff_T1\n\nB1 correction:\n --in_B1_map IN_B1_MAP\n Path to B1 map coregistered to the MT contrasts.\n --B1_correction_method {empiric,model_based}\n Choice of B1 correction method. Choose between empiric and model-based. \n Note that the model-based method requires a B1 fitvalues file. \n Both methods will only correct the saturation measures. [empiric]\n --B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]\n Path to B1 fitvalues files obtained externally. Should be one .mat \n file per input MT-on image, given in this specific order: \n positive frequency saturation, negative frequency saturation.\n --B1_nominal B1_NOMINAL\n Nominal value for the B1 map. For Philips, should be 100. [100]\n --B1_smooth_dims B1_SMOOTH_DIMS\n Dimension of the square window used for B1 smoothing, in number of voxels. [5]\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "methods", + "method" + ], + [ + "create", + "create" + ], + [ + "contrast", + "contrast" + ], + [ + "Data", + "data", + "data" + ], + [ + "based", + "based" + ], + [ + "maps", + "map" + ], + [ + "white", + "white" + ], + [ + "image", + "image" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ], + [ + "weighted", + "weighted" + ], + [ + "positive", + "positive" + ], + [ + "signal", + "signal" + ], + [ + "comprised", + "consists" + ], + [ + "brain", + "brain" + ], + [ + "positive", + "negative" + ], + [ + "maps", + "maps" + ], + [ + "voxel", + "voxels" + ], + [ + "matter", + "matter" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "methods" + ], + [ + "parameters", + "parameters" + ] + ], + "keywords": [] + }, + { + "name": "scil_mti_maps_ihMT", + "docstring": "This script computes four myelin indices maps from the Magnetization Transfer\n(MT) and inhomogeneous Magnetization Transfer (ihMT) images. 
Magnetization\nTransfer is a contrast mechanism in tissue resulting from the proton exchange\nbetween non-aqueous protons (from macromolecules and their closely associated\nwater molecules, the \"bound\" pool) and protons in the free water pool called\naqueous protons. This exchange attenuates the MRI signal, introducing\nmicrostructure-dependent contrast. MT's effect reflects the relative density\nof macromolecules such as proteins and lipids; it has been associated with\nmyelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse prior to image\nacquisition (a prepulse), saturating the protons on non-aqueous molecules,\nby applying different frequency irradiation. The two MT maps and two ihMT maps\nare obtained using six contrasts: single positive frequency image, single\nnegative frequency image, dual alternating positive/negative frequency image,\ndual alternating negative/positive frequency image (saturated images);\nand two unsaturated contrasts as reference. These two references should be\nacquired with predominant PD (proton density) and T1 weighting at different\nexcitation flip angles (a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all contrasts must have the same number of echoes and be coregistered\n between them before running the script\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\n\nThe output consists of an ihMT_native_maps folder containing the 4 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n - ihMTR.nii.gz : inhomogeneous Magnetization Transfer Ratio map\n The (ih)MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n - ihMTsat.nii.gz : inhomogeneous Magnetization Transfer saturation map\n The (ih)MT saturation is a pseudo-quantitative map representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - altnp.nii.gz : dual alternating negative and positive frequency image\n - altpn.nii.gz : dual alternating positive and negative frequency image\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_d.nii.gz : MTsat computed from the mean dual frequency images\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\n\nThe final maps from ihMT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 3 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given
in this order: positive frequency saturation, negative frequency\n saturation, dual frequency saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n\n>>> scil_mti_maps_ihMT.py path/to/output/directory\n --in_altnp path/to/echo*altnp.nii.gz --in_altpn path/to/echo*altpn.nii.gz\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_negative path/to/echo*neg.nii.gz --in_positive path/to/echo*pos.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.", + "help": "usage: scil_mti_maps_ihMT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]\n --in_altnp IN_ALTNP [IN_ALTNP ...] --in_altpn\n IN_ALTPN [IN_ALTPN ...] --in_negative IN_NEGATIVE\n [IN_NEGATIVE ...] --in_positive IN_POSITIVE\n [IN_POSITIVE ...] --in_mtoff_pd IN_MTOFF_PD\n [IN_MTOFF_PD ...]\n [--in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]]\n [--extended] [--filtering]\n [--in_jsons PD_json T1_json | --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time]\n [--in_B1_map IN_B1_MAP]\n [--B1_correction_method {empiric,model_based}]\n [--B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]]\n [--B1_nominal B1_NOMINAL]\n [--B1_smooth_dims B1_SMOOTH_DIMS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dir\n\nThis script computes four myelin index maps from the Magnetization Transfer\n(MT) and inhomogeneous Magnetization Transfer (ihMT) images. Magnetization\nTransfer is a contrast mechanism in tissue resulting from the proton exchange\nbetween non-aqueous protons (from macromolecules and their closely associated\nwater molecules, the \"bound\" pool) and protons in the free water pool called\naqueous protons. This exchange attenuates the MRI signal, introducing\nmicrostructure-dependent contrast. MT's effect reflects the relative density\nof macromolecules such as proteins and lipids; it has been associated with\nmyelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse prior to image\nacquisition (a prepulse), saturating the protons on non-aqueous molecules,\nby applying different frequency irradiation. The two MT maps and two ihMT maps\nare obtained using six contrasts: single positive frequency image, single\nnegative frequency image, dual alternating positive/negative frequency image,\ndual alternating negative/positive frequency image (saturated images);\nand two unsaturated contrasts as reference.
These two references should be\nacquired with predominant PD (proton density) and T1 weighting at different\nexcitation flip angles (a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all contrasts must have the same number of echoes and be coregistered\n between them before running the script\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\nThe output consists of an ihMT_native_maps folder containing the 4 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n - ihMTR.nii.gz : inhomogeneous Magnetization Transfer Ratio map\n The (ih)MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n - ihMTsat.nii.gz : inhomogeneous Magnetization Transfer saturation map\n The (ih)MT saturation is a pseudo-quantitative map representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - altnp.nii.gz : dual alternating negative and positive frequency image\n - altpn.nii.gz : dual alternating positive and negative frequency image\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_d.nii.gz : MTsat computed from the mean dual frequency images\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\nThe final maps from ihMT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 3 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation, dual frequency saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n>>> scil_mti_maps_ihMT.py path/to/output/directory\n --in_altnp path/to/echo*altnp.nii.gz --in_altpn path/to/echo*altpn.nii.gz\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_negative path/to/echo*neg.nii.gz --in_positive path/to/echo*pos.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.\n\npositional arguments:\n out_dir Path to output folder.\n\noptions:\n -h, --help show this help message and exit\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n --mask MASK Path to the binary brain mask.\n --extended If set, outputs the folder Complementary_maps.\n
--filtering Gaussian filtering to remove Gibbs ringing. Not recommended.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nContrast maps:\n Path to echoes corresponding to contrast images. All contrasts must have \n the same number of echoes and be coregistered between them. Use * to include all echoes.\n\n --in_altnp IN_ALTNP [IN_ALTNP ...]\n Path to all echoes corresponding to the alternation of \n negative and positive frequency saturation pulse.\n --in_altpn IN_ALTPN [IN_ALTPN ...]\n Path to all echoes corresponding to the alternation of \n positive and negative frequency saturation pulse.\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n Path to all echoes corresponding to the negative frequency \n saturation pulse.\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n Path to all echoes corresponding to the positive frequency \n saturation pulse.\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n Path to all echoes corresponding to the predominant PD \n (proton density) weighting images with no saturation pulse.\n --in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]\n Path to all echoes corresponding to the predominant T1 \n weighting images with no saturation pulse. This one is optional, \n since it is only needed for the calculation of MTsat and ihMTsat. \n Acquisition parameters should also be set with this image.\n\nAcquisition parameters:\n Acquisition parameters required for MTsat and ihMTsat calculation. \n These are the excitation flip angles (a_PD, a_T1), in DEGREES, and \n repetition times (TR_PD, TR_T1) of the PD and T1 images, in SECONDS. \n Can be given through json files (--in_jsons) or directly (--in_acq_parameters).\n\n --in_jsons PD_json T1_json\n Path to MToff PD json file and MToff T1 json file, in that order. \n The acquisition parameters will be extracted from these files. \n Must come from a Philips acquisition; otherwise, use in_acq_parameters.\n --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time\n Acquisition parameters in that order: flip angle of mtoff_PD, \n flip angle of mtoff_T1, repetition time of mtoff_PD, \n repetition time of mtoff_T1\n\nB1 correction:\n --in_B1_map IN_B1_MAP\n Path to the B1 map, coregistered to the MT contrasts.\n --B1_correction_method {empiric,model_based}\n Choice of B1 correction method. Choose between empiric and model-based. \n Note that the model-based method requires a B1 fitvalues file. \n Both methods will only correct the saturation measures. [empiric]\n --B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]\n Path to B1 fitvalues files obtained externally. Should be one .mat \n file per input MT-on image, given in this specific order: \n positive frequency saturation, negative frequency saturation.\n --B1_nominal B1_NOMINAL\n Nominal value for the B1 map. For Philips, should be 100. [100]\n --B1_smooth_dims B1_SMOOTH_DIMS\n Dimension of the squared window used for B1 smoothing, in number of voxels.
[5]\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "methods", + "method" + ], + [ + "create", + "create" + ], + [ + "contrast", + "contrast" + ], + [ + "Data", + "data", + "data" + ], + [ + "based", + "based" + ], + [ + "maps", + "map" + ], + [ + "white", + "white" + ], + [ + "image", + "image" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ], + [ + "applied", + "applying" + ], + [ + "weighted", + "weighted" + ], + [ + "positive", + "positive" + ], + [ + "signal", + "signal" + ], + [ + "comprised", + "consists" + ], + [ + "brain", + "brain" + ], + [ + "positive", + "negative" + ], + [ + "maps", + "maps" + ], + [ + "voxel", + "voxels" + ], + [ + "matter", + "matter" + ], + [ + "binary", + "binary" + ], + [ + "methods", + "methods" + ], + [ + "parameters", + "parameters" + ] + ], + "keywords": [] + }, + { + "name": "scil_plot_stats_per_point", + "docstring": "Plot all mean/std per point for a subject or population json file from\ntractometry-flow.\nWARNING: For population, the displayed STDs is only showing the variation\nof the means. It does not account intra-subject STDs.\n\nFormerly: scil_plot_mean_std_per_point.py", + "help": "usage: scil_plot_stats_per_point.py [-h] [--stats_over_population]\n [--nb_pts NB_PTS] [--display_means]\n [--fill_color FILL_COLOR | --dict_colors DICT_COLORS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_json out_dir\n\nPlot all mean/std per point for a subject or population json file from\ntractometry-flow.\nWARNING: For population, the displayed STDs is only showing the variation\nof the means. It does not account intra-subject STDs.\n\nFormerly: scil_plot_mean_std_per_point.py\n\npositional arguments:\n in_json JSON file containing the mean/std per point. For example, can be created using scil_bundle_mean_std.py.\n out_dir Output directory.\n\noptions:\n -h, --help show this help message and exit\n --stats_over_population\n If set, consider the input stats to be over an entire population and not subject-based.\n --nb_pts NB_PTS Force the number of divisions for the bundles.\n Avoid unequal plots across datasets, replace missing data with zeros.\n --display_means Display the subjects means as semi-transparent line.\n Poor results when the number of subject is high.\n --fill_color FILL_COLOR\n Hexadecimal RGB color filling the region between mean +/- std. The hexadecimal RGB color should be formatted as 0xRRGGBB.\n --dict_colors DICT_COLORS\n Dictionnary mapping basename to color.Same convention as --color.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "region", + "regions", + "region" + ], + [ + "population", + "population" + ], + [ + "subject", + "subject" + ], + [ + "high", + "high" + ], + [ + "Data", + "data", + "data" + ], + [ + "subjects", + "subjects" + ], + [ + "based", + "based" + ], + [ + "bundles", + "bundles" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_qball_metrics", + "docstring": "Script to compute the Constant Solid Angle (CSA) or Analytical Q-ball model,\nthe generalized fractional anisotropy (GFA) and the peaks of the model.\n\nBy default, will output all possible files, using default names. 
Specific names\ncan be set using the file flags listed in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Descoteaux et al MRM 2007, Aganj et al MRM 2009] for details and\n[Cote et al MEDIA 2013] for quantitative comparisons.\n\nFormerly: scil_compute_qball_metrics.py", + "help": "usage: scil_qball_metrics.py [-h] [-f] [--sh_order SH_ORDER] [--mask MASK]\n [--use_qball] [--not_all] [--gfa GFA]\n [--peaks PEAKS] [--peak_indices PEAK_INDICES]\n [--sh SH] [--nufo NUFO] [--a_power A_POWER]\n [--b0_threshold thr] [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nScript to compute the Constant Solid Angle (CSA) or Analytical Q-ball model,\nthe generalized fractional anisotropy (GFA) and the peaks of the model.\n\nBy default, will output all possible files, using default names. Specific names\ncan be set using the file flags listed in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Descoteaux et al MRM 2007, Aganj et al MRM 2009] for details and\n[Cote et al MEDIA 2013] for quantitative comparisons.\n\nFormerly: scil_compute_qball_metrics.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --sh_order SH_ORDER Spherical harmonics order. Must be a positive even number [4].\n --mask MASK Path to a binary mask. Only data inside the mask will be used for computations and reconstruction [None].\n --use_qball If set, qball will be used as the odf reconstruction model instead of CSA.\n --not_all If set, will only save the files specified using the following flags.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
\n Default level is warning, default when using -v is info.\n\nFile flags:\n --gfa GFA Output filename for the generalized fractional anisotropy [gfa.nii.gz].\n --peaks PEAKS Output filename for the extracted peaks [peaks.nii.gz].\n --peak_indices PEAK_INDICES\n Output filename for the generated peaks indices on the sphere [peaks_indices.nii.gz].\n --sh SH Output filename for the spherical harmonics coefficients [sh.nii.gz].\n --nufo NUFO Output filename for the NUFO map [nufo.nii.gz].\n --a_power A_POWER Output filename for the anisotropic power map [anisotropic_power.nii.gz].\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "maps", + "map" + ], + [ + "high", + "high" + ], + [ + "diffusion", + "diffusion" + ], + [ + "positive", + "positive" + ], + [ + "processes", + "processes" + ], + [ + "Data", + "data", + "data" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "considered", + "considered" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ] + ], + "keywords": [] + }, + { + "name": "scil_rgb_convert", + "docstring": "Converts an RGB image encoded as a 4D image to an RGB image encoded as\na 3D image, or vice versa.\n\nTypically, most software tools used in the SCIL (including MI-Brain) use\nthe former, while Trackvis uses the latter.\n\nInput\n-Case 1: 4D image where the 4th dimension contains 3 values.\n-Case 2: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value.\n\nOutput\n-Case 1: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value (uint8).\n-Case 2: 4D image where the 4th dimension contains 3 values (uint8).\n\nFormerly: scil_convert_rgb.py", + "help": "usage: scil_rgb_convert.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image\n\nConverts an RGB image encoded as a 4D image to an RGB image encoded as\na 3D image, or vice versa.\n\nTypically, most software tools used in the SCIL (including MI-Brain) use\nthe former, while Trackvis uses the latter.\n\nInput\n-Case 1: 4D image where the 4th dimension contains 3 values.\n-Case 2: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value.\n\nOutput\n-Case 1: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value (uint8).\n-Case 2: 4D image where the 4th dimension contains 3 values (uint8).\n\nFormerly: scil_convert_rgb.py\n\npositional arguments:\n in_image Name of input RGB image.\n Either 4D or 3D image.\n out_image Name of output RGB image.\n Either 3D or 4D image.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "tool", + "tools" + ], + [ + "image", + "image" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_sh_convert", + "docstring": "Convert an SH file between two of the following basis choices:\n'descoteaux07', 'descoteaux07_legacy', 'tournier07' or 'tournier07_legacy'.\nUsing the sh_basis argument, both the input and the output SH bases must be\ngiven, in that order.
For more information about the bases, see\nhttps://docs.dipy.org/stable/theory/sh_basis.html.\n\nFormerly: scil_convert_sh_basis.py", + "help": "usage: scil_sh_convert.py [-h] [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_sh out_sh\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n\nConvert an SH file between two of the following basis choices:\n'descoteaux07', 'descoteaux07_legacy', 'tournier07' or 'tournier07_legacy'.\nUsing the sh_basis argument, both the input and the output SH bases must be\ngiven, in that order. For more information about the bases, see\nhttps://docs.dipy.org/stable/theory/sh_basis.html.\n\nFormerly: scil_convert_sh_basis.py\n\npositional arguments:\n in_sh Input SH filename. (nii or nii.gz)\n out_sh Output SH filename. (nii or nii.gz)\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Both the input and output bases are required, in that order.\n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy', 'tournier07']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n\noptions:\n -h, --help show this help message and exit\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "processes", + "processes" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_sh_fusion", + "docstring": "Merge a list of Spherical Harmonics files.\n\nThis merges the coefficients of multiple Spherical Harmonics files by taking,\nfor each coefficient, the one with the largest magnitude.\n\nCan be used to merge fODFs computed from different shells into one, while\nconserving the most relevant information.\n\nBased on [1] and [2].\n\nFormerly: scil_merge_sh.py", + "help": "usage: scil_sh_fusion.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_shs [in_shs ...] out_sh\n\nMerge a list of Spherical Harmonics files.\n\nThis merges the coefficients of multiple Spherical Harmonics files by taking,\nfor each coefficient, the one with the largest magnitude.\n\nCan be used to merge fODFs computed from different shells into one, while\nconserving the most relevant information.\n\nBased on [1] and [2].\n\nFormerly: scil_merge_sh.py\n\npositional arguments:\n in_shs List of SH files.\n out_sh Output SH file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReference:\n[1] Garyfallidis, E., Zucchelli, M., Houde, J-C., Descoteaux, M.\n How to perform best ODF reconstruction from the Human Connectome\n Project sampling scheme?\n ISMRM 2014.\n\n[2] Khachaturian, M. H., Wisco, J. J., & Tuch, D. S. (2007).
Boosting the\n sampling efficiency of q\u2010ball imaging using multiple wavevector fusion.\n Magnetic Resonance in Medicine: An Official Journal of the International\n Society for Magnetic Resonance in Medicine, 57(2), 289-296.\n", + "synonyms": [ + [ + "human", + "human" + ], + [ + "connectome", + "connectome" + ], + [ + "imaging", + "imaging" + ], + [ + "project", + "project" + ], + [ + "based", + "based" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_sh_to_aodf", + "docstring": "Script to estimate asymmetric ODFs (aODFs) from a spherical harmonics image.\n\nTwo methods are available:\n * Unified filtering [1] combines four asymmetric filtering methods into\n a single equation and relies on a combination of four Gaussian filters.\n * Cosine filtering [2] is a simpler implementation using cosine distance\n for assigning weights to neighbours.\n\nUnified filtering can be accelerated using OpenCL with the option --use_opencl.\nMake sure you have pyopencl installed before using this option. By default, the\nOpenCL program will run on the cpu. To use a gpu instead, also specify the\noption --device gpu.", + "help": "usage: scil_sh_to_aodf.py [-h] [--out_sym OUT_SYM]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--method {unified,cosine}]\n [--sigma_spatial SIGMA_SPATIAL]\n [--sigma_align SIGMA_ALIGN]\n [--sigma_range SIGMA_RANGE]\n [--sigma_angle SIGMA_ANGLE] [--disable_spatial]\n [--disable_align] [--disable_range]\n [--include_center] [--win_hwidth WIN_HWIDTH]\n [--sharpness SHARPNESS] [--device {cpu,gpu}]\n [--use_opencl] [--patch_size PATCH_SIZE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_sh\n\nScript to estimate asymmetric ODFs (aODFs) from a spherical harmonics image.\n\nTwo methods are available:\n * Unified filtering [1] combines four asymmetric filtering methods into\n a single equation and relies on a combination of four Gaussian filters.\n * Cosine filtering [2] is a simpler implementation using cosine distance\n for assigning weights to neighbours.\n\nUnified filtering can be accelerated using OpenCL with the option --use_opencl.\nMake sure you have pyopencl installed before using this option. By default, the\nOpenCL program will run on the cpu. To use a gpu instead, also specify the\noption --device gpu.\n\npositional arguments:\n in_sh Path to the input file.\n out_sh File name for averaged signal.\n\noptions:\n -h, --help show this help message and exit\n --out_sym OUT_SYM Name of optional symmetric output. [None]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere used for the SH to SF projection.
[repulsion200]\n --method {unified,cosine}\n Method for estimating asymmetric ODFs [unified].\n One of:\n 'unified': Unified filtering [1].\n 'cosine' : Cosine-based filtering [2].\n --device {cpu,gpu} Device to use for execution. [cpu]\n --use_opencl Accelerate code using OpenCL (requires pyopencl\n and a working OpenCL implementation).\n --patch_size PATCH_SIZE\n OpenCL patch size. [40]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nShared filter arguments:\n --sigma_spatial SIGMA_SPATIAL\n Standard deviation for spatial distance. [1.0]\n\nUnified filter arguments:\n --sigma_align SIGMA_ALIGN\n Standard deviation for alignment filter. [0.8]\n --sigma_range SIGMA_RANGE\n Standard deviation for range filter\n *relative to SF range of image*. [0.2]\n --sigma_angle SIGMA_ANGLE\n Standard deviation for angular filter\n (disabled by default).\n --disable_spatial Disable spatial filtering.\n --disable_align Disable alignment filtering.\n --disable_range Disable range filtering.\n --include_center Include center voxel in neighbourhood.\n --win_hwidth WIN_HWIDTH\n Filtering window half-width. Defaults to 3*sigma_spatial.\n\nCosine filter arguments:\n --sharpness SHARPNESS\n Specify sharpness factor to use for\n weighted average. [1.0]\n\n[1] Poirier and Descoteaux, 2024, \"A Unified Filtering Method for Estimating\n Asymmetric Orientation Distribution Functions\", Neuroimage, vol. 287,\n https://doi.org/10.1016/j.neuroimage.2024.120516\n\n[2] Poirier et al, 2021, \"Investigating the Occurrence of Asymmetric Patterns\n in White Matter Fiber Orientation Distribution Functions\", ISMRM 2021\n (abstract 0865)\n", + "synonyms": [ + [ + "spatial", + "spatial" + ], + [ + "methods", + "method" + ], + [ + "weighted", + "weighted" + ], + [ + "working", + "working" + ], + [ + "white", + "white" + ], + [ + "orientation", + "orientation" + ], + [ + "image", + "image" + ], + [ + "signal", + "signal" + ], + [ + "projection", + "projection" + ], + [ + "based", + "based" + ], + [ + "matter", + "matter" + ], + [ + "methods", + "methods" + ], + [ + "average", + "average" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "function", + "functions", + "functions" + ], + [ + "occurrence", + "occurrence" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_sh_to_rish", + "docstring": "Compute the RISH (Rotationally Invariant Spherical Harmonics) features of an SH\nsignal [1].\n\nEach RISH feature map is the total energy of its associated order.\nMathematically, it is the sum of the squared SH coefficients of the SH order.\n\nThis script supports both symmetrical and asymmetrical SH images as input, of\nany SH order.\n\nEach RISH feature will be saved as a separate file.\n\n[1] Mirzaalian, Hengameh, et al.
\"Harmonizing diffusion MRI data across\nmultiple sites and scanners.\" MICCAI 2015.\nhttps://scholar.harvard.edu/files/hengameh/files/miccai2015.pdf\n\nFormerly: scil_compute_rish_from_sh.py", + "help": "usage: scil_sh_to_rish.py [-h] [--full_basis] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_prefix\n\nCompute the RISH (Rotationally Invariant Spherical Harmonics) features of an SH\nsignal [1].\n\nEach RISH feature map is the total energy of its associated order.\nMathematically, it is the sum of the squared SH coefficients of the SH order.\n\nThis script supports both symmetrical and asymmetrical SH images as input, of\nany SH order.\n\nEach RISH feature will be saved as a separate file.\n\n[1] Mirzaalian, Hengameh, et al. \"Harmonizing diffusion MRI data across\nmultiple sites and scanners.\" MICCAI 2015.\nhttps://scholar.harvard.edu/files/hengameh/files/miccai2015.pdf\n\nFormerly: scil_compute_rish_from_sh.py\n\npositional arguments:\n in_sh Path of the sh image. They can be formatted in any sh basis, but we \n expect it to be a symmetrical one. Else, provide --full_basis.\n out_prefix Prefix of the output RISH files to save. Suffixes will be \n based on the sh orders.\n\noptions:\n -h, --help show this help message and exit\n --full_basis Input SH image uses a full SH basis (asymmetrical).\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computation.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "maps", + "map" + ], + [ + "image", + "image" + ], + [ + "diffusion", + "diffusion" + ], + [ + "signal", + "signal" + ], + [ + "Data", + "data", + "data" + ], + [ + "based", + "based" + ], + [ + "binary", + "binary" + ], + [ + "level", + "level" + ], + [ + "total", + "total" + ] + ], + "keywords": [] + }, + { + "name": "scil_sh_to_sf", + "docstring": "Script to sample SF values from a Spherical Harmonics signal. Outputs a Nifti\nfile with the SF values and an associated .bvec file with the chosen\ndirections.\n\nIf converting from SH to a DWI-like SF volume, --in_bval and --in_b0 need\nto be provided to concatenate the b0 image to the SF, and to generate the new\nbvals file. Otherwise, no .bval file will be created.\n\nFormerly: scil_compute_sf_from_sh.py", + "help": "usage: scil_sh_to_sf.py [-h]\n (--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724} | --in_bvec IN_BVEC)\n [--dtype {float32,float64}] [--in_bval IN_BVAL]\n [--in_b0 IN_B0] [--out_bval OUT_BVAL]\n [--out_bvec OUT_BVEC] [--b0_scaling]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--full_basis] [--b0_threshold thr] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_sf\n\nScript to sample SF values from a Spherical Harmonics signal. Outputs a Nifti\nfile with the SF values and an associated .bvec file with the chosen\ndirections.\n\nIf converting from SH to a DWI-like SF volume, --in_bval and --in_b0 need\nto be provided to concatenate the b0 image to the SF, and to generate the new\nbvals file. 
Otherwise, no .bval file will be created.\n\nFormerly: scil_compute_sf_from_sh.py\n\npositional arguments:\n in_sh Path of the SH volume.\n out_sf Name of the output SF file to save (bvals/bvecs will be automatically named when necessary).\n\noptions:\n -h, --help show this help message and exit\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere used for the SH to SF projection. \n --in_bvec IN_BVEC Directions used for the SH to SF projection. \n If given, --in_bval must also be provided.\n --dtype {float32,float64}\n Datatype to use for SF computation and output array. [float32]\n --in_bval IN_BVAL b-value file, in FSL format, used to assign a b-value to the \n output SF and generate a `.bval` file.\n - If used, --out_bval is required.\n - The output bval will contain one b-value per point in the SF \n output (i.e. one per point on the --sphere or one per --in_bvec.)\n - The values of the output bval will all be set to the same b-value:\n the average of your in_bval. (Any b0 found in this file, i.e. \n b-values under --b0_threshold, will be removed beforehand.)\n - To add b0s to both the SF volume and the --out_bval file, use --in_b0.\n --in_b0 IN_B0 b0 volume to concatenate to the final SF volume.\n --out_bval OUT_BVAL Optional output bval file.\n --out_bvec OUT_BVEC Optional output bvec file.\n --b0_scaling Scale resulting SF by the b0 image (--in_b0 must be given).\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --full_basis If true, use a full basis for the input SH coefficients.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n Default if not set is 20.\n This value is used with option --in_bval only: any b0 found in the in_bval will be removed.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "image", + "image" + ], + [ + "signal", + "signal" + ], + [ + "processes", + "processes" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "projection", + "projection" + ], + [ + "true", + "true" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "average", + "average" + ], + [ + "considered", + "considered" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_stats_group_comparison", + "docstring": "Run group comparison statistics on metrics from tractometry:\n1) Separate the sample given a particular variable (group_by) into groups\n\n2) Does Shapiro-Wilk test of normality for every sample\nhttps://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test\n\n3) Does Levene or Bartlett (depending on normality) test of variance\nhomogeneity Levene:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm\nBartlett:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm\n\n4) Test the group difference for every measure with the correct test depending\n on the sample (Student, Welch, Mannwhitneyu, ANOVA, Kruskall-Wallis)\nStudent :\nhttps://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test\nWelch :\nhttps://en.wikipedia.org/wiki/Welch%27s_t-test\nMann-Whitney U :\nhttps://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test\nANOVA :\nhttp://www.biostathandbook.com/onewayanova.html\nKruskall-Wallis :\nhttps://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variance\n\n\n5) If the group difference test is positive and the number of groups is greater than\n 2, test the group difference two by two.\n\n6) Generate the result for all metrics and bundles.\n\nFormerly: scil_group_comparison.py", + "help": "usage: scil_stats_group_comparison.py [-h] [--out_dir OUT_DIR]\n [--out_json OUT_JSON]\n [--bundles BUNDLES [BUNDLES ...]]\n [--metrics METRICS [METRICS ...]]\n [--values VALUES [VALUES ...]]\n [--alpha_error ALPHA_ERROR]\n [--generate_graph] [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n IN_JSON IN_PARTICIPANTS GROUP_BY\n\nRun group comparison statistics on metrics from tractometry:\n1) Separate the sample given a particular variable (group_by) into groups\n\n2) Does Shapiro-Wilk test of normality for every sample\nhttps://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test\n\n3) Does Levene or Bartlett (depending on normality) test of variance\nhomogeneity Levene:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm\nBartlett:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm\n\n4) Test the group difference for every measure with the correct test depending\n on the sample (Student, Welch, Mannwhitneyu, ANOVA, Kruskall-Wallis)\nStudent :\nhttps://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test\nWelch :\nhttps://en.wikipedia.org/wiki/Welch%27s_t-test\nMann-Whitney U :\nhttps://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test\nANOVA :\nhttp://www.biostathandbook.com/onewayanova.html\nKruskall-Wallis :\nhttps://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variance\n\n5) If the group difference test is positive and the number of groups is greater than\n 2, test the group difference two by two.\n\n6) Generate the result for all metrics and bundles.\n\nFormerly: scil_group_comparison.py\n\npositional arguments:\n IN_JSON Input JSON file from tractometry nextflow pipeline or equivalent.\n IN_PARTICIPANTS
Input tsv participants file. See doc in https://scilpy.readthedocs.io/en/latest/documentation/construct_participants_tsv_file.html.\n GROUP_BY Variable that will be used to compare groups together.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Name of the output folder path. [stats]\n --out_json OUT_JSON The name of the result json output file; otherwise it will be printed.\n --bundles BUNDLES [BUNDLES ...], -b BUNDLES [BUNDLES ...]\n Bundle(s) in which you want to do stats. [all]\n --metrics METRICS [METRICS ...], -m METRICS [METRICS ...]\n Metric(s) on which you want to do stats. [all]\n --values VALUES [VALUES ...], --va VALUES [VALUES ...]\n Value(s) on which you want to do stats (mean, std). [all]\n --alpha_error ALPHA_ERROR, -a ALPHA_ERROR\n Type 1 error for all the tests. [0.05]\n --generate_graph, --gg\n Generate a simple plot of every metric across groups.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "variance", + "variance" + ], + [ + "greater", + "greater" + ], + [ + "participants", + "participants" + ], + [ + "variable", + "variable" + ], + [ + "bundles", + "bundle" + ], + [ + "positive", + "positive" + ], + [ + "result", + "result" + ], + [ + "bundles", + "bundles" + ], + [ + "level", + "level" + ], + [ + "error", + "error" + ], + [ + "difference", + "difference" + ] + ], + "keywords": [] + }, + { + "name": "scil_surface_apply_transform", + "docstring": "Script to apply a transform to a surface (FreeSurfer or VTK supported),\nusing output from ANTs registration tools (i.e. affine.txt, warp.nii.gz).\n\nExample usage from T1 to b0 using ANTs transforms:\n> ConvertTransformFile 3 output0GenericAffine.mat vtk_transfo.txt --hm\n> scil_surface_apply_transform.py lh_white_lps.vtk affine.txt lh_white_b0.vtk\\\n --ants_warp warp.nii.gz\n\nImportant: The input surface needs to be in *T1 world LPS* coordinates\n(aligned over the T1 in MI-Brain).\n\nThe script will use the linear affine first and then the warp image.\nThe resulting surface will be in *b0 world LPS* coordinates\n(aligned over the b0 in MI-Brain).\n\nFormerly: scil_apply_transform_to_surface.py.", + "help": "usage: scil_surface_apply_transform.py [-h] [--ants_warp ANTS_WARP]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface ants_affine out_surface\n\nScript to apply a transform to a surface (FreeSurfer or VTK supported),\nusing output from ANTs registration tools (i.e.
affine.txt, warp.nii.gz).\n\nExample usage from T1 to b0 using ANTs transforms:\n> ConvertTransformFile 3 output0GenericAffine.mat vtk_transfo.txt --hm\n> scil_surface_apply_transform.py lh_white_lps.vtk affine.txt lh_white_b0.vtk\\\n --ants_warp warp.nii.gz\n\nImportant: The input surface needs to be in *T1 world LPS* coordinates\n(aligned over the T1 in MI-Brain).\n\nThe script will use the linear affine first and then the warp image.\nThe resulting surface will be in *b0 world LPS* coordinates\n(aligned over the b0 in MI-Brain).\n\nFormerly: scil_apply_transform_to_surface.py.\n\npositional arguments:\n in_surface Input surface (.vtk).\n ants_affine Affine transform from ANTs (.txt or .mat).\n out_surface Output surface (.vtk).\n\noptions:\n -h, --help show this help message and exit\n --ants_warp ANTS_WARP\n Warp image from ANTs (Nifti image).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). NeuroImage.\n", + "synonyms": [ + [ + "tool", + "tools" + ], + [ + "supported", + "supported" + ], + [ + "image", + "image" + ], + [ + "applied", + "apply" + ], + [ + "tractography", + "tractography" + ], + [ + "level", + "level" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_surface_convert", + "docstring": "Script to convert surface formats.\n\nSupported formats:\n \".vtk\", \".vtp\", \".ply\", \".stl\", \".xml\", \".obj\"\n and FreeSurfer surfaces\n\n> scil_surface_convert.py surf.vtk converted_surf.ply\n\nFormerly: scil_convert_surface.py", + "help": "usage: scil_surface_convert.py [-h] [--xform XFORM] [--to_lps]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface\n\nScript to convert surface formats.\n\nSupported formats:\n \".vtk\", \".vtp\", \".ply\", \".stl\", \".xml\", \".obj\"\n and FreeSurfer surfaces\n\n> scil_surface_convert.py surf.vtk converted_surf.ply\n\nFormerly: scil_convert_surface.py\n\npositional arguments:\n in_surface Input surface (FreeSurfer or supported by VTK).\n out_surface Output surface (formats supported by VTK).\n\noptions:\n -h, --help show this help message and exit\n --xform XFORM Path of the copy-pasted output from mri_info. \n Generate it using: mri_info $input >> log.txt; \n the file log.txt is this parameter.\n --to_lps Flip for Surface/MI-Brain LPS.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET).
NeuroImage.\n", + "synonyms": [ + [ + "supported", + "supported" + ], + [ + "parameter", + "parameter" + ], + [ + "tractography", + "tractography" + ], + [ + "level", + "level" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_surface_flip", + "docstring": "Script to flip a given surface (FreeSurfer or VTK supported).\n\nCan flip surface coordinates around a single or multiple axes\nCan also be used to reverse the orientation of the surface normals.\n\nFormerly: scil_flip_surface.py", + "help": "usage: scil_surface_flip.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface {x,y,z,n} [{x,y,z,n} ...]\n\nScript to flip a given surface (FreeSurfer or VTK supported).\n\nCan flip surface coordinates around a single or multiple axes\nCan also be used to reverse the orientation of the surface normals.\n\nFormerly: scil_flip_surface.py\n\npositional arguments:\n in_surface Input surface (.vtk).\n out_surface Output flipped surface (.vtk).\n {x,y,z,n} The axes you want to flip. eg: to flip the x and y axes use: x y. to reverse the surface normals use: n\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). NeuroImage.\n", + "synonyms": [ + [ + "orientation", + "orientation" + ], + [ + "supported", + "supported" + ], + [ + "tractography", + "tractography" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_surface_smooth", + "docstring": "Script to smooth a surface with a Laplacian blur.\n\nFor a standard FreeSurfer white matter mesh a step_size from 0.1 to 10\nis recommended\n\nSmoothing time = step_size * nb_steps\n small amount of smoothing [step_size 1, nb_steps 10]\n moderate amount of smoothing [step_size 10, nb_steps 100]\n large amount of smoothing [step_size 100, nb_steps 1000]\n\nFormerly: scil_smooth_surface.py", + "help": "usage: scil_surface_smooth.py [-h] [-m VTS_MASK] [-n NB_STEPS] [-s STEP_SIZE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface\n\nScript to smooth a surface with a Laplacian blur.\n\nFor a standard FreeSurfer white matter mesh a step_size from 0.1 to 10\nis recommended\n\nSmoothing time = step_size * nb_steps\n small amount of smoothing [step_size 1, nb_steps 10]\n moderate amount of smoothing [step_size 10, nb_steps 100]\n large amount of smoothing [step_size 100, nb_steps 1000]\n\nFormerly: scil_smooth_surface.py\n\npositional arguments:\n in_surface Input surface (.vtk).\n out_surface Output smoothed surface (.vtk).\n\noptions:\n -h, --help show this help message and exit\n -m VTS_MASK, --vts_mask VTS_MASK\n Vertex mask no smoothing where mask equals 0 (.npy).\n -n NB_STEPS, --nb_steps NB_STEPS\n Number of steps for laplacian smooth [2].\n -s STEP_SIZE, --step_size STEP_SIZE\n Laplacian smooth step size [5.0].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). 
NeuroImage.\n", + "synonyms": [ + [ + "white", + "white" + ], + [ + "large", + "small" + ], + [ + "step", + "step" + ], + [ + "large", + "large" + ], + [ + "tractography", + "tractography" + ], + [ + "matter", + "matter" + ], + [ + "level", + "level" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_tracking_local", + "docstring": "Local streamline HARDI tractography.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\n\nWARNING: This script DOES NOT support asymetric FODF input (aFODF).\n\nAlgo 'eudx': select the peak from the spherical function (SF) most closely\naligned to the previous direction, and follow an average of it and the previous\ndirection [1].\n\nAlgo 'det': select the orientation corresponding to the maximum of the\nspherical function.\n\nAlgo 'prob': select a direction drawn from the empirical distribution function\ndefined from the SF.\n\nAlgo 'ptt': select the propagation direction using Parallel-Transport\nTractography (PTT) framework, see [2] for more details.\n\nNOTE: eudx can be used with pre-computed peaks from fodf as well as\nevecs_v1.nii.gz from scil_dti_metrics.py (experimental).\n\nNOTE: If tracking with PTT, the step-size should be smaller than usual,\ni.e 0.1-0.2mm or lower. The maximum angle between segments (theta) should\nbe between 10 and 20 degrees.\n\nThe local tracking algorithm can also run on the GPU using the --use_gpu\noption (experimental). By default, GPU tracking behaves the same as\nDIPY. Below is a list of known divergences between the CPU and GPU\nimplementations:\n * Backend: The CPU implementation uses DIPY's LocalTracking and the\n GPU implementation uses an in-house OpenCL implementation.\n * Algo: For the GPU implementation, the only available algorithm is\n Algo 'prob'.\n * SH interpolation: For GPU tracking, SH interpolation can be set to either\n nearest neighbour or trilinear (default). With DIPY, the only available\n method is trilinear.\n * Forward tracking: For GPU tracking, the `--forward_only` flag can be used\n to disable backward tracking. This option isn't available for CPU\n tracking.\n\nAll the input nifti files must be in isotropic resolution.\n\n\nReferences\n----------\n[1]: Garyfallidis, E. (2012). Towards an accurate brain tractography\n[PhD thesis]. University of Cambridge. United Kingdom.\n\n[2]: Aydogan, D. B., & Shi, Y. (2020). 
Parallel transport tractography.\nIEEE transactions on medical imaging, 40(2), 635-647.\n\nFormerly: scil_compute_local_tracking.py", + "help": "usage: scil_tracking_local.py [-h] [--step STEP_SIZE] [--min_length m]\n [--max_length M] [--theta THETA]\n [--sfthres sf_th]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--sh_to_pmf]\n [--algo {det,prob,ptt,eudx}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--sub_sphere SUB_SPHERE]\n [--probe_length PROBE_LENGTH]\n [--probe_radius PROBE_RADIUS]\n [--probe_quality PROBE_QUALITY]\n [--probe_count PROBE_COUNT]\n [--support_exponent SUPPORT_EXPONENT]\n [--use_gpu] [--sh_interp {trilinear,nearest}]\n [--forward_only] [--batch_size BATCH_SIZE]\n [--compress [COMPRESS_TH]] [-f] [--save_seeds]\n [--seed SEED] [-v [{DEBUG,INFO,WARNING}]]\n in_odf in_seed in_mask out_tractogram\n\nLocal streamline HARDI tractography.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\n\nWARNING: This script DOES NOT support asymmetric FODF input (aFODF).\n\nAlgo 'eudx': select the peak from the spherical function (SF) most closely\naligned to the previous direction, and follow an average of it and the previous\ndirection [1].\n\nAlgo 'det': select the orientation corresponding to the maximum of the\nspherical function.\n\nAlgo 'prob': select a direction drawn from the empirical distribution function\ndefined from the SF.\n\nAlgo 'ptt': select the propagation direction using Parallel-Transport\nTractography (PTT) framework, see [2] for more details.\n\nNOTE: eudx can be used with pre-computed peaks from fodf as well as\nevecs_v1.nii.gz from scil_dti_metrics.py (experimental).\n\nNOTE: If tracking with PTT, the step-size should be smaller than usual,\ni.e. 0.1-0.2 mm or lower. The maximum angle between segments (theta) should\nbe between 10 and 20 degrees.\n\nThe local tracking algorithm can also run on the GPU using the --use_gpu\noption (experimental). By default, GPU tracking behaves the same as\nDIPY. Below is a list of known divergences between the CPU and GPU\nimplementations:\n * Backend: The CPU implementation uses DIPY's LocalTracking and the\n GPU implementation uses an in-house OpenCL implementation.\n * Algo: For the GPU implementation, the only available algorithm is\n Algo 'prob'.\n * SH interpolation: For GPU tracking, SH interpolation can be set to either\n nearest neighbour or trilinear (default). With DIPY, the only available\n method is trilinear.\n * Forward tracking: For GPU tracking, the `--forward_only` flag can be used\n to disable backward tracking. This option isn't available for CPU\n tracking.\n\nAll the input nifti files must be in isotropic resolution.\n\nReferences\n----------\n[1]: Garyfallidis, E. (2012). Towards an accurate brain tractography\n[PhD thesis]. University of Cambridge. United Kingdom.\n\n[2]: Aydogan, D. B., & Shi, Y. (2020). Parallel transport tractography.\nIEEE transactions on medical imaging, 40(2), 635-647.\n\nFormerly: scil_compute_local_tracking.py\n\npositional arguments:\n in_odf File containing the orientation distribution function \n as spherical harmonics file (.nii.gz). Ex: ODF or fODF.\n in_seed Seeding mask (.nii.gz).\n in_mask Tracking mask (.nii.gz).\n Tracking will stop outside this mask.
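The 'prob' rule described above (a direction drawn from the empirical distribution defined by the SF, restricted to the aperture cone) can be sketched in a few lines of numpy. This is a toy illustration under stated assumptions, not scilpy's or DIPY's implementation: dirs are unit vectors of the chosen sphere, sf holds the SF value per direction, and prev_dir is the previous unit tracking direction (theta defaults to 20 degrees for 'prob' per the options below).

    import numpy as np

    def sample_direction(sf, dirs, prev_dir, theta_deg=20.0, rng=None):
        """sf: SF value per direction; dirs: (N, 3) unit vectors on the sphere."""
        rng = rng or np.random.default_rng()
        cos_limit = np.cos(np.deg2rad(theta_deg))
        in_cone = dirs @ prev_dir >= cos_limit            # aperture-cone constraint
        weights = np.where(in_cone, np.clip(sf, 0, None), 0.0)
        if weights.sum() == 0:
            return None                                   # no valid direction: stop the streamline
        return dirs[rng.choice(len(dirs), p=weights / weights.sum())]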
The last point of each \n streamline (triggering the stopping criteria) IS added to the streamline.\n out_tractogram Tractogram output file (must be .trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n\nTracking options:\n --step STEP_SIZE Step size in mm. [0.5]\n --min_length m Minimum length of a streamline in mm. [10.0]\n --max_length M Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. If the angle is too big, streamline is \n stopped and the following point is NOT included.\n [\"eudx\"=60, \"det\"=45, \"prob\"=20, \"ptt\"=20]\n --sfthres sf_th Spherical function relative threshold. [0.1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sh_to_pmf If set, map spherical harmonics to spherical function (pmf) before \n tracking (faster, requires more memory)\n --algo {det,prob,ptt,eudx}\n Algorithm to use. [prob]\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Dipy sphere; set of possible directions.\n Default: [repulsion724]\n --sub_sphere SUB_SPHERE\n Subdivides each face of the sphere into 4^s new faces. [0]\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nPTT options:\n --probe_length PROBE_LENGTH\n The length of the probes. Smaller value\n yields more dispersed fibers. [1.0]\n --probe_radius PROBE_RADIUS\n The radius of the probe. A large probe_radius\n helps mitigate noise in the pmf but it might\n make it harder to sample thin and intricate\n connections; also, the boundary of fiber\n bundles might be eroded. [0]\n --probe_quality PROBE_QUALITY\n The quality of the probe. This parameter sets\n the number of segments to split the cylinder\n along the length of the probe (minimum=2) [3]\n --probe_count PROBE_COUNT\n The number of probes. This parameter sets the\n number of parallel lines used to model the\n cylinder (minimum=1). [1]\n --support_exponent SUPPORT_EXPONENT\n Data support exponent, used for rejection\n sampling. [3]\n\nGPU options:\n --use_gpu Enable GPU tracking (experimental).\n --sh_interp {trilinear,nearest}\n SH image interpolation method. [trilinear]\n --forward_only Perform forward tracking only.\n --batch_size BATCH_SIZE\n Approximate size of GPU batches (number\n of streamlines to track in parallel). [10000]\n\nOutput options:\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline.
Value is the maximum \n compression distance in mm.\n A rule of thumb is to set it to 0.1 mm for deterministic \n streamlines and to 0.2 mm for probabilistic streamlines. [0.1]\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n Hint: you can then use scil_tractogram_seed_density_map.\n --seed SEED Random number generator seed.\n\nLogging options:\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [ + [ + "seeding", + "seeding" + ], + [ + "methods", + "method" + ], + [ + "direction", + "direction" + ], + [ + "diffusion", + "diffusion" + ], + [ + "Data", + "data", + "data" + ], + [ + "memory", + "memory" + ], + [ + "streamlines", + "streamlines" + ], + [ + "maps", + "map" + ], + [ + "image", + "image" + ], + [ + "divergence", + "divergences" + ], + [ + "step", + "step" + ], + [ + "large", + "large" + ], + [ + "tractography", + "tractography" + ], + [ + "bundles", + "bundles" + ], + [ + "level", + "level" + ], + [ + "algorithm", + "algorithm" + ], + [ + "random", + "random" + ], + [ + "connections", + "connections" + ], + [ + "function", + "functions", + "function" + ], + [ + "average", + "average" + ], + [ + "tracking", + "tracking" + ], + [ + "total", + "total" + ], + [ + "brain", + "brain" + ], + [ + "size", + "size" + ], + [ + "streamline", + "streamline" + ], + [ + "higher", + "lower" + ], + [ + "orientation", + "orientation" + ], + [ + "imaging", + "imaging" + ], + [ + "larger", + "smaller" + ], + [ + "parameter", + "parameter" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "voxel", + "voxel" + ], + [ + "defined", + "defined" + ] + ], + "keywords": [] + }, + { + "name": "scil_tracking_local_dev", + "docstring": "Local streamline HARDI tractography using scilpy-only methods -- no dipy (i.e.\nno cython). The goal of this is to have a python-only version that can be\nmodified more easily by our team when testing new algorithms and parameters,\nand that can be used as parent classes in sub-projects of our lab, such as in\ndwi_ml.\n\nWARNING. MUCH SLOWER THAN scil_tracking_local.py. We recommend using multi-\nprocessing with the option --processes.\n\nSimilar to scil_tracking_local:\n The tracking direction is chosen in the aperture cone defined by the\n previous tracking direction and the angular constraint.\n - Algo 'det': the maxima of the spherical function (SF) most closely\n aligned to the previous direction.\n - Algo 'prob': a direction drawn from the empirical distribution function\n defined from the SF.\n\nContrary to scil_tracking_local:\n - Algo 'eudx' is not yet available!\n - Input nifti files do not necessarily need to be in isotropic resolution.\n - The script works with asymmetric input ODF.\n - The interpolation for the tracking mask and spherical function can be\n one of 'nearest' or 'trilinear'.\n - Runge-Kutta integration is supported for the step function.\n\nA few notes on Runge-Kutta integration.\n 1. Runge-Kutta integration is used to approximate the next tracking\n direction by estimating directions from future tracking steps. This\n works well for deterministic tracking. However, in the context of\n probabilistic tracking, the next tracking directions cannot be estimated\n in advance, because they are picked randomly from a distribution. It is\n therefore recommended to keep the rk_order to 1 for probabilistic\n tracking.\n 2.
As a rule of thumb, doubling the rk_order will double the computation\n time in the worst case.\n\nReferences: [1] Girard, G., Whittingstall K., Deriche, R., and\n Descoteaux, M. (2014). Towards quantitative connectivity analysis:\n reducing tractography biases. Neuroimage, 98, 266-278.\n\nFormerly: scil_compute_local_tracking_dev.py", + "help": "usage: scil_tracking_local_dev.py [-h] [--step STEP_SIZE] [--min_length m]\n [--max_length M] [--theta THETA]\n [--sfthres sf_th]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--algo {det,prob}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--sub_sphere SUB_SPHERE]\n [--sfthres_init sf_th] [--rk_order K]\n [--max_invalid_nb_points MAX]\n [--forward_only]\n [--sh_interp {nearest,trilinear}]\n [--mask_interp {nearest,trilinear}]\n [--keep_last_out_point]\n [--n_repeats_per_seed N_REPEATS_PER_SEED]\n [--rng_seed RNG_SEED] [--skip SKIP]\n [--processes NBR] [--compress [COMPRESS_TH]]\n [-f] [--save_seeds]\n [-v [{DEBUG,INFO,WARNING}]]\n in_odf in_seed in_mask out_tractogram\n\nLocal streamline HARDI tractography using scilpy-only methods -- no dipy (i.e.\nno cython). The goal of this is to have a python-only version that can be\nmodified more easily by our team when testing new algorithms and parameters,\nand that can be used as parent classes in sub-projects of our lab, such as in\ndwi_ml.\n\nWARNING. MUCH SLOWER THAN scil_tracking_local.py. We recommend using multi-\nprocessing with the option --processes.\n\nSimilar to scil_tracking_local:\n The tracking direction is chosen in the aperture cone defined by the\n previous tracking direction and the angular constraint.\n - Algo 'det': the maxima of the spherical function (SF) most closely\n aligned to the previous direction.\n - Algo 'prob': a direction drawn from the empirical distribution function\n defined from the SF.\n\nContrary to scil_tracking_local:\n - Algo 'eudx' is not yet available!\n - Input nifti files do not necessarily need to be in isotropic resolution.\n - The script works with asymmetric input ODF.\n - The interpolation for the tracking mask and spherical function can be\n one of 'nearest' or 'trilinear'.\n - Runge-Kutta integration is supported for the step function.\n\nA few notes on Runge-Kutta integration.\n 1. Runge-Kutta integration is used to approximate the next tracking\n direction by estimating directions from future tracking steps. This\n works well for deterministic tracking. However, in the context of\n probabilistic tracking, the next tracking directions cannot be estimated\n in advance, because they are picked randomly from a distribution. It is\n therefore recommended to keep the rk_order to 1 for probabilistic\n tracking.\n 2. As a rule of thumb, doubling the rk_order will double the computation\n time in the worst case.\n\nReferences: [1] Girard, G., Whittingstall K., Deriche, R., and\n Descoteaux, M. (2014). Towards quantitative connectivity analysis:\n reducing tractography biases. Neuroimage, 98, 266-278.\n\nFormerly: scil_compute_local_tracking_dev.py\n\npositional arguments:\n in_odf File containing the orientation distribution function \n as spherical harmonics file (.nii.gz). Ex: ODF or fODF.\n in_seed Seeding mask (.nii.gz).\n in_mask Tracking mask (.nii.gz).\n Tracking will stop outside this mask.
The last point of each \n streamline (triggering the stopping criteria) IS added to the streamline.\n out_tractogram Tractogram output file (must be .trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nTracking options:\n --step STEP_SIZE Step size in mm. [0.5]\n --min_length m Minimum length of a streamline in mm. [10.0]\n --max_length M Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. If the angle is too big, streamline is \n stopped and the following point is NOT included.\n [\"eudx\"=60, \"det\"=45, \"prob\"=20, \"ptt\"=20]\n --sfthres sf_th Spherical function relative threshold. [0.1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --algo {det,prob} Algorithm to use. [prob]\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Dipy sphere; set of possible directions.\n Default: [repulsion724]\n --sub_sphere SUB_SPHERE\n Subdivides each face of the sphere into 4^s new faces. [0]\n --sfthres_init sf_th Spherical function relative threshold value for the \n initial direction. [0.5]\n --rk_order K The order of the Runge-Kutta integration used for the step function.\n For more information, refer to the note in the script description. [1]\n --max_invalid_nb_points MAX\n Maximum number of steps without valid direction, \n ex: if threshold on ODF or max angles are reached.\n Default: 0, i.e. do not add points following an invalid direction.\n --forward_only If set, tracks in one direction only (forward) given the \n initial seed. The direction is randomly drawn from the ODF.\n --sh_interp {nearest,trilinear}\n Spherical harmonic interpolation: nearest-neighbor \n or trilinear. [trilinear]\n --mask_interp {nearest,trilinear}\n Mask interpolation: nearest-neighbor or trilinear. [nearest]\n --keep_last_out_point\n If set, keep the last point (once out of the tracking mask) of \n the streamline. Default: discard them. This is the default in \n Dipy too. Note that points obtained after an invalid direction \n (ex when angle is too sharp or sh_threshold not reached) are \n never added.\n --n_repeats_per_seed N_REPEATS_PER_SEED\n By default, each seed position is used only once. This option\n allows for tracking from the exact same seed n_repeats_per_seed\n times. [1]\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nRandom seeding options:\n --rng_seed RNG_SEED Initial value for the random number generator. [0]\n --skip SKIP Skip the first N random number. 
\n Useful if you want to create new streamlines to add to \n a previously created tractogram with a fixed --rng_seed.\n Ex: If tractogram_1 was created with --nt 1,000,000, \n you can create tractogram_2 with \n --skip 1,000,000.\n\nMemory options:\n --processes NBR Number of sub-processes to start. \n Default: [1]\n\nOutput options:\n --compress [COMPRESS_TH]\n If set, compress the resulting streamlines. Value is the maximum \n compression distance in mm.\n A rule of thumb is to set it to 0.1 mm for deterministic \n streamlines and to 0.2 mm for probabilistic streamlines. [0.1]\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n Hint: you can then use scil_tractogram_seed_density_map.\n", + "synonyms": [ + [ + "seeding", + "seeding" + ], + [ + "order", + "order" + ], + [ + "direction", + "direction" + ], + [ + "connectivity", + "connectivity" + ], + [ + "create", + "create" + ], + [ + "future", + "future" + ], + [ + "diffusion", + "diffusion" + ], + [ + "processes", + "processes" + ], + [ + "memory", + "memory" + ], + [ + "processing", + "processing" + ], + [ + "random", + "randomly" + ], + [ + "streamlines", + "streamlines" + ], + [ + "probabilistic", + "probabilistic" + ], + [ + "step", + "step" + ], + [ + "tractography", + "tractography" + ], + [ + "level", + "level" + ], + [ + "analysis", + "analysis" + ], + [ + "invalid", + "invalid" + ], + [ + "supported", + "supported" + ], + [ + "algorithm", + "algorithm" + ], + [ + "random", + "random" + ], + [ + "function", + "functions", + "function" + ], + [ + "integrating", + "integration" + ], + [ + "algorithm", + "algorithms" + ], + [ + "valid", + "valid" + ], + [ + "tracking", + "tracking" + ], + [ + "total", + "total" + ], + [ + "size", + "size" + ], + [ + "streamline", + "streamline" + ], + [ + "orientation", + "orientation" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "project", + "projects" + ], + [ + "methods", + "methods" + ], + [ + "voxel", + "voxel" + ], + [ + "parameters", + "parameters" + ], + [ + "defined", + "defined" + ] + ], + "keywords": [] + }, + { + "name": "scil_tracking_pft", + "docstring": "Local streamline HARDI tractography including Particle Filtering tracking.\n\nWARNING: This script DOES NOT support asymmetric FODF input (aFODF).\n\nThe tracking is done inside partial volume estimation maps and uses the\nparticle filtering tractography (PFT) algorithm.
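As an illustration only, a typical PFT call could look as follows (hypothetical file names; the include/exclude maps and interface seeding mask as produced by scil_tracking_pft_maps.py; all flags are documented below):
    scil_tracking_pft.py fodf.nii.gz interface.nii.gz map_include.nii.gz map_exclude.nii.gz pft_tractogram.trk --algo prob --npv 10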
See\nscil_tracking_pft_maps.py to generate the required PFT maps.\n\nStreamlines longer than min_length and shorter than max_length are kept.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\nDefault parameters as suggested in [1].\n\nAlgo 'det': the maxima of the spherical function (SF) most closely aligned\nto the previous direction.\nAlgo 'prob': a direction drawn from the empirical distribution function defined\nfrom the SF.\n\nFor streamline compression, a rule of thumb is to set it to 0.1 mm for the\ndeterministic algorithm and 0.2 mm for the probabilistic algorithm.\n\nAll the input nifti files must be in isotropic resolution.\n\nFormerly: scil_compute_pft.py", + "help": "usage: scil_tracking_pft.py [-h] [--algo {det,prob}] [--step STEP_SIZE]\n [--min_length MIN_LENGTH]\n [--max_length MAX_LENGTH] [--theta THETA] [--act]\n [--sfthres SF_THRESHOLD]\n [--sfthres_init SF_THRESHOLD_INIT]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--particles PARTICLES]\n [--back BACK_TRACKING]\n [--forward FORWARD_TRACKING] [--all] [--seed SEED]\n [-f] [--save_seeds] [--compress [COMPRESS_TH]]\n [-v [{DEBUG,INFO,WARNING}]]\n in_sh in_seed in_map_include map_exclude_file\n out_tractogram\n\nLocal streamline HARDI tractography including Particle Filtering tracking.\n\nWARNING: This script DOES NOT support asymmetric FODF input (aFODF).\n\nThe tracking is done inside partial volume estimation maps and uses the\nparticle filtering tractography (PFT) algorithm. See\nscil_tracking_pft_maps.py to generate the required PFT maps.\n\nStreamlines longer than min_length and shorter than max_length are kept.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\nDefault parameters as suggested in [1].\n\nAlgo 'det': the maxima of the spherical function (SF) most closely aligned\nto the previous direction.\nAlgo 'prob': a direction drawn from the empirical distribution function defined\nfrom the SF.\n\nFor streamline compression, a rule of thumb is to set it to 0.1 mm for the\ndeterministic algorithm and 0.2 mm for the probabilistic algorithm.\n\nAll the input nifti files must be in isotropic resolution.\n\nFormerly: scil_compute_pft.py\n\npositional arguments:\n in_sh Spherical harmonic file (.nii.gz).\n in_seed Seeding mask (.nii.gz).\n in_map_include The probability map (.nii.gz) of ending the\n streamline and including it in the output (CMC, PFT [1])\n map_exclude_file The probability map (.nii.gz) of ending the\n streamline and excluding it in the output (CMC, PFT [1]).\n out_tractogram Tractogram output file (must be .trk or .tck).\n\nGeneric options:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nTracking options:\n --algo {det,prob} Algorithm to use (must be \"det\" or \"prob\"). [prob]\n --step STEP_SIZE Step size in mm. [0.2]\n --min_length MIN_LENGTH\n Minimum length of a streamline in mm. [10.0]\n --max_length MAX_LENGTH\n Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. [\"det\"=45, \"prob\"=20]\n --act If set, uses anatomically-constrained tractography (ACT) \n instead of continuous map criterion (CMC).\n --sfthres SF_THRESHOLD\n Spherical function relative threshold.
[0.1]\n --sfthres_init SF_THRESHOLD_INIT\n Spherical function relative threshold value for the \n initial direction. [0.5]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nPFT options:\n --particles PARTICLES\n Number of particles to use for PFT. [15]\n --back BACK_TRACKING Length of PFT back tracking (mm). [2.0]\n --forward FORWARD_TRACKING\n Length of PFT forward tracking (mm). [1.0]\n\nOutput options:\n --all If set, keeps \"excluded\" streamlines.\n NOT RECOMMENDED, except for debugging.\n --seed SEED Random number generator seed.\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm.[0.1]\n\nReferences: [1] Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M. (2014). Towards quantitative connectivity analysis: reducing tractography biases. Neuroimage, 98, 266-278.\n", + "synonyms": [ + [ + "seeding", + "seeding" + ], + [ + "anatomically", + "anatomically" + ], + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "maps", + "map" + ], + [ + "direction", + "direction" + ], + [ + "connectivity", + "connectivity" + ], + [ + "algorithm", + "algorithm" + ], + [ + "maps", + "maps" + ], + [ + "probability", + "probability" + ], + [ + "random", + "random" + ], + [ + "step", + "step" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "function", + "functions", + "function" + ], + [ + "tractography", + "tractography" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "voxel", + "voxel" + ], + [ + "tracking", + "tracking" + ], + [ + "level", + "level" + ], + [ + "total", + "total" + ], + [ + "parameters", + "parameters" + ], + [ + "analysis", + "analysis" + ], + [ + "defined", + "defined" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_tracking_pft_maps", + "docstring": "Compute include and exclude maps, and the seeding interface mask from partial\nvolume estimation (PVE) maps. Maps should have values in [0,1], gm+wm+csf=1 in\nall voxels of the brain, gm+wm+csf=0 elsewhere.\n\nReferences: Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M.\n(2014). Towards quantitative connectivity analysis: reducing tractography\nbiases. Neuroimage.\n\nFormerly: scil_compute_maps_for_particle_filter_tracking.py", + "help": "usage: scil_tracking_pft_maps.py [-h] [--include filename]\n [--exclude filename] [--interface filename]\n [-t THRESHOLD] [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n in_wm in_gm in_csf\n\nCompute include and exclude maps, and the seeding interface mask from partial\nvolume estimation (PVE) maps. 
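For illustration, a possible call, assuming FSL FAST outputs named with the convention described below (hypothetical file names):
    scil_tracking_pft_maps.py sub01_pve_2.nii.gz sub01_pve_1.nii.gz sub01_pve_0.nii.gz --include map_include.nii.gz --exclude map_exclude.nii.gz --interface interface.nii.gz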
Maps should have values in [0,1], gm+wm+csf=1 in\nall voxels of the brain, gm+wm+csf=0 elsewhere.\n\nReferences: Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M.\n(2014). Towards quantitative connectivity analysis: reducing tractography\nbiases. Neuroimage.\n\nFormerly: scil_compute_maps_for_particle_filter_tracking.py\n\npositional arguments:\n in_wm White matter PVE map (nifti). From normal FAST output, has a PVE_2 name suffix.\n in_gm Grey matter PVE map (nifti). From normal FAST output, has a PVE_1 name suffix.\n in_csf Cerebrospinal fluid PVE map (nifti). From normal FAST output, has a PVE_0 name suffix.\n\noptions:\n -h, --help show this help message and exit\n --include filename Output include map (nifti). [map_include.nii.gz]\n --exclude filename Output exclude map (nifti). [map_exclude.nii.gz]\n --interface filename Output interface seeding mask (nifti). [interface.nii.gz]\n -t THRESHOLD Minimum gm and wm PVE values in a voxel to be included in the interface. [0.1]\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [ + [ + "seeding", + "seeding" + ], + [ + "maps", + "map" + ], + [ + "connectivity", + "connectivity" + ], + [ + "white", + "white" + ], + [ + "maps", + "maps" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "voxel", + "voxels" + ], + [ + "tractography", + "tractography" + ], + [ + "matter", + "matter" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "analysis", + "analysis" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_tracking_pft_maps_edit", + "docstring": "Modify PFT maps to allow PFT tracking in a given mask (e.g. edema).\n\nFormerly: scil_add_tracking_mask_to_pft_maps.py.", + "help": "usage: scil_tracking_pft_maps_edit.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n map_include map_exclude additional_mask\n map_include_corr map_exclude_corr\n\nModify PFT maps to allow PFT tracking in a given mask (e.g. edema).\n\nFormerly: scil_add_tracking_mask_to_pft_maps.py.\n\npositional arguments:\n map_include PFT map include.\n map_exclude PFT map exclude.\n additional_mask Allow PFT tracking in this mask.\n map_include_corr Corrected PFT map include output file name.\n map_exclude_corr Corrected PFT map exclude output file name.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "tracking", + "tracking" + ], + [ + "level", + "level" + ], + [ + "maps", + "map" + ], + [ + "maps", + "maps" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_apply_transform", + "docstring": "Transform a tractogram using an affine/rigid transformation and nonlinear\ndeformation (optional).\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nApplying a transformation to a tractogram can lead to invalid streamlines (out of\nthe bounding box), and thus four strategies are available:\n1) Do nothing, may crash at saving if invalid streamlines are present.\n [This is the default]\n2) --keep_invalid, save invalid streamlines.
Leave it to the user to run\n scil_tractogram_remove_invalid.py if needed.\n3) --remove_invalid, automatically remove invalid streamlines before saving.\n Should not remove more than a few streamlines. Typically, the streamlines\n that are rejected are the ones reaching the limits of the brain, e.g. near\n the pons.\n4) --cut_invalid, automatically cut invalid streamlines before saving, i.e. the\n streamlines are kept but the points out of the bounding box are cut.\n\nExample:\nTo apply a transformation from ANTs to a tractogram, if the ANTs command was\nMOVING->REFERENCE...\n1) To apply the original transformation:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --inverse\n --in_deformation 1InverseWarp.nii.gz\n\n2) To apply the inverse transformation, i.e. REFERENCE->MOVING:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --in_deformation 1Warp.nii.gz\n --reverse_operation\n\nFormerly: scil_apply_transform_to_tractogram.py", + "help": "usage: scil_tractogram_apply_transform.py [-h] [--inverse]\n [--in_deformation file]\n [--reverse_operation]\n [--cut_invalid | --remove_invalid | --keep_invalid]\n [--no_empty] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_moving_tractogram in_target_file\n in_transfo out_tractogram\n\nTransform a tractogram using an affine/rigid transformation and nonlinear\ndeformation (optional).\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nApplying a transformation to a tractogram can lead to invalid streamlines (out of\nthe bounding box), and thus four strategies are available:\n1) Do nothing, may crash at saving if invalid streamlines are present.\n [This is the default]\n2) --keep_invalid, save invalid streamlines. Leave it to the user to run\n scil_tractogram_remove_invalid.py if needed.\n3) --remove_invalid, automatically remove invalid streamlines before saving.\n Should not remove more than a few streamlines. Typically, the streamlines\n that are rejected are the ones reaching the limits of the brain, e.g. near\n the pons.\n4) --cut_invalid, automatically cut invalid streamlines before saving, i.e. the\n streamlines are kept but the points out of the bounding box are cut.\n\nExample:\nTo apply a transformation from ANTs to a tractogram, if the ANTs command was\nMOVING->REFERENCE...\n1) To apply the original transformation:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --inverse\n --in_deformation 1InverseWarp.nii.gz\n\n2) To apply the inverse transformation, i.e.
REFERENCE->MOVING:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --in_deformation 1Warp.nii.gz\n --reverse_operation\n\nFormerly: scil_apply_transform_to_tractogram.py\n\npositional arguments:\n in_moving_tractogram Path of the tractogram to be transformed.\n Bounding box validity will not be checked (could \n contain invalid streamlines).\n in_target_file Path of the reference target file (trk or nii).\n in_transfo Path of the file containing the 4x4 \n transformation, matrix (.txt, .npy or .mat).\n out_tractogram Output tractogram filename (transformed data).\n\noptions:\n -h, --help show this help message and exit\n --no_empty Do not write file if there is no streamline.\n You may save an empty file if you use remove_invalid.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nTransformation options:\n --inverse Apply the inverse linear transformation.\n --in_deformation file\n Path to the file containing a deformation field.\n --reverse_operation Apply the transformation in reverse (see doc), warp\n first, then linear.\n\nManagement of invalid streamlines:\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --keep_invalid Keep the streamlines landing out of the bounding box.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "invalid", + "invalid" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "applied", + "apply" + ], + [ + "Data", + "data", + "data" + ], + [ + "level", + "level" + ], + [ + "applied", + "applying" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_apply_transform_to_hdf5", + "docstring": "Transform tractogram(s) contained in the hdf5 output from a connectivity\nscript, using an affine/rigid transformation and nonlinear deformation\n(optional).\n\nSee scil_tractogram_apply_transform.py to apply directly to a tractogram.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nOr use >> scil_tractogram_apply_transform.py --help\n\nFormerly: scil_apply_transform_to_hdf5.py", + "help": "usage: scil_tractogram_apply_transform_to_hdf5.py [-h] [--inverse]\n [--in_deformation file]\n [--reverse_operation]\n [--cut_invalid | --remove_invalid | --keep_invalid]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_hdf5 in_target_file\n in_transfo out_hdf5\n\nTransform tractogram(s) contained in the hdf5 output from a connectivity\nscript, using an affine/rigid transformation and nonlinear deformation\n(optional).\n\nSee scil_tractogram_apply_transform.py to apply directly to a tractogram.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nOr use >> scil_tractogram_apply_transform.py --help\n\nFormerly: scil_apply_transform_to_hdf5.py\n\npositional arguments:\n in_hdf5 Path of the hdf5 containing the moving tractogram, to be transformed. 
(.h5 extension).\n in_target_file Path of the reference target file (.trk or .nii).\n in_transfo Path of the file containing the 4x4 \n transformation, matrix (.txt, .npy or .mat).\n out_hdf5 Output tractogram filename (transformed data).\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nTransformation options:\n --inverse Apply the inverse linear transformation.\n --in_deformation file\n Path to the file containing a deformation field.\n --reverse_operation Apply the transformation in reverse (see doc), warp\n first, then linear.\n\nManagement of invalid streamlines:\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --keep_invalid Keep the streamlines landing out of the bounding box.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "connectivity", + "connectivity" + ], + [ + "invalid", + "invalid" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "applied", + "apply" + ], + [ + "Data", + "data", + "data" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_assign_custom_color", + "docstring": "The script uses scalars from an anatomy, data_per_point or data_per_streamline\n(e.g. commit_weights) to visualize them on the streamlines.\nSaves the RGB values in the data_per_point 'color' with 3 values per point:\n(color_x, color_y, color_z).\n\nIf called with .tck, the output will always be .trk, because data_per_point has\nno equivalent in tck file.\n\nIf used with a visualization software like MI-Brain\n(https://github.com/imeka/mi-brain), the 'color' dps is applied by default at\nloading time.\n\nCOLORING METHOD\nThis script maps the raw values from these sources to RGB using a colormap.\n --use_dpp: The data from each point is converted to a color.\n --use_dps: The same color is applied to all points of the streamline.\n --from_anatomy: The voxel's color is used for the points of the streamlines\n crossing it. See also scil_tractogram_project_map_to_streamlines.py. You\n can have more options to project maps to dpp, and then use --use_dpp here.\n --along_profile: The data used here is each point's position in the\n streamline. To have nice results, you should first uniformize head/tail.\n See scil_tractogram_uniformize_endpoints.py.\n --local_angle.\n\nCOLORING OPTIONS\nA minimum and a maximum range can be provided to clip values. If the range of\nvalues is too large for intuitive visualization, a log transform can be\napplied.\n\nIf the data provided from --use_dps, --use_dpp and --from_anatomy are integer\nlabels, they can be mapped using a LookUp Table (--LUT).\nThe file provided as a LUT should be either .txt or .npy and if the size is\nN=20, then the data provided should be between 1-20.\n\nA custom colormap can be provided using --colormap. 
It should be a string\ncontaining a colormap name OR multiple Matplotlib named colors separated by -.\nThe colormap used for mapping values to colors can be saved to a png/jpg image\nusing the --out_colorbar option.\n\nSee also: scil_tractogram_assign_uniform_color.py, for simplified options.\n\nFormerly: scil_assign_custom_color_to_tractogram.py", + "help": "", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "methods", + "method" + ], + [ + "streamline", + "streamline" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "image", + "image" + ], + [ + "maps", + "maps" + ], + [ + "project", + "project" + ], + [ + "Data", + "data", + "data" + ], + [ + "large", + "large" + ], + [ + "applied", + "applied" + ], + [ + "voxel", + "voxel" + ], + [ + "brain", + "brain" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_assign_uniform_color", + "docstring": "Assign a hexadecimal RGB color to one or more Trackvis (.trk) tractograms.\n(If called with .tck, the output will always be .trk, because data_per_point\nhas no equivalent in tck files.)\n\nSaves the RGB values in the data_per_point 'color' with values\n(color_x, color_y, color_z).\n\nThe hexadecimal RGB color should be formatted as 0xRRGGBB or \"#RRGGBB\".\n\nSee also: scil_tractogram_assign_custom_color.py\n\nFormerly: scil_assign_uniform_color_to_tractograms.py", + "help": "usage: scil_tractogram_assign_uniform_color.py [-h]\n (--fill_color str | --dict_colors file.json)\n (--out_suffix [suffix] | --out_tractogram file.trk)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractograms\n [in_tractograms ...]\n\nAssign a hexadecimal RGB color to one or more Trackvis (.trk) tractograms.\n(If called with .tck, the output will always be .trk, because data_per_point\nhas no equivalent in tck files.)\n\nSaves the RGB values in the data_per_point 'color' with values\n(color_x, color_y, color_z).\n\nThe hexadecimal RGB color should be formatted as 0xRRGGBB or \"#RRGGBB\".\n\nSee also: scil_tractogram_assign_custom_color.py\n\nFormerly: scil_assign_uniform_color_to_tractograms.py\n\npositional arguments:\n in_tractograms Input tractograms (.trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nColoring Methods:\n --fill_color str Can be hexadecimal (i.e. either \"#RRGGBB\" or 0xRRGGBB).\n --dict_colors file.json\n Json file: dictionary mapping each tractogram's basename to a color.\n Do not put your file's extension in your dict.\n Same convention as --fill_color.\n\nOutput options:\n --out_suffix [suffix]\n Specify the suffix to append to the input basename.\n Mandatory choice if you run this script on multiple tractograms.\n Mandatory choice with --dict_colors.\n [None]\n --out_tractogram file.trk\n Output filename of the colored tractogram (.trk).\n", + "synonyms": [ + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "methods", + "methods" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_commit", + "docstring": "Convex Optimization Modeling for Microstructure Informed Tractography (COMMIT)\nestimates, globally, how well a given tractogram explains the DWI in terms of signal\nfit, assuming a certain forward microstructure model.
It assigns a weight to\neach streamline, which represents how well it explains the DWI signal globally.\nThe default forward microstructure model is stick-zeppelin-ball, which requires\nmulti-shell data and a peak file (principal fiber directions in each voxel,\ntypically from a field of fODFs).\n\nIt is possible to use the ball-and-stick model for single-shell and multi-shell\ndata. In this case, the peak file is not mandatory. Multi-shell should follow a\n\"NODDI protocol\" (low and high b-values); multiple shells with similar b-values\nshould not be used with COMMIT.\n\nThe output from COMMIT is:\n- fit_NRMSE.nii.gz\n fitting error (Normalized Root Mean Square Error)\n- fit_RMSE.nii.gz\n fitting error (Root Mean Square Error)\n- results.pickle\n Dictionary containing the experiment parameters and final weights\n- compartment_EC.nii.gz\n (est. Extra-Cellular signal fraction)\n- compartment_IC.nii.gz\n (est. Intra-Cellular signal fraction)\n- compartment_ISO.nii.gz\n (est. isotropic signal fraction (freewater compartment)):\n Each of the COMMIT compartments\n- streamline_weights.txt\n Text file containing the commit weights for each streamline of the\n input tractogram.\n- streamlines_length.txt\n Text file containing the length (mm) of each streamline.\n- streamline_weights_by_length.txt\n Text file containing the commit weights for each streamline of the\n input tractogram, ordered by their length.\n- tot_streamline_weights\n Text file containing the total commit weights of each streamline.\n Equal to commit_weights * streamlines_length (W_i * L_i)\n- essential.trk / non_essential.trk\n Tractograms containing the streamlines below or equal (essential) and\n above (non_essential) a threshold_weights of 0.\n- decompose_commit.h5\n In the case where the input is an hdf5 file only, we will save an output\n hdf5 with the following information separated into each bundle's dps:\n - streamlines_weights\n - streamline_weights_by_length\n For each bundle, only the essential streamlines are kept.\n\nThis script can divide the input tractogram in two using a threshold applied\nto the streamlines' weight. The threshold used is 0.0, keeping only streamlines\nthat have non-zero weight and that contribute to explaining the DWI signal.\nStreamlines with 0 weight are essentially not necessary according to COMMIT.\n\nCOMMIT2 is available only for HDF5 data from\nscil_tractogram_segment_bundles_for_connectivity.py and\nwith the --ball_stick option. Use the --commit2 option to activate it (slightly\nlonger computation time). This wrapper offers a simplified way to call COMMIT,\nbut does not allow using (or fine-tuning) every parameter.
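For illustration only, a run with the default stick-zeppelin-ball model could look like this (hypothetical file names; --in_peaks is mandatory for this model, and --ball_stick would be added instead for single-shell data; all flags are documented below):
    scil_tractogram_commit.py tractogram.trk dwi.nii.gz dwi.bval dwi.bvec commit_out/ --in_peaks peaks.nii.gz --processes 4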
If you want to use\nCOMMIT with full access to all parameters,\nvisit: https://github.com/daducci/COMMIT\n\nWhen tuning parameters such as --iso_diff, --para_diff, --perp_diff or\n--lambda_commit_2, you should evaluate the quality of results by:\n - Looking at the 'density' (GTM) of the connectome (essential tractogram)\n - Confirming the quality of WM bundle reconstruction (essential tractogram)\n - Inspecting the (N)RMSE map and looking for peaks or anomalies\n - Comparing the density map before and after (essential tractogram)\n\nFormerly: scil_run_commit.py", + "help": "usage: scil_tractogram_commit.py [-h] [--nbr_dir NBR_DIR]\n [--nbr_iter NBR_ITER] [--in_peaks IN_PEAKS]\n [--in_tracking_mask IN_TRACKING_MASK]\n [--commit2]\n [--lambda_commit_2 LAMBDA_COMMIT_2]\n [--ball_stick] [--para_diff PARA_DIFF]\n [--perp_diff PERP_DIFF [PERP_DIFF ...]]\n [--iso_diff ISO_DIFF [ISO_DIFF ...]]\n [--keep_whole_tractogram]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--tolerance tol]\n [--skip_b0_check] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram in_dwi in_bval in_bvec out_dir\n\nConvex Optimization Modeling for Microstructure Informed Tractography (COMMIT)\nestimates, globally, how well a given tractogram explains the DWI in terms of signal\nfit, assuming a certain forward microstructure model. It assigns a weight to\neach streamline, which represents how well it explains the DWI signal globally.\nThe default forward microstructure model is stick-zeppelin-ball, which requires\nmulti-shell data and a peak file (principal fiber directions in each voxel,\ntypically from a field of fODFs).\n\nIt is possible to use the ball-and-stick model for single-shell and multi-shell\ndata. In this case, the peak file is not mandatory. Multi-shell should follow a\n\"NODDI protocol\" (low and high b-values); multiple shells with similar b-values\nshould not be used with COMMIT.\n\nThe output from COMMIT is:\n- fit_NRMSE.nii.gz\n fitting error (Normalized Root Mean Square Error)\n- fit_RMSE.nii.gz\n fitting error (Root Mean Square Error)\n- results.pickle\n Dictionary containing the experiment parameters and final weights\n- compartment_EC.nii.gz\n (est. Extra-Cellular signal fraction)\n- compartment_IC.nii.gz\n (est. Intra-Cellular signal fraction)\n- compartment_ISO.nii.gz\n (est. isotropic signal fraction (freewater compartment)):\n Each of the COMMIT compartments\n- streamline_weights.txt\n Text file containing the commit weights for each streamline of the\n input tractogram.\n- streamlines_length.txt\n Text file containing the length (mm) of each streamline.\n- streamline_weights_by_length.txt\n Text file containing the commit weights for each streamline of the\n input tractogram, ordered by their length.\n- tot_streamline_weights\n Text file containing the total commit weights of each streamline.\n Equal to commit_weights * streamlines_length (W_i * L_i)\n- essential.trk / non_essential.trk\n Tractograms containing the streamlines below or equal (essential) and\n above (non_essential) a threshold_weights of 0.\n- decompose_commit.h5\n In the case where the input is an hdf5 file only, we will save an output\n hdf5 with the following information separated into each bundle's dps:\n - streamlines_weights\n - streamline_weights_by_length\n For each bundle, only the essential streamlines are kept.\n\nThis script can divide the input tractogram in two using a threshold applied\nto the streamlines' weight.
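A hedged sketch of the before/after density comparison suggested in the description, using scil_tractogram_compute_density_map.py documented later in this file (hypothetical file names; essential.trk is the COMMIT output listed above):
    scil_tractogram_compute_density_map.py tractogram.trk density_before.nii.gz
    scil_tractogram_compute_density_map.py commit_out/essential.trk density_after.nii.gz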
The threshold used is 0.0, keeping only streamlines\nthat have non-zero weight and that contribute to explaining the DWI signal.\nStreamlines with 0 weight are essentially not necessary according to COMMIT.\n\nCOMMIT2 is available only for HDF5 data from\nscil_tractogram_segment_bundles_for_connectivity.py and\nwith the --ball_stick option. Use the --commit2 option to activate it (slightly\nlonger computation time). This wrapper offers a simplified way to call COMMIT,\nbut does not allow using (or fine-tuning) every parameter. If you want to use\nCOMMIT with full access to all parameters,\nvisit: https://github.com/daducci/COMMIT\n\nWhen tuning parameters such as --iso_diff, --para_diff, --perp_diff or\n--lambda_commit_2, you should evaluate the quality of results by:\n - Looking at the 'density' (GTM) of the connectome (essential tractogram)\n - Confirming the quality of WM bundle reconstruction (essential tractogram)\n - Inspecting the (N)RMSE map and looking for peaks or anomalies\n - Comparing the density map before and after (essential tractogram)\n\nFormerly: scil_run_commit.py\n\npositional arguments:\n in_tractogram Input tractogram (.trk or .tck or .h5).\n in_dwi Diffusion-weighted image used by COMMIT (.nii.gz).\n in_bval b-values in the FSL format (.bval).\n in_bvec b-vectors in the FSL format (.bvec).\n out_dir Output directory for the COMMIT maps.\n\noptions:\n -h, --help show this help message and exit\n --nbr_dir NBR_DIR Number of directions, on the half of the sphere,\n representing the possible orientations of the response functions [500].\n --nbr_iter NBR_ITER Maximum number of iterations [1000].\n --in_peaks IN_PEAKS Peaks file representing principal direction(s) locally,\n typically coming from fODFs. This file is mandatory for the default \n stick-zeppelin-ball model.\n --in_tracking_mask IN_TRACKING_MASK\n Binary mask where tractography was allowed.\n If not set, uses a binary mask computed from the streamlines.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as the new --b0_threshold.\n Use with care, and only if you understand your data.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nCOMMIT2 options:\n --commit2 Run COMMIT2; requires .h5 as input and will force the\n ball&stick model.\n --lambda_commit_2 LAMBDA_COMMIT_2\n Specify the clustering prior strength [0.001].\n\nModel options:\n --ball_stick Use the ball&stick model, disabling the zeppelin compartment.\n The only model suitable for single-shell data.\n --para_diff PARA_DIFF\n Parallel diffusivity in mm^2/s.\n Default for both ball_stick and stick_zeppelin_ball: 1.7E-3.\n --perp_diff PERP_DIFF [PERP_DIFF ...]\n Perpendicular diffusivity in mm^2/s.\n Default for ball_stick: None\n Default for stick_zeppelin_ball: [0.51E-3]\n --iso_diff ISO_DIFF [ISO_DIFF ...]\n Isotropic diffusivity in mm^2/s.\n Default for ball_stick: [2.0E-3]\n Default for stick_zeppelin_ball: [1.7E-3, 3.0E-3]\n\nTractogram options:\n --keep_whole_tractogram\n Save a tractogram copy with the streamline weights in the data_per_streamline\n [False].\n --compute_only Compute kernels only; --save_kernels must be used.\n\nKernels options:\n --save_kernels DIRECTORY\n Output directory for the COMMIT kernels.\n --load_kernels DIRECTORY\n Input directory where the COMMIT kernels are located.\n\nReferences:\n[1] Daducci, Alessandro, et al. \"COMMIT: convex optimization modeling for\n microstructure informed tractography.\" IEEE transactions on medical\n imaging 34.1 (2014): 246-257.\n[2] Schiavi, Simona, et al. \"A new method for accurate in vivo mapping of\n human brain connections using microstructural and anatomical information.\"\n Science advances 6.31 (2020): eaba8245.\n", + "synonyms": [ + [ + "methods", + "method" + ], + [ + "principal", + "principal" + ], + [ + "direction", + "direction" + ], + [ + "diffusion", + "diffusion" + ], + [ + "Data", + "data", + "data" + ], + [ + "processes", + "processes" + ], + [ + "vivo", + "vivo" + ], + [ + "experiment", + "experiment" + ], + [ + "error", + "error" + ], + [ + "streamlines", + "streamlines" + ], + [ + "maps", + "map" + ], + [ + "examine", + "evaluate" + ], + [ + "bundles", + "bundle" + ], + [ + "image", + "image" + ], + [ + "tractography", + "tractography" + ], + [ + "bundles", + "bundles" + ], + [ + "anatomical", + "anatomy", + "anatomical" + ], + [ + "level", + "level" + ], + [ + "streamline", + "simplify" + ], + [ + "weighted", + "weighted" + ], + [ + "high", + "high" + ], + [ + "applied", + "apply" + ], + [ + "orientation", + "orientations" + ], + [ + "signal", + "signal" + ], + [ + "connections", + "connections" + ], + [ + "total", + "total" + ], + [ + "false", + "false" + ], + [ + "brain", + "brain" + ], + [ + "human", + "human" + ], + [ + "streamline", + "streamline" + ], + [ + "imaging", + "imaging" + ], + [ + "high", + "low" + ], + [ + "maps", + "maps" + ], + [ + "parameter", + "parameter" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "binary", + "binary" + ], + [ + "voxel", + "voxel" + ], + [ + "parameters", + "parameters" + ], + [ + "function", + "functions", + "functions" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_compress", + "docstring": "Compress a tractogram by removing collinear (or almost collinear) points.\n\nThe compression threshold represents the maximum distance (in mm) to the\noriginal position of the point.\n\nFormerly: scil_compress_streamlines.py", + "help": "usage: scil_tractogram_compress.py [-h] [-e ERROR_RATE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nCompress a tractogram by removing collinear (or almost collinear) points.\n\nThe
compression threshold represents the maximum distance (in mm) to the\noriginal position of the point.\n\nFormerly: scil_compress_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file (trk or tck).\n out_tractogram Path of the output tractogram file (trk or tck).\n\noptions:\n -h, --help show this help message and exit\n -e ERROR_RATE Maximum compression distance in mm [0.1].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_compute_TODI", + "docstring": "Compute a Track Orientation Density Image (TODI).\n\nEach segment of the streamlines is weighted by its length (to support\ncompressed streamlines).\n\nThis script can afterwards output a Track Density Image (TDI) or a TODI with SF\nor SH representation, based on streamlines' segments.\n\nFormerly: scil_compute_todi.py", + "help": "usage: scil_tractogram_compute_TODI.py [-h] [--sphere SPHERE] [--mask MASK]\n [--sh_order SH_ORDER]\n [--normalize_per_voxel]\n [--smooth_todi | --asymmetric]\n [--n_steps N_STEPS]\n [--out_mask OUT_MASK]\n [--out_tdi OUT_TDI]\n [--out_todi_sf OUT_TODI_SF]\n [--out_todi_sh OUT_TODI_SH]\n [--reference REFERENCE]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram\n\nCompute a Track Orientation Density Image (TODI).\n\nEach segment of the streamlines is weighted by its length (to support\ncompressed streamlines).\n\nThis script can afterwards output a Track Density Image (TDI) or a TODI with SF\nor SH representation, based on streamlines' segments.\n\nFormerly: scil_compute_todi.py\n\npositional arguments:\n in_tractogram Input streamlines file.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nComputing options:\n --sphere SPHERE Sphere used for the angular discretization. [repulsion724]\n --mask MASK If set, use the given mask.\n --sh_order SH_ORDER Order of the original SH. [8]\n --normalize_per_voxel\n If set, normalize each SF/SH at each voxel.\n --smooth_todi If set, smooth TODI (angular and spatial).\n --asymmetric If set, compute asymmetric TODI.\n Cannot be used with --smooth_todi.\n --n_steps N_STEPS Number of steps for streamline segments subdivision prior to binning [1].\n\nOutput files. 
Saves only when filename is set:\n --out_mask OUT_MASK Mask showing where TDI > 0.\n --out_tdi OUT_TDI Output Track Density Image (TDI).\n --out_todi_sf OUT_TODI_SF\n Output TODI, with SF (each direction\n on the sphere; requires a lot of memory)\n --out_todi_sh OUT_TODI_SH\n Output TODI, with SH coefficients.\n\nReferences:\n [1] Dhollander T, Emsell L, Van Hecke W, Maes F, Sunaert S, Suetens P.\n Track orientation density imaging (TODI) and\n track orientation distribution (TOD) based tractography.\n NeuroImage. 2014 Jul 1;94:312-36.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "spatial", + "spatial" + ], + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "weighted", + "weighted" + ], + [ + "orientation", + "orientation" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "imaging", + "imaging" + ], + [ + "image", + "image" + ], + [ + "tractography", + "tractography" + ], + [ + "based", + "based" + ], + [ + "memory", + "memory" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_compute_density_map", + "docstring": "Compute a density map from a streamlines file. Can be binary.\n\nThis script correctly handles compressed streamlines.\n\nFormerly: scil_compute_streamlines_density_map.py", + "help": "usage: scil_tractogram_compute_density_map.py [-h] [--binary [FIXED_VALUE]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_img\n\nCompute a density map from a streamlines file. Can be binary.\n\nThis script correctly handles compressed streamlines.\n\nFormerly: scil_compute_streamlines_density_map.py\n\npositional arguments:\n in_bundle Tractogram filename.\n out_img Path of the output image file.\n\noptions:\n -h, --help show this help message and exit\n --binary [FIXED_VALUE]\n If set, will store the same value for all intersected voxels, \n creating a binary map. When set without a value, 1 is used (and dtype \n uint8). If a value is given, it will be used as the stored value.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "maps", + "map" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "image", + "image" + ], + [ + "create", + "creating" + ], + [ + "voxel", + "voxels" + ], + [ + "binary", + "binary" + ], + [ + "level", + "level" + ], + [ + "intersected", + "intersected" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_convert", + "docstring": "Conversion of '.tck', '.trk', '.fib', '.vtk' and 'dpy' files using the updated file\nformat standard. A TRK file always needs a reference file, a NIFTI, for\nconversion. The FIB file format is in fact VTK; MITK Diffusion supports it.\n\nFormerly: scil_convert_tractogram.py", + "help": "usage: scil_tractogram_convert.py [-h] [--no_bbox_check]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram output_name\n\nConversion of '.tck', '.trk', '.fib', '.vtk' and 'dpy' files using the updated file\nformat standard. A TRK file always needs a reference file, a NIFTI, for\nconversion. The FIB file format is in fact VTK; MITK Diffusion supports it.\n\nFormerly: scil_convert_tractogram.py\n\npositional arguments:\n in_tractogram Tractogram filename.
Format must be one of \n trk, tck, vtk, fib, dpy\n output_name Output filename. Format must be one of \n trk, tck, vtk, fib, dpy\n\noptions:\n -h, --help show this help message and exit\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "invalid", + "invalid" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "diffusion", + "diffusion" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_convert_hdf5_to_trk", + "docstring": "Save connections of an hdf5 created with\n>> scil_tractogram_segment_bundles_for_connectivity.py.\n\nUseful for quality control and visual inspections.\n\nIt can either save all connections (default), individual connections specified\nwith --edge_keys, or connections from specific nodes specified with --node_keys.\n\nWith the option --save_empty, a labels_list, as a txt file, must be provided.\nThis option saves existing connections and empty connections.\n\nThe output is a directory containing the thousands of connections:\nout_dir/\n |-- LABEL1_LABEL1.trk\n |-- LABEL1_LABEL2.trk\n |-- [...]\n |-- LABEL90_LABEL90.trk\n\nFormerly: scil_save_connections_from_hdf5.py", + "help": "usage: scil_tractogram_convert_hdf5_to_trk.py [-h] [--include_dps]\n [--edge_keys LABEL1_LABEL2 [LABEL1_LABEL2 ...]\n | --node_keys NODE [NODE ...]]\n [--save_empty labels_list]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_hdf5 out_dir\n\nSave connections of an hdf5 created with\n>> scil_tractogram_segment_bundles_for_connectivity.py.\n\nUseful for quality control and visual inspections.\n\nIt can either save all connections (default), individual connections specified\nwith --edge_keys, or connections from specific nodes specified with --node_keys.\n\nWith the option --save_empty, a labels_list, as a txt file, must be provided.\nThis option saves existing connections and empty connections.\n\nThe output is a directory containing the thousands of connections:\nout_dir/\n |-- LABEL1_LABEL1.trk\n |-- LABEL1_LABEL2.trk\n |-- [...]\n |-- LABEL90_LABEL90.trk\n\nFormerly: scil_save_connections_from_hdf5.py\n\npositional arguments:\n in_hdf5 HDF5 filename (.h5) containing decomposed connections.\n out_dir Path of the output directory.\n\noptions:\n -h, --help show this help message and exit\n --include_dps Include the data_per_streamline in the metadata.\n --edge_keys LABEL1_LABEL2 [LABEL1_LABEL2 ...]\n Keys to identify the edges (connections) of interest.\n --node_keys NODE [NODE ...]\n Node keys to identify the sub-networks of interest.\n Equivalent to adding any --edge_keys node_LABEL2 or LABEL2_node.\n --save_empty labels_list\n Save empty connections. Then, the list of possible connections is \n not found from the hdf5 but inferred from labels_list, a txt file \n containing a list of nodes saved by the decomposition script.\n *If used together with edge_keys or node_keys, the provided nodes must \n exist in labels_list.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n CAREFUL. The whole output directory will be deleted if it exists.\n", + "synonyms": [ + [ + "visual", + "visual" + ], + [ + "network", + "networks", + "networks" + ], + [ + "individual", + "individual" + ], + [ + "connections", + "connections" + ], + [ + "exist", + "exist" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_count_streamlines", + "docstring": "Return the number of streamlines in a tractogram. Only supports trk and tck in\norder to support the lazy loading from nibabel.\n\nFormerly: scil_count_streamlines.py", + "help": "usage: scil_tractogram_count_streamlines.py [-h] [--print_count_alone]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n in_tractogram\n\nReturn the number of streamlines in a tractogram. Only supports trk and tck in\norder to support the lazy loading from nibabel.\n\nFormerly: scil_count_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --print_count_alone If true, prints the result only. \n Else, prints the bundle name and count formatted as a json dict. (default)\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "streamlines", + "streamlines" + ], + [ + "true", + "true" + ], + [ + "level", + "level" + ], + [ + "bundles", + "bundle" + ], + [ + "result", + "result" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_cut_streamlines", + "docstring": "Filters streamlines and only keeps the parts of streamlines within or\nbetween the ROIs. Two options are available.\n\nInput mask:\n\nThe mask has either 1 entity/blob or\n2 entities/blobs (does not support disconnected voxels).\nThe option --biggest_blob can help if you have such a scenario.\n\nThe 1 entity scenario will 'trim' the streamlines so their longest segment is\nwithin the bounding box or a binary mask.\n\nThe 2 entities scenario will cut streamlines so their segments are within the\nbounding box or going from binary mask #1 to binary mask #2.\n\nInput label:\n\nThe label MUST contain 2 labels different from zero.\nLabel values could be anything.\nThe script will cut streamlines going from label 1 to label 2.\n\nBoth inputs and scenarios will erase data_per_point and data_per_streamline.\n\nFormerly: scil_cut_streamlines.py", + "help": "usage: scil_tractogram_cut_streamlines.py [-h] (--mask MASK | --label LABEL)\n [--label_ids LABEL_IDS LABEL_IDS]\n [--resample STEP_SIZE]\n [--biggest_blob]\n [--compress [COMPRESS_TH]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nFilters streamlines and only keeps the parts of streamlines within or\nbetween the ROIs.
Two options are available.\n\nInput mask:\n\nThe mask has either 1 entity/blob or\n2 entities/blobs (does not support disconnected voxels).\nThe option --biggest_blob can help if you have such a scenario.\n\nThe 1 entity scenario will 'trim' the streamlines so their longest segment is\nwithin the bounding box or a binary mask.\n\nThe 2 entities scenario will cut streamlines so their segments are within the\nbounding box or going from binary mask #1 to binary mask #2.\n\nInput label:\n\nThe label MUST contain 2 labels different from zero.\nLabel values could be anything.\nThe script will cut streamlines going from label 1 to label 2.\n\nBoth inputs and scenarios will erase data_per_point and data_per_streamline.\n\nFormerly: scil_cut_streamlines.py\n\npositional arguments:\n in_tractogram Input tractogram file.\n out_tractogram Output tractogram file. Note: data_per_point will be discarded, if any!\n\noptions:\n -h, --help show this help message and exit\n --label_ids LABEL_IDS LABEL_IDS\n List of label indices to use to cut streamlines (2 values).\n --resample STEP_SIZE Resample streamlines to a specific step-size in mm [None].\n --biggest_blob Use the biggest entity and force the 1 ROI scenario.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm. [0.1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMandatory mask options:\n Choose between mask or label input.\n\n --mask MASK Binary mask containing either 1 or 2 blobs.\n --label LABEL Label containing 2 blobs.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "step", + "step" + ], + [ + "voxel", + "voxels" + ], + [ + "binary", + "binary" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_detect_loops", + "docstring": "This script can be used to remove loops in two types of streamline datasets:\n\n - Whole brain: For this type, the script removes streamlines if they\n make a loop with an angle of more than 360 degrees. It's possible to change\n this angle with the --angle option. Warning: Don't use the --qb option for a\n whole brain tractography.\n\n - Bundle dataset: For this type, it is possible to remove loops and\n streamlines outside the bundle. For the sharp angle turn, use the --qb option.\n\nSee also:\n scil_tractogram_filter_by_anatomy.py\n\nFormerly: scil_detect_streamlines_loops.py", + "help": "usage: scil_tractogram_detect_loops.py [-h]\n [--looping_tractogram out_filename]\n [--qb [threshold]] [--angle ANGLE]\n [--display_counts] [--no_empty]\n [--indent INDENT] [--sort_keys]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script can be used to remove loops in two types of streamline datasets:\n\n - Whole brain: For this type, the script removes streamlines if they\n make a loop with an angle of more than 360 degrees. It's possible to change\n this angle with the --angle option.
Warning: Don't use the --qb option for a\n whole brain tractography.\n\n - Bundle dataset: For this type, it is possible to remove loops and\n streamlines outside the bundle. For the sharp angle turn, use the --qb option.\n\nSee also:\n scil_tractogram_filter_by_anatomy.py\n\nFormerly: scil_detect_streamlines_loops.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_tractogram Output tractogram without loops.\n\noptions:\n -h, --help show this help message and exit\n --looping_tractogram out_filename\n If set, saves detected looping streamlines.\n --qb [threshold] If set, uses QuickBundles to detect outliers (loops, sharp angle \n turns). Given threshold is the maximal streamline to bundle \n distance for a streamline to be considered as a tracking error.\n Default if set: [8.0]\n --angle ANGLE Maximum looping (or turning) angle of\n a streamline in degrees. [360]\n --display_counts Print streamline count before and after filtering\n --no_empty If set, will not save outputs if they are empty.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\nReferences:\n QuickBundles, based on [Garyfallidis12] Frontiers in Neuroscience, 2012.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "processes", + "processes" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "tractography", + "tractography" + ], + [ + "based", + "based" + ], + [ + "considered", + "considered" + ], + [ + "tracking", + "tracking" + ], + [ + "level", + "level" + ], + [ + "brain", + "brain" + ], + [ + "error", + "error" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_dpp_math", + "docstring": "Performs an operation on data per point (dpp) from input streamlines.\n\nAlthough the input data always comes from the dpp, the output can be either\na dpp or a data_per_streamline (dps), depending on the chosen options.\nTwo modes of operation are supported: dpp and dps.\n - In dps mode, the operation is performed on dpp across the dimension of\n the streamlines resulting in a single value (or array in the 4D case)\n per streamline, stored as dps.\n - In dpp mode, the operation is performed on each point separately,\n resulting in a new dpp.\n\nIf endpoints_only is set with dpp mode, the operation will only be calculated at\nthe streamline endpoints; the rest of the values along the streamline will be\nNaN.\n\nIf endpoints_only is set with dps mode, the operation will be calculated across the\ndata at the endpoints and stored as a single value (or array in the 4D case)\nper streamline.\n\nEndpoint only operation:\ncorrelation: correlation calculated between arrays extracted from streamline\nendpoints (data must be multivalued per point) and dps mode must be set.", + "help": "usage: scil_tractogram_dpp_math.py [-h] --mode {dpp,dps} --in_dpp_name key\n [key ...]
--out_keys key [key ...]\n [--endpoints_only] [--keep_all_dpp_dps]\n [--overwrite_dpp_dps]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--no_bbox_check]\n OPERATION INPUT_FILE OUTPUT_FILE\n\nPerforms an operation on data per point (dpp) from input streamlines.\n\nAlthough the input data always comes from the dpp, the output can be either\na dpp or a data_per_streamline (dps), depending on the chosen options.\nTwo modes of operation are supported: dpp and dps.\n - In dps mode, the operation is performed on dpp across the dimension of\n the streamlines resulting in a single value (or array in the 4D case)\n per streamline, stored as dps.\n - In dpp mode, the operation is performed on each point separately,\n resulting in a new dpp.\n\nIf endpoints_only is set with dpp mode, the operation will only be calculated at\nthe streamline endpoints; the rest of the values along the streamline will be\nNaN.\n\nIf endpoints_only is set with dps mode, the operation will be calculated across the\ndata at the endpoints and stored as a single value (or array in the 4D case)\nper streamline.\n\nEndpoint only operation:\ncorrelation: correlation calculated between arrays extracted from streamline\nendpoints (data must be multivalued per point) and dps mode must be set.\n\npositional arguments:\n OPERATION The type of operation to be performed on the \n streamlines. Must be one of the following: \n [mean, sum, min, max, correlation.]\n INPUT_FILE Input tractogram containing streamlines and metadata.\n OUTPUT_FILE The file where the remaining streamlines \n are saved.\n\noptions:\n -h, --help show this help message and exit\n --mode {dpp,dps} Set to dps if the operation is to be performed \n across all dimensions resulting in a single value per \n streamline. Set to dpp if the operation is to be \n performed on each point separately resulting in a \n single value per point.\n --in_dpp_name key [key ...]\n Name or list of names of the data_per_point for \n operation to be performed on. If more than one dpp \n is selected, the same operation will be applied \n separately to each one.\n --out_keys key [key ...]\n Name of the resulting data_per_point or \n data_per_streamline to be saved in the output \n tractogram. If more than one --in_dpp_name was used, \n enter the same number of --out_keys values.\n --endpoints_only If set, will only perform operation on endpoints \n If not set, will perform operation on all streamline \n points.\n --keep_all_dpp_dps If set, previous data_per_point will be preserved \n in the output tractogram. Else, only --out_keys \n keys will be saved.\n --overwrite_dpp_dps If set, if --keep_all_dpp_dps is set and some \n --out_keys keys already existed in your \n data_per_point or data_per_streamline, allow \n overwriting old data_per_point.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
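A minimal invocation sketch, for illustration only (the tractogram filenames and the dpp key 'fa' are hypothetical; the positional OPERATION and the --mode, --in_dpp_name and --out_keys arguments follow the options documented above):
>>> scil_tractogram_dpp_math.py mean bundle.trk bundle_out.trk --mode dps --in_dpp_name fa --out_keys mean_fa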
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "key", + "key" + ], + [ + "invalid", + "invalid" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "supported", + "supported" + ], + [ + "exist", + "existed" + ], + [ + "Data", + "data", + "data" + ], + [ + "applied", + "applied" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_extract_ushape", + "docstring": "This script extracts streamlines depending on their U-shapeness.\nThis script is a replica of the TrackVis method.\n\nWhen ufactor is close to:\n* 0 it defines straight streamlines\n* 1 it defines U-fibers\n* -1 it defines S-fibers\n\nFormerly: scil_extract_ushape.py", + "help": "usage: scil_tractogram_extract_ushape.py [-h] [--minU MINU] [--maxU MAXU]\n [--remaining_tractogram filename]\n [--no_empty] [--display_counts]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script extracts streamlines depending on their U-shapeness.\nThis script is a replica of the TrackVis method.\n\nWhen ufactor is close to:\n* 0 it defines straight streamlines\n* 1 it defines U-fibers\n* -1 it defines S-fibers\n\nFormerly: scil_extract_ushape.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_tractogram Output tractogram file name.\n\noptions:\n -h, --help show this help message and exit\n --minU MINU Min ufactor value. [0.5]\n --maxU MAXU Max ufactor value. [1.0]\n --remaining_tractogram filename\n If set, saves remaining streamlines.\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "methods", + "method" + ], + [ + "streamline", + "streamline" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_anatomy", + "docstring": "This script filters streamlines in a tractogram according to their geometrical\nproperties (i.e. limiting their length and looping angle) and their anatomical\nending properties (i.e. the anatomical tissue or region their endpoints lie\nin).\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nThe filtering is performed sequentially in four steps, each step processing the\ndata on the output of the previous step:\n\n Step 1 - Remove streamlines below the minimum length and above the\n maximum length. These thresholds must be set with the ``--minL``\n and ``--maxL`` options.\n Step 2 - Ensure that no streamlines end in the cerebrospinal fluid\n according to the provided parcellation.
A binary mask can be used\n alternatively through the ``--csf_bin`` option.\n Step 3 - Ensure that no streamlines end in white matter by ensuring that\n they reach the cortical regions according to the provided\n parcellation. The cortical regions of the parcellation can be\n dilated using the ``--dilate_ctx`` option.\n Step 4 - Remove streamlines if they make a loop with an angle above a\n certain threshold. It's possible to change this angle with the\n ``--angle`` option.\n\nLength and loop-based filtering (steps 1 and 4) will not have practical effects\nif no specific thresholds are provided (but will still be executed), since\ndefault values are 0 for the minimum allowed length and infinite for the\nmaximum allowed length and angle.\n\nThe anatomical region endings filtering requires a parcellation or label image\nfile including the cerebrospinal fluid and gray matter (cortical) regions\naccording to the Desikan-Killiany atlas. Intermediate tractograms (results of\neach step and outliers) and volumes can be saved throughout the process.\n\nExample usages:\n\n# Filter length, looping angle and anatomical ending region\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --minL 20 --maxL 200 --angle 300\n# Filter only anatomical ending region, with WM dilation and provided csf mask\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --csf_bin csf_bin.nii.gz --dilate_ctx 2\n\nFormerly: scil_filter_streamlines_anatomically.py\n\nNOTE: As of version 2.0.0, the dilation of the cortical mask has changed; from\nan in-house process to scipy's dilation. Results may differ from previous\nversions.", + "help": "usage: scil_tractogram_filter_by_anatomy.py [-h] [--minL MINL] [--maxL MAXL]\n [--angle ANGLE]\n [--csf_bin CSF_BIN]\n [--dilate_ctx value]\n [--save_intermediate_tractograms]\n [--save_volumes] [--save_counts]\n [--save_rejected] [--no_empty]\n [--indent INDENT] [--sort_keys]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram in_wmparc out_path\n\nThis script filters streamlines in a tractogram according to their geometrical\nproperties (i.e. limiting their length and looping angle) and their anatomical\nending properties (i.e. the anatomical tissue or region their endpoints lie\nin).\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nThe filtering is performed sequentially in four steps, each step processing the\ndata on the output of the previous step:\n\n Step 1 - Remove streamlines below the minimum length and above the\n maximum length. These thresholds must be set with the ``--minL``\n and ``--maxL`` options.\n Step 2 - Ensure that no streamlines end in the cerebrospinal fluid\n according to the provided parcellation. A binary mask can be used\n alternatively through the ``--csf_bin`` option.\n Step 3 - Ensure that no streamlines end in white matter by ensuring that\n they reach the cortical regions according to the provided\n parcellation. The cortical regions of the parcellation can be\n dilated using the ``--dilate_ctx`` option.\n Step 4 - Remove streamlines if they make a loop with an angle above a\n certain threshold.
It's possible to change this angle with the\n ``--angle`` option.\n\nLength and loop-based filtering (steps 1 and 4) will not have practical effects\nif no specific thresholds are provided (but will still be executed), since\ndefault values are 0 for the minimum allowed length and infinite for the\nmaximum allowed length and angle.\n\nThe anatomical region endings filtering requires a parcellation or label image\nfile including the cerebrospinal fluid and gray matter (cortical) regions\naccording to the Desikan-Killiany atlas. Intermediate tractograms (results of\neach step and outliers) and volumes can be saved throughout the process.\n\nExample usages:\n\n# Filter length, looping angle and anatomical ending region\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --minL 20 --maxL 200 --angle 300\n# Filter only anatomical ending region, with WM dilation and provided csf mask\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --csf_bin csf_bin.nii.gz --dilate_ctx 2\n\nFormerly: scil_filter_streamlines_anatomically.py\n\nNOTE: As of version 2.0.0, the dilation of the cortical mask has changed; from\nan in-house process to scipy's dilation. Results may differ from previous\nversions.\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n in_wmparc Path of the white matter parcellation atlas (.nii or .nii.gz)\n out_path Path to the output files.\n\noptions:\n -h, --help show this help message and exit\n --minL MINL Minimum length of streamlines, in mm. [0.0]\n --maxL MAXL Maximum length of streamlines, in mm. [inf]\n --angle ANGLE Maximum looping (or turning) angle of a streamline, \n in degrees. [inf]\n --csf_bin CSF_BIN Allow CSF endings filtering with this binary\n mask instead of using the atlas (.nii or .nii.gz)\n --dilate_ctx value If set, dilate the cortical labels. Value is the dilation \n radius, in voxels (an integer > 0)\n --save_intermediate_tractograms\n Save accepted and discarded streamlines after each step.\n --save_volumes Save volumetric images (e.g. binarised label \n images, etc) in the filtering process.\n --save_counts Save the streamline counts to a file (.json)\n --save_rejected Save rejected streamlines to output tractogram.\n --no_empty Do not write file if there are no streamlines.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\n References:\n [1] J\u00f6rgens, D., Descoteaux, M., Moreno, R., 2021. Challenges for\n tractogram filtering. In: \u00d6zarslan, E., Schultz, T., Zhang, E., Fuster,\n A. (Eds.), Anisotropy Across Fields and Scales. Springer. Mathematics\n and Visualization.\n [2] Legarreta, J., Petit, L., Rheault, F., Theaud, G., Lemaire, C.,\n Descoteaux, M., Jodoin, P.M. Filtering in tractography using\n autoencoders (FINTA). Medical Image Analysis.
2021\n \n", + "synonyms": [ + [ + "process", + "process" + ], + [ + "limiting", + "limiting" + ], + [ + "region", + "regions", + "region" + ], + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "white", + "white" + ], + [ + "region", + "regions", + "regions" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "image", + "image" + ], + [ + "volume", + "volumes", + "volumes" + ], + [ + "Data", + "data", + "data" + ], + [ + "processes", + "processes" + ], + [ + "step", + "step" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "voxel", + "voxels" + ], + [ + "atlas", + "atlas" + ], + [ + "threshold", + "thresholds", + "thresholds" + ], + [ + "tractography", + "tractography" + ], + [ + "based", + "based" + ], + [ + "matter", + "matter" + ], + [ + "cortex", + "cortical", + "parietal", + "cortical" + ], + [ + "binary", + "binary" + ], + [ + "anatomical", + "anatomy", + "anatomical" + ], + [ + "processing", + "processing" + ], + [ + "specific", + "specific" + ], + [ + "level", + "level" + ], + [ + "analysis", + "analysis" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_length", + "docstring": "Script to filter streamlines based on their lengths.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Filtering by length is its step 1)\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_length.py", + "help": "usage: scil_tractogram_filter_by_length.py [-h] [--minL MINL] [--maxL MAXL]\n [--no_empty] [--display_counts]\n [--save_rejected] [--indent INDENT]\n [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nScript to filter streamlines based on their lengths.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Filtering by length is its step 1)\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_length.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --minL MINL Minimum length of streamlines, in mm. [0.0]\n --maxL MAXL Maximum length of streamlines, in mm. [inf]\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering\n --save_rejected Save rejected streamlines to output tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "based", + "based" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_orientation", + "docstring": "Script to filter streamlines based on their distance traveled in a specific\ndimension (x, y, or z).\n\nUseful to help differentiate bundles.\n\nExamples: In a brain aligned with x coordinates in the left-right axis and y\ncoordinates in the anterior-posterior axis, a streamline from the ...\n - corpus callosum will likely travel a very short distance in the y axis.\n - cingulum will likely travel a very short distance in the x axis.\n\nNote: we consider that x, y, z are the coordinates of the streamlines; we\ndo not verify if they are aligned with the brain's orientation.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_orientation.py", + "help": "usage: scil_tractogram_filter_by_orientation.py [-h] [--min_x MIN_X]\n [--max_x MAX_X]\n [--min_y MIN_Y]\n [--max_y MAX_Y]\n [--min_z MIN_Z]\n [--max_z MAX_Z] [--use_abs]\n [--no_empty]\n [--display_counts]\n [--save_rejected filename]\n [--indent INDENT]\n [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractogram out_tractogram\n\nScript to filter streamlines based on their distance traveled in a specific\ndimension (x, y, or z).\n\nUseful to help differentiate bundles.\n\nExamples: In a brain aligned with x coordinates in the left-right axis and y\ncoordinates in the anterior-posterior axis, a streamline from the ...\n - corpus callosum will likely travel a very short distance in the y axis.\n - cingulum will likely travel a very short distance in the x axis.\n\nNote: we consider that x, y, z are the coordinates of the streamlines; we\ndo not verify if they are aligned with the brain's orientation.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_orientation.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --min_x MIN_X Minimum distance in the first dimension, in mm. [0.0]\n --max_x MAX_X Maximum distance in the first dimension, in mm. [inf]\n --min_y MIN_Y Minimum distance in the second dimension, in mm. [0.0]\n --max_y MAX_Y Maximum distance in the second dimension, in mm. [inf]\n --min_z MIN_Z Minimum distance in the third dimension, in mm. [0.0]\n --max_z MAX_Z Maximum distance in the third dimension, in mm. [inf]\n --use_abs If set, will use the total of distances in absolute value (ex: coming back on yourself will contribute to the total distance instead of cancelling it).\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering.\n --save_rejected filename\n Save the SFT of rejected streamlines.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v
[{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "anterior", + "posterior", + "anterior" + ], + [ + "anterior", + "posterior", + "posterior" + ], + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "corpus", + "corpus" + ], + [ + "orientation", + "orientation" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "left", + "left" + ], + [ + "based", + "based" + ], + [ + "bundles", + "bundles" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ], + [ + "total", + "total" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_roi", + "docstring": "Filtering of a tractogram based on any combination of conditions involving a\nROI (ex: keep streamlines whose endpoints are inside the ROI, exclude\nstreamlines not entirely included in a ROI, etc.)\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Can reject streamlines with endpoints in the WM or the CSF based on\n labels)\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n\nCondition\n---------\nFor every type of ROI, two or three values are expected: MODE CRITERIA DISTANCE\n(DISTANCE is always optional)\n- MODE must be one of these values: ['any', 'all', 'either_end', 'both_ends']\n - any: any part of the streamline must be in the mask\n - all: all parts of the streamline must be in the mask.\n - either_end: at least one end of the streamline must be in the mask.\n - both_ends: both ends of the streamline must be in the mask.\n- CRITERIA must be one of these values: ['include', 'exclude']\n - Include: If condition from MODE is respected, streamline is included.\n - Exclude: If condition from MODE is respected, streamline is excluded.\n- DISTANCE must be an int and is optional.\n\n\nType of ROI\n-----------\n- Drawn ROI: Directly loaded from a binary file.\n- Atlas ROI: Selected label from an atlas.\n - ID is one or multiple integer values in the atlas. If multiple values,\n ID needs to be between quotes.\n Example: \"1:6 9 10:15\" will use values between 1 and 6 and values\n between 10 and 15 included as well as value 9.\n- BDO: The ROI is the interior of a bounding box.\n- Planes: The ROI is the equivalent of a one-voxel plane.\n * Using mode 'all' with x/y/z plane works but makes very little sense.\n\nNote: `--drawn_roi MASK.nii.gz all include` is equivalent to\n `--drawn_roi INVERSE_MASK.nii.gz any exclude`\n\nFor example, this allows one to find all streamlines entirely in the WM in one\ncommand (without manually inverting the mask first) or to remove any streamline\nstaying in the GM without getting out.\n\nSupports multiple filtering conditions\n--------------------------------------\nMultiple filtering conditions can be used, with varied ROI types if necessary.\nCombining two conditions is equivalent to a logical AND between the conditions.\nOrder of application does not matter for the final result, but may change the\nintermediate files, if any.\n\nDistance management\n-------------------\nDISTANCE is optional, and it should be used carefully with large voxel size\n(e.g. > 2.5mm).
The value is in voxel for ROIs and in mm for bounding boxes.\nAnisotropic data will affect each direction differently.\n When using --overwrite_distance, any filtering option with given criteria\nwill have its DISTANCE value replaced.\n\nFormerly: scil_filter_tractogram.py", + "help": "usage: scil_tractogram_filter_by_roi.py [-h]\n [--drawn_roi DRAWN_ROI [DRAWN_ROI ...]]\n [--atlas_roi ATLAS_ROI [ATLAS_ROI ...]]\n [--bdo BDO [BDO ...]]\n [--x_plane X_PLANE [X_PLANE ...]]\n [--y_plane Y_PLANE [Y_PLANE ...]]\n [--z_plane Z_PLANE [Z_PLANE ...]]\n [--filtering_list FILTERING_LIST]\n [--overwrite_distance OVERWRITE_DISTANCE [OVERWRITE_DISTANCE ...]]\n [--save_masks_atlas_roi SAVE_MASKS_ATLAS_ROI]\n [--no_empty] [--display_counts]\n [--save_rejected FILENAME]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nFiltering of a tractogram based on any combination of conditions involving a\nROI (ex: keep streamlines whose endpoints are inside the ROI, exclude\nstreamlines not entirely included in a ROI, etc.)\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Can reject streamlines with endpoints in the WM or the CSF based on\n labels)\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n\nCondition\n---------\nFor every type of ROI, two or three values are expected: MODE CRITERIA DISTANCE\n(DISTANCE is always optional)\n- MODE must be one of these values: ['any', 'all', 'either_end', 'both_ends']\n - any: any part of the streamline must be in the mask\n - all: all parts of the streamline must be in the mask.\n - either_end: at least one end of the streamline must be in the mask.\n - both_ends: both ends of the streamline must be in the mask.\n- CRITERIA must be one of these values: ['include', 'exclude']\n - Include: If condition from MODE is respected, streamline is included.\n - Exclude: If condition from MODE is respected, streamline is excluded.\n- DISTANCE must be an int and is optional.\n\nType of ROI\n-----------\n- Drawn ROI: Directly loaded from a binary file.\n- Atlas ROI: Selected label from an atlas.\n - ID is one or multiple integer values in the atlas. If multiple values,\n ID needs to be between quotes.\n Example: \"1:6 9 10:15\" will use values between 1 and 6 and values\n between 10 and 15 included as well as value 9.\n- BDO: The ROI is the interior of a bounding box.\n- Planes: The ROI is the equivalent of a one-voxel plane.\n * Using mode 'all' with x/y/z plane works but makes very little sense.\n\nNote: `--drawn_roi MASK.nii.gz all include` is equivalent to\n `--drawn_roi INVERSE_MASK.nii.gz any exclude`\n\nFor example, this allows one to find all streamlines entirely in the WM in one\ncommand (without manually inverting the mask first) or to remove any streamline\nstaying in the GM without getting out.\n\nSupports multiple filtering conditions\n--------------------------------------\nMultiple filtering conditions can be used, with varied ROI types if necessary.\nCombining two conditions is equivalent to a logical AND between the conditions.\nOrder of application does not matter for the final result, but may change the\nintermediate files, if any.\n\nDistance management\n-------------------\nDISTANCE is optional, and it should be used carefully with large voxel size\n(e.g. > 2.5mm).
The value is in voxel for ROIs and in mm for bounding boxes.\nAnisotropic data will affect each direction differently.\n When using --overwrite_distance, any filtering option with given criteria\nwill have its DISTANCE value replaced.\n\nFormerly: scil_filter_tractogram.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n out_tractogram Path of the output tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --drawn_roi DRAWN_ROI [DRAWN_ROI ...]\n ROI_NAME MODE CRITERIA DISTANCE (distance in voxel is optional)\n Filename of a hand drawn ROI (.nii or .nii.gz).\n --atlas_roi ATLAS_ROI [ATLAS_ROI ...]\n ATLAS_NAME ID MODE CRITERIA DISTANCE (distance in voxel is optional)\n Filename of an atlas (.nii or .nii.gz).\n --bdo BDO [BDO ...] BDO_NAME MODE CRITERIA DISTANCE (distance in mm is optional)\n Filename of a bounding box (bdo) file from MI-Brain.\n --x_plane X_PLANE [X_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in X, in voxel space.\n --y_plane Y_PLANE [Y_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in Y, in voxel space.\n --z_plane Z_PLANE [Z_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in Z, in voxel space.\n --filtering_list FILTERING_LIST\n Text file containing one rule per line\n (i.e. drawn_roi mask.nii.gz both_ends include 1).\n --overwrite_distance OVERWRITE_DISTANCE [OVERWRITE_DISTANCE ...]\n MODE CRITERIA DISTANCE (distance in voxel for ROIs and in mm for bounding box).\n If set, it will overwrite the distance associated with a specific mode/criteria.\n --save_masks_atlas_roi SAVE_MASKS_ATLAS_ROI\n If set, will save the atlas roi masks. The value to provide is the \n prefix, ex: my_path/atlas_roi_. Whole filename will be \n my_path/atlas_roi_{id}.nii.gz\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering\n --save_rejected FILENAME\n Save rejected streamlines to output tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
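For illustration, a hedged sketch combining two of the documented condition types (all filenames are hypothetical; the MODE CRITERIA DISTANCE format follows the description above):
>>> scil_tractogram_filter_by_roi.py in.trk out.trk --drawn_roi wm_mask.nii.gz all include --bdo box.bdo either_end include 2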
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "streamlines", + "streamlines" + ], + [ + "planes", + "plane" + ], + [ + "streamline", + "streamline" + ], + [ + "direction", + "direction" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "planes", + "planes" + ], + [ + "space", + "space" + ], + [ + "Data", + "data", + "data" + ], + [ + "application", + "application" + ], + [ + "conditions", + "conditions" + ], + [ + "large", + "large" + ], + [ + "atlas", + "atlas" + ], + [ + "result", + "result" + ], + [ + "based", + "based" + ], + [ + "matter", + "matter" + ], + [ + "binary", + "binary" + ], + [ + "voxel", + "voxel" + ], + [ + "specific", + "specific" + ], + [ + "level", + "level" + ], + [ + "brain", + "brain" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_fix_trk", + "docstring": "This script is made to fix DSI-Studio or Startrack TRK file\n(unknown space/convention) to make it compatible with TrackVis,\nMI-Brain, Dipy Horizon (Stateful Tractogram).\n\nDSI-Studio\n==========\n\nThe script either makes it match with an anatomy from DSI-Studio (AC-PC aligned,\nsometimes flipped) or, if --in_native_fa is provided, it moves it back to native\nDWI space (this involves registration).\n\nSince DSI-Studio sometimes leaves some skull around the brain, the --auto_crop\naims to stabilize registration. If this option fails, manually BET both FA.\nRegistration is more robust at resolution above 2mm (iso), be careful.\n\nIf you are fixing bundles, use this script once with --save_transfo and verify\nresults. Once satisfied, call the scripts on bundles using a bash for loop with\n--load_transfo to save computation.\n\nWe recommend the --cut_invalid option to remove invalid points of streamlines rather\nthan removing entire streamlines.\n\nThis script was tested on various datasets and worked on all of them. However,\nalways verify the results; if a specific case does not work, open an issue\non the Scilpy GitHub repository.\n\nStartrack\n==========\n\nThe script will create a new stateful tractogram using the reference in\norder to fix the missing information in the header of the trk.\n\n\nWARNING: This script is still experimental, DSI-Studio and Startrack\nevolve quickly and results may vary depending on the data itself\nas well as DSI-studio/Startrack version.\n\nFormerly: scil_fix_dsi_studio_trk.py", + "help": "usage: scil_tractogram_fix_trk.py [-h] [--software string]\n [--cut_invalid | --remove_invalid]\n [--in_dsi_fa IN_DSI_FA]\n [--in_native_fa IN_NATIVE_FA] [--auto_crop]\n [--save_transfo FILE | --load_transfo FILE]\n [--reference REFERENCE] [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script is made to fix DSI-Studio or Startrack TRK file\n(unknown space/convention) to make it compatible with TrackVis,\nMI-Brain, Dipy Horizon (Stateful Tractogram).\n\nDSI-Studio\n==========\n\nThe script either makes it match with an anatomy from DSI-Studio (AC-PC aligned,\nsometimes flipped) or, if --in_native_fa is provided, it moves it back to native\nDWI space (this involves registration).\n\nSince DSI-Studio sometimes leaves some skull around the brain, the --auto_crop\naims to stabilize registration.
If this option fails, manually BET both FA.\nRegistration is more robust at resolution above 2mm (iso), be careful.\n\nIf you are fixing bundles, use this script once with --save_transfo and verify\nresults. Once satisfied, call the scripts on bundles using a bash for loop with\n--load_transfo to save computation.\n\nWe recommend the --cut_invalid option to remove invalid points of streamlines rather\nthan removing entire streamlines.\n\nThis script was tested on various datasets and worked on all of them. However,\nalways verify the results; if a specific case does not work, open an issue\non the Scilpy GitHub repository.\n\nStartrack\n==========\n\nThe script will create a new stateful tractogram using the reference in\norder to fix the missing information in the header of the trk.\n\nWARNING: This script is still experimental, DSI-Studio and Startrack\nevolve quickly and results may vary depending on the data itself\nas well as DSI-studio/Startrack version.\n\nFormerly: scil_fix_dsi_studio_trk.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file from DSI studio (.trk).\n out_tractogram Path of the output tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --software string Software used to create in_tractogram.\n Choices: ['dsi_studio', 'startrack']\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nDSI options:\n --in_dsi_fa IN_DSI_FA\n Path of the input FA from DSI Studio (.nii.gz).\n --in_native_fa IN_NATIVE_FA\n Path of the input FA from Dipy/MRtrix (.nii.gz).\n Move the tractogram back to a \"proper\" space, include registration.\n --auto_crop If both FA are not already BET, perform registration \n using a centered-cube crop to ignore the skull.\n A good BET for both is more robust.\n --save_transfo FILE Save estimated transformation to avoid recomputing (.txt).\n --load_transfo FILE Load estimated transformation to apply to other files (.txt).\n\nStarTrack options:\n --reference REFERENCE\n Reference anatomy (.nii or .nii.gz).\n", + "synonyms": [ + [ + "order", + "order" + ], + [ + "variety", + "various" + ], + [ + "streamlines", + "streamlines" + ], + [ + "invalid", + "invalid" + ], + [ + "create", + "create" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "work", + "work" + ], + [ + "unknown", + "unknown" + ], + [ + "applied", + "apply" + ], + [ + "space", + "space" + ], + [ + "involved", + "involved" + ], + [ + "Data", + "data", + "data" + ], + [ + "working", + "worked" + ], + [ + "bundles", + "bundles" + ], + [ + "specific", + "specific" + ], + [ + "level", + "level" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_flip", + "docstring": "Flip streamlines locally around specific axes.\n\nIMPORTANT: this script should only be used in case of absolute necessity.\nIt's better to fix the real tools than to force flipping streamlines to\nhave them fit in the tools.\n\nFormerly: scil_flip_streamlines.py", + "help": "usage: scil_tractogram_flip.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram
out_tractogram {x,y,z}\n [{x,y,z} ...]\n\nFlip streamlines locally around specific axes.\n\nIMPORTANT: this script should only be used in case of absolute necessity.\nIt's better to fix the real tools than to force flipping streamlines to\nhave them fit in the tools.\n\nFormerly: scil_flip_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n out_tractogram Path of the output tractogram file.\n {x,y,z} The axes you want to flip. eg: to flip the x and y axes use: x y.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "tool", + "tools" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_math", + "docstring": "Performs an operation on a list of streamline files. The supported\noperations are:\n\ndifference: Keep the streamlines from the first file that are not in\n any of the following files.\n\nintersection: Keep the streamlines that are present in all files.\n\nunion: Keep all streamlines while removing duplicates.\n\nconcatenate: Keep all streamlines with duplicates.\n\nlazy_concatenate: Keep all streamlines with duplicates, never load the whole\n tractograms in memory. Only works with trk/tck files;\n metadata will be lost and invalid streamlines are kept.\n\nIf a file 'duplicate.trk' has identical streamlines, calling the script using\nthe difference/intersection/union with a single input will remove these\nduplicated streamlines.\n\nTo allow a soft match, use the --precision option to increase the allowed\nthreshold for similarity. A precision of 1 represents 10**(-1), so a\nmaximum distance of 0.1mm is allowed. If the streamlines are identical, the\ndefault value of 3 (or 0.001mm distance) should work.\n\nIf there is a 0.5mm shift, use a precision of 0 (or 1mm distance) and the\n--robust option. Should make it work, but slightly slower. Will merge all\nstreamlines that are similar when rounded to that precision level.\n\nThe metadata (data per point, data per streamline) of the streamlines that\nare kept in the output will be preserved. This requires that all input files\nshare the same type of metadata. If this is not the case, use the option\n--no_metadata to strip the metadata from the output. Or --fake_metadata to\ninitialize dummy metadata in the files missing them.\n\nFormerly: scil_streamlines_math.py", + "help": "usage: scil_tractogram_math.py [-h] [--precision NBR_OF_DECIMALS] [--robust]\n [--no_metadata] [--fake_metadata]\n [--save_indices OUT_INDEX_FILE] [--save_empty]\n [--no_bbox_check] [--indent INDENT]\n [--sort_keys] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n OPERATION INPUT_FILES [INPUT_FILES ...]\n OUTPUT_FILE\n\nPerforms an operation on a list of streamline files.
The supported\noperations are:\n\ndifference: Keep the streamlines from the first file that are not in\n any of the following files.\n\nintersection: Keep the streamlines that are present in all files.\n\nunion: Keep all streamlines while removing duplicates.\n\nconcatenate: Keep all streamlines with duplicates.\n\nlazy_concatenate: Keep all streamlines with duplicates, never load the whole\n tractograms in memory. Only works with trk/tck files;\n metadata will be lost and invalid streamlines are kept.\n\nIf a file 'duplicate.trk' has identical streamlines, calling the script using\nthe difference/intersection/union with a single input will remove these\nduplicated streamlines.\n\nTo allow a soft match, use the --precision option to increase the allowed\nthreshold for similarity. A precision of 1 represents 10**(-1), so a\nmaximum distance of 0.1mm is allowed. If the streamlines are identical, the\ndefault value of 3 (or 0.001mm distance) should work.\n\nIf there is a 0.5mm shift, use a precision of 0 (or 1mm distance) and the\n--robust option. Should make it work, but slightly slower. Will merge all\nstreamlines that are similar when rounded to that precision level.\n\nThe metadata (data per point, data per streamline) of the streamlines that\nare kept in the output will be preserved. This requires that all input files\nshare the same type of metadata. If this is not the case, use the option\n--no_metadata to strip the metadata from the output. Or --fake_metadata to\ninitialize dummy metadata in the files missing them.\n\nFormerly: scil_streamlines_math.py\n\npositional arguments:\n OPERATION The type of operation to be performed on the streamlines. Must\n be one of the following: difference, intersection, union, concatenate, lazy_concatenate.\n INPUT_FILES The list of files that contain the streamlines to operate on.\n OUTPUT_FILE The file where the remaining streamlines are saved.\n\noptions:\n -h, --help show this help message and exit\n --precision NBR_OF_DECIMALS, -p NBR_OF_DECIMALS\n Precision used to compare streamlines [4].\n --robust, -r Use version robust to small translation/rotation.\n --no_metadata, -n Strip the streamline metadata from the output.\n --fake_metadata Skip the metadata verification, create fake metadata if missing, can lead to unexpected behavior.\n --save_indices OUT_INDEX_FILE, -s OUT_INDEX_FILE\n Save the streamline indices to the supplied json file.\n --save_empty If set, we will save all results, even if the tractogram is empty.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
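As a hedged example (hypothetical filenames; the positional order OPERATION INPUT_FILES OUTPUT_FILE and the --precision option are as documented above):
>>> scil_tractogram_math.py union run1.trk run2.trk merged.trk --precision 3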
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "precision", + "precision" + ], + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "increase", + "increase" + ], + [ + "similarity", + "similarity" + ], + [ + "invalid", + "invalid" + ], + [ + "create", + "create" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "supported", + "supported" + ], + [ + "work", + "work" + ], + [ + "Data", + "data", + "data" + ], + [ + "large", + "small" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "memory", + "memory" + ], + [ + "level", + "level" + ], + [ + "difference", + "difference" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_pairwise_comparison", + "docstring": "This script is designed to compare and help visualize differences between two\ntractograms. This can be especially useful in studies where multiple\ntractograms from different algorithms or parameters need to be compared.\n\nA similar script (scil_bundle_pairwise_comparison.py) is available for bundles,\nwith metrics more adapted to bundles (and spatial agreement).\n\nThe difference is computed in terms of\n- A voxel-wise spatial distance between streamlines crossing each voxel.\n This can help to see if both tractography reconstructions at each voxel\n look similar (out_diff.nii.gz)\n- An angular correlation (ACC) between streamline orientation from TODI.\n This compares the local orientation of streamlines at each voxel\n (out_acc.nii.gz)\n- A patch-wise correlation between streamline density maps from both\n tractograms. This compares where the high/low density regions agree or not\n (out_corr.nii.gz)\n- A heatmap combining all the previous metrics using a harmonic mean of the\n normalized metrics to summarize general agreement (out_heatmap.nii.gz)", + "help": "usage: scil_tractogram_pairwise_comparison.py [-h] [--out_dir OUT_DIR]\n [--out_prefix OUT_PREFIX]\n [--in_mask IN_FILE]\n [--skip_streamlines_distance]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram_1 in_tractogram_2\n\nThis script is designed to compare and help visualize differences between two\ntractograms. This can be especially useful in studies where multiple\ntractograms from different algorithms or parameters need to be compared.\n\nA similar script (scil_bundle_pairwise_comparison.py) is available for bundles,\nwith metrics more adapted to bundles (and spatial agreement).\n\nThe difference is computed in terms of\n- A voxel-wise spatial distance between streamlines crossing each voxel.\n This can help to see if both tractography reconstructions at each voxel\n look similar (out_diff.nii.gz)\n- An angular correlation (ACC) between streamline orientation from TODI.\n This compares the local orientation of streamlines at each voxel\n (out_acc.nii.gz)\n- A patch-wise correlation between streamline density maps from both\n tractograms.
This compares where the high/low density regions agree or not\n (out_corr.nii.gz)\n- A heatmap combining all the previous metrics using a harmonic mean of the\n normalized metrics to summarize general agreement (out_heatmap.nii.gz)\n\npositional arguments:\n in_tractogram_1 Input tractogram 1.\n in_tractogram_2 Input tractogram 2.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Directory where all output files will be saved.\n If not specified, outputs will be saved in the current directory.\n --out_prefix OUT_PREFIX\n Prefix for output files. Useful for distinguishing between different runs [out].\n --in_mask IN_FILE Optional input mask.\n --skip_streamlines_distance\n Skip computation of the spatial distance between streamlines. Slowest part of the computation.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "spatial", + "spatial" + ], + [ + "streamlines", + "streamlines" + ], + [ + "studies", + "study", + "studies" + ], + [ + "streamline", + "streamline" + ], + [ + "differences", + "differences" + ], + [ + "orientation", + "orientation" + ], + [ + "region", + "regions", + "regions" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "high", + "low" + ], + [ + "high", + "high" + ], + [ + "maps", + "maps" + ], + [ + "processes", + "processes" + ], + [ + "algorithm", + "algorithms" + ], + [ + "tractography", + "tractography" + ], + [ + "bundles", + "bundles" + ], + [ + "general", + "general" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "parameters", + "parameters" + ], + [ + "difference", + "difference" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_print_info", + "docstring": "Prints information on a loaded tractogram: number of streamlines, and\nmean / min / max / std of\n - length in number of points\n - length in mm\n - step size.\n\nFor trk files: also prints the data_per_point and data_per_streamline keys.\n\nSee also:\n - scil_header_print_info.py to see the header, affine, volume dimension.\n - scil_bundle_shape_measures.py to see bundle-specific information.", + "help": "usage: scil_tractogram_print_info.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [--indent INDENT] [--sort_keys]\n in_tractogram\n\nPrints information on a loaded tractogram: number of streamlines, and\nmean / min / max / std of\n - length in number of points\n - length in mm\n - step size.\n\nFor trk files: also prints the data_per_point and data_per_streamline keys.\n\nSee also:\n - scil_header_print_info.py to see the header, affine, volume dimension.\n - scil_bundle_shape_measures.py to see bundle-specific information.\n\npositional arguments:\n in_tractogram Tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
\n Default level is warning, default when using -v is info.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "step", + "step" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_project_map_to_streamlines", + "docstring": "Projects maps extracted from a map onto the points of streamlines.\n\nThe default options will take data from a nifti image (3D or 4D) and project it\nonto the points of streamlines. If the image is 4D, the data is stored as a\nlist of 1D arrays per streamline. If the image is 3D, the data is stored as a\nlist of values per streamline.\n\nSee also scil_tractogram_project_streamlines_to_map.py for the reverse action.\n\n* Note that the data from your maps will be projected only on the coordinates\nof the points of your streamlines. Data underlying the whole segments between\ntwo consecutive points is not used. If your streamlines are strongly\ncompressed, or if they have a very big step size, the result will possibly\nreflect poorly your map. You may use scil_tractogram_resample.py to upsample\nyour streamlines first.\n\n* Hint: The streamlines themselves are not modified here, only their dpp. To\navoid multiplying data on disk, you could use the following arguments to save\nthe new dpp in your current tractogram:\n>> scil_tractogram_project_map_to_streamlines.py $in_bundle $in_bundle\n --keep_all_dpp -f", + "help": "usage: scil_tractogram_project_map_to_streamlines.py [-h] --in_maps IN_MAPS\n [IN_MAPS ...]\n --out_dpp_name\n OUT_DPP_NAME\n [OUT_DPP_NAME ...]\n [--trilinear]\n [--endpoints_only]\n [--keep_all_dpp]\n [--overwrite_dpp]\n [--reference REFERENCE]\n [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n in_tractogram\n out_tractogram\n\nProjects maps extracted from a map onto the points of streamlines.\n\nThe default options will take data from a nifti image (3D or 4D) and project it\nonto the points of streamlines. If the image is 4D, the data is stored as a\nlist of 1D arrays per streamline. If the image is 3D, the data is stored as a\nlist of values per streamline.\n\nSee also scil_tractogram_project_streamlines_to_map.py for the reverse action.\n\n* Note that the data from your maps will be projected only on the coordinates\nof the points of your streamlines. Data underlying the whole segments between\ntwo consecutive points is not used. If your streamlines are strongly\ncompressed, or if they have a very big step size, the result will possibly\nreflect poorly your map. You may use scil_tractogram_resample.py to upsample\nyour streamlines first.\n\n* Hint: The streamlines themselves are not modified here, only their dpp. 
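As a hedged sketch of the nearest-neighbour projection that scil_tractogram_project_map_to_streamlines.py applies by default (assuming the points are already in voxel space; names are illustrative, not the script's internals):

    import numpy as np

    def sample_map_nearest(map_3d, points_vox):
        # Round each streamline point to its nearest voxel, clamp to the
        # volume bounds, then read the map value at that voxel.
        idx = np.clip(np.round(points_vox).astype(int), 0,
                      np.asarray(map_3d.shape) - 1)
        return map_3d[idx[:, 0], idx[:, 1], idx[:, 2]]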
To\navoid multiplying data on disk, you could use the following arguments to save\nthe new dpp in your current tractogram:\n>> scil_tractogram_project_map_to_streamlines.py $in_bundle $in_bundle\n --keep_all_dpp -f\n\npositional arguments:\n in_tractogram Fiber bundle file.\n out_tractogram Output file.\n\noptions:\n -h, --help show this help message and exit\n --in_maps IN_MAPS [IN_MAPS ...]\n Nifti map to project onto streamlines.\n --out_dpp_name OUT_DPP_NAME [OUT_DPP_NAME ...]\n Name of the data_per_point to be saved in the \n output tractogram.\n --trilinear If set, will use trilinear interpolation \n else will use nearest neighbor interpolation \n by default.\n --endpoints_only If set, will only project the map onto the \n endpoints of the streamlines (all other values along \n streamlines will be NaN). If not set, will project \n the map onto all points of the streamlines.\n --keep_all_dpp If set, previous data_per_point will be preserved \n in the output tractogram. Else, only --out_dpp_name \n keys will be saved.\n --overwrite_dpp If set, if --keep_all_dpp is set and some \n --out_dpp_name keys already existed in your \n data_per_point, allow overwriting old data_per_point.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "maps", + "map" + ], + [ + "action", + "action" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "image", + "image" + ], + [ + "exist", + "existed" + ], + [ + "maps", + "maps" + ], + [ + "project", + "project" + ], + [ + "Data", + "data", + "data" + ], + [ + "step", + "step" + ], + [ + "result", + "result" + ], + [ + "project", + "projects" + ], + [ + "level", + "level" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_project_streamlines_to_map", + "docstring": "Projects metrics onto the underlying voxels of streamlines. This script can\nproject data from data_per_point (dpp) or data_per_streamline (dps) to maps.\n\nYou choose to project data from all points of the streamlines, or from the\nendpoints only. The idea then is to visualize the cortical areas affected by\nmetrics (assuming streamlines start/end in the cortex).\n\nSee also scil_tractogram_project_map_to_streamlines.py for the reverse action.\n\nHow the data is loaded:\n - From dps: uses the same value for each point of the streamline.\n - From dpp: one value per point.\n\nHow the data is used:\n 1. Average all points of the streamline to get a mean value, set this value\n to all points.\n 2. Average the two endpoints and get their mean value, set this value to\n all points.\n 3. Keep each point individually.\n\nHow the data is projected to a map:\n A. Using each point.\n B. Using the endpoints only.\n\nFor more complex operations than the average per streamline, see\nscil_tractogram_dpp_math.py.", + "help": "usage: scil_tractogram_project_streamlines_to_map.py [-h]\n (--use_dps key [key ...] | --use_dpp key [key ...] | --load_dps file [file ...] 
| --load_dpp file [file ...])\n (--mean_endpoints | --mean_streamline | --point_by_point)\n (--to_endpoints | --to_wm)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bundle out_prefix\n\nProjects metrics onto the underlying voxels of streamlines. This script can\nproject data from data_per_point (dpp) or data_per_streamline (dps) to maps.\n\nYou choose to project data from all points of the streamlines, or from the\nendpoints only. The idea then is to visualize the cortical areas affected by\nmetrics (assuming streamlines start/end in the cortex).\n\nSee also scil_tractogram_project_map_to_streamlines.py for the reverse action.\n\nHow the data is loaded:\n - From dps: uses the same value for each point of the streamline.\n - From dpp: one value per point.\n\nHow the data is used:\n 1. Average all points of the streamline to get a mean value, set this value\n to all points.\n 2. Average the two endpoints and get their mean value, set this value to\n all points.\n 3. Keep each point individually.\n\nHow the data is projected to a map:\n A. Using each point.\n B. Using the endpoints only.\n\nFor more complex operations than the average per streamline, see\nscil_tractogram_dpp_math.py.\n\npositional arguments:\n in_bundle Fiber bundle file.\n out_prefix Folder + prefix to save endpoints metric(s). We will save \n one nifti file per dpp/dps key given.\n Ex: my_path/subjX_bundleY_ with --use_dpp key1 will output \n my_path/subjX_bundleY_key1.nii.gz\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nWhere to get the statistics from. (Choose one):\n --use_dps key [key ...]\n Use the data_per_streamline from the tractogram.\n It must be a .trk\n --use_dpp key [key ...]\n Use the data_per_point from the tractogram. \n It must be a trk.\n --load_dps file [file ...]\n Load data per streamline (scalar) .txt or .npy.\n Must load an array with the right shape.\n --load_dpp file [file ...]\n Load data per point (scalar) from .txt or .npy.\n Must load an array with the right shape.\n\nProcessing choices. (Choose one):\n --mean_endpoints Uses one single value per streamline: the mean of the two \n endpoints.\n --mean_streamline Use one single value per streamline: the mean of all \n points of the streamline.\n --point_by_point Directly project the streamlines values onto the map.\n\nWhere to send the statistics. 
(Choose one):\n --to_endpoints Project metrics onto a mask of the endpoints.\n --to_wm Project metrics into streamlines coverage.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "areas", + "areas" + ], + [ + "streamline", + "streamline" + ], + [ + "maps", + "map" + ], + [ + "key", + "key" + ], + [ + "action", + "action" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "cortex", + "cortical", + "cortex" + ], + [ + "maps", + "maps" + ], + [ + "project", + "project" + ], + [ + "Data", + "data", + "data" + ], + [ + "voxel", + "voxels" + ], + [ + "shape", + "shape" + ], + [ + "complex", + "complex" + ], + [ + "cortex", + "cortical", + "parietal", + "cortical" + ], + [ + "project", + "projects" + ], + [ + "average", + "average" + ], + [ + "processing", + "processing" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_qbx", + "docstring": "Compute clusters using QuickBundlesX and save them separately.\nWe cannot know the number of clusters in advance.\n\nQuickbundles:\nGaryfallidis, E. et al. (2012). Quickbundles, a method for tractography\nsimplification. Frontiers in neuroscience, 6, 175.\n\nQuickbundlesX:\nGaryfallidis, E. et al. (2016) QuickBundlesX: sequential clustering of millions\nof streamlines in multiple levels of detail at record execution time. 24th\nInternational Society of Magnetic Resonance in Medicine (ISMRM).\n\n\"QuickBundlesX shows a remarkable 20+X speedup over its predecessor\"\n\nFormerly: scil_compute_qbx.py", + "help": "usage: scil_tractogram_qbx.py [-h] [--nb_points NB_POINTS]\n [--out_centroids OUT_CENTROIDS]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram dist_thresh out_clusters_dir\n\nCompute clusters using QuickBundlesX and save them separately.\nWe cannot know the number of clusters in advance.\n\nQuickbundles:\nGaryfallidis, E. et al. (2012). Quickbundles, a method for tractography\nsimplification. Frontiers in neuroscience, 6, 175.\n\nQuickbundlesX:\nGaryfallidis, E. et al. (2016) QuickBundlesX: sequential clustering of millions\nof streamlines in multiple levels of detail at record execution time. 24th\nInternational Society of Magnetic Resonance in Medicine (ISMRM).\n\n\"QuickBundlesX shows a remarkable 20+X speedup over its predecessor\"\n\nFormerly: scil_compute_qbx.py\n\npositional arguments:\n in_tractogram Tractogram filename.\n Path of the input tractogram or bundle.\n dist_thresh Last QuickBundlesX threshold in mm. Typically \n the value are between 10-20mm.\n out_clusters_dir Path where to save the clusters directory.\n\noptions:\n -h, --help show this help message and exit\n --nb_points NB_POINTS\n Streamlines will be resampled to have this number of points [20].\n --out_centroids OUT_CENTROIDS\n Output tractogram filename.\n Format must be readable by the Nibabel API.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
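For the --mean_streamline choice of scil_tractogram_project_streamlines_to_map.py above, each point of a streamline receives that streamline's mean value; a minimal sketch under that reading (list-of-arrays layout assumed, not the script's internals):

    import numpy as np

    def mean_per_streamline(dpp):
        # dpp: list of (n_points,) arrays, one per streamline.
        return [np.full_like(d, d.mean()) for d in dpp]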
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "methods", + "method" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "level", + "levels" + ], + [ + "bundles", + "bundle" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "tractography", + "tractography" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_register", + "docstring": "Generate a linear transformation matrix from the registration of 2 tractograms.\nTypically, this script is run before scil_tractogram_apply_transform.py.\n\nFor more information on how to use the various registration scripts, see the\ndoc at:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_register_tractogram.py", + "help": "usage: scil_tractogram_register.py [-h] [--out_name OUT_NAME] [--only_rigid]\n [--moving_tractogram_ref MOVING_TRACTOGRAM_REF]\n [--static_tractogram_ref STATIC_TRACTOGRAM_REF]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n moving_tractogram static_tractogram\n\nGenerate a linear transformation matrix from the registration of 2 tractograms.\nTypically, this script is run before scil_tractogram_apply_transform.py.\n\nFor more information on how to use the various registration scripts, see the\ndoc at:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_register_tractogram.py\n\npositional arguments:\n moving_tractogram Path of the moving tractogram.\n static_tractogram Path of the target tractogram.\n\noptions:\n -h, --help show this help message and exit\n --out_name OUT_NAME Filename of the transformation matrix. \n The registration type will be appended as a suffix,\n [_.txt]. \n Default: [transformation.txt]\n --only_rigid If set, will only use a rigid transformation (uses affine by default).\n --moving_tractogram_ref MOVING_TRACTOGRAM_REF\n Reference anatomy for moving_tractogram (if tck/vtk/fib/dpy) file\n support (.nii or .nii.gz).\n --static_tractogram_ref STATIC_TRACTOGRAM_REF\n Reference anatomy for static_tractogram (if tck/vtk/fib/dpy) file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] E. Garyfallidis, O. Ocegueda, D. Wassermann, M. Descoteaux\nRobust and efficient linear registration of white-matter fascicles in the\nspace of streamlines, NeuroImage, Volume 117, 15 August 2015, Pages 124-140\n(http://www.sciencedirect.com/science/article/pii/S1053811915003961)\n", + "synonyms": [ + [ + "variety", + "various" + ], + [ + "streamlines", + "streamlines" + ], + [ + "white", + "white" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "space", + "space" + ], + [ + "matter", + "matter" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_remove_invalid", + "docstring": "Removal of streamlines that are out of the volume bounding box. 
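The clustering behind scil_tractogram_qbx.py can be approximated with dipy's QuickBundles API; an illustrative run on dummy data (threshold in mm, values arbitrary):

    import numpy as np
    from dipy.segment.clustering import QuickBundles

    rng = np.random.default_rng(0)
    streamlines = [rng.random((20, 3)) * 100 for _ in range(50)]  # dummy data
    clusters = QuickBundles(threshold=15.0).cluster(streamlines)
    print(len(clusters), "clusters of sizes", [len(c) for c in clusters])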
In voxel space\nno negative coordinate and no coordinate above the volume dimensions are possible.\nAny streamline that does not respect these two conditions is removed.\n\nThe --cut_invalid option will cut streamlines so that their longest segment is\nwithin the bounding box.\n\nFormerly: scil_remove_invalid_streamlines.py", + "help": "usage: scil_tractogram_remove_invalid.py [-h] [--cut_invalid]\n [--remove_single_point]\n [--remove_overlapping_points]\n [--threshold THRESHOLD] [--no_empty]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nRemoval of streamlines that are out of the volume bounding box. In voxel space\nno negative coordinate and no coordinate above the volume dimensions are possible.\nAny streamline that does not respect these two conditions is removed.\n\nThe --cut_invalid option will cut streamlines so that their longest segment is\nwithin the bounding box.\n\nFormerly: scil_remove_invalid_streamlines.py\n\npositional arguments:\n in_tractogram Tractogram filename. Format must be one of \n trk, tck, vtk, fib, dpy.\n out_tractogram Output filename. Format must be one of \n trk, tck, vtk, fib, dpy.\n\noptions:\n -h, --help show this help message and exit\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_single_point\n Consider single point streamlines invalid.\n --remove_overlapping_points\n Consider streamlines with overlapping points invalid.\n --threshold THRESHOLD\n Maximum distance between two points to be considered overlapping [0.001 mm].\n --no_empty Do not save empty tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
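A minimal sketch of the validity rule scil_tractogram_remove_invalid.py states above (coordinates assumed already in voxel space, origin at the corner; not the script's code):

    import numpy as np

    def is_within_box(streamline_vox, volume_dims):
        # Valid iff every coordinate is >= 0 and < the volume dimensions.
        pts = np.asarray(streamline_vox)
        return bool(np.all(pts >= 0) and np.all(pts < np.asarray(volume_dims)))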
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "invalid", + "invalid" + ], + [ + "positive", + "negative" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "space", + "space" + ], + [ + "conditions", + "conditions" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "considered", + "considered" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_resample", + "docstring": "Script to resample a tractogram to a set number of streamlines.\nDefault behavior:\n- IF number of requested streamlines is lower than streamline count: DOWNSAMPLE\n- IF number of requested streamlines is higher than streamline count: UPSAMPLE\nTo prevent upsample if not desired use --never_upsample.\n\nCan be useful to build training sets for machine learning algorithms, to\nupsample under-represented bundles or downsample over-represented bundles.\n\nWorks by either selecting a subset of streamlines or by generating new\nstreamlines by adding gaussian noise to existing ones.\n\nUpsampling:\n Includes smoothing to compensate for the noisiness of new streamlines\n generated by the process.\nDownsampling:\n Includes the possibility of choosing randomly *per Quickbundle cluster* to\n ensure that all clusters are represented in the final tractogram.\n\nExample usage:\n$ scil_tractogram_resample.py input.trk 1000 output.trk --point_wise_std 0.5 --gaussian 5 --keep_invalid_streamlines\n$ scil_visualize_bundles.py output.trk --local_coloring --width=0.1", + "help": "usage: scil_tractogram_resample.py [-h] [--never_upsample]\n [--point_wise_std POINT_WISE_STD]\n [--tube_radius TUBE_RADIUS]\n [--gaussian SIGMA] [-e ERROR_RATE]\n [--keep_invalid_streamlines]\n [--downsample_per_cluster]\n [--qbx_thresholds QBX_THRESHOLDS [QBX_THRESHOLDS ...]]\n [--seed SEED] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram nb_streamlines out_tractogram\n\nScript to resample a tractogram to a set number of streamlines.\nDefault behavior:\n- IF number of requested streamlines is lower than streamline count: DOWNSAMPLE\n- IF number of requested streamlines is higher than streamline count: UPSAMPLE\nTo prevent upsample if not desired use --never_upsample.\n\nCan be useful to build training sets for machine learning algorithms, to\nupsample under-represented bundles or downsample over-represented bundles.\n\nWorks by either selecting a subset of streamlines or by generating new\nstreamlines by adding gaussian noise to existing ones.\n\nUpsampling:\n Includes smoothing to compensate for the noisiness of new streamlines\n generated by the process.\nDownsampling:\n Includes the possibility of choosing randomly *per Quickbundle cluster* to\n ensure that all clusters are represented in the final tractogram.\n\nExample usage:\n$ scil_tractogram_resample.py input.trk 1000 output.trk --point_wise_std 0.5 --gaussian 5 --keep_invalid_streamlines\n$ scil_visualize_bundles.py output.trk --local_coloring --width=0.1\n\npositional arguments:\n in_tractogram Input tractography file.\n nb_streamlines Number of streamlines to resample the tractogram to.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --never_upsample Make sure to never upsample a tractogram.\n Useful when downsample batch of 
files using bash.\n --seed SEED Use a specific random seed for the resampling.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nUpsampling params:\n --point_wise_std POINT_WISE_STD\n Noise to add to existing streamlines points to generate new ones [1].\n --tube_radius TUBE_RADIUS\n Maximum distance to generate streamlines around the original ones [1].\n --gaussian SIGMA Sigma for smoothing. Use the value of surrounding X,Y,Z points on the streamline to blur the streamlines.\n A good sigma choice would be around 5.\n -e ERROR_RATE Maximum compression distance in mm [0.1].\n --keep_invalid_streamlines\n Keep invalid newly generated streamlines that may go out of the \n bounding box.\n\nDownsampling params:\n --downsample_per_cluster\n If set, downsampling will be done per cluster (computed with \n Quickbundles) to ensure that at least some streamlines are \n kept per bundle. Else, random downsampling is performed (default).\n --qbx_thresholds QBX_THRESHOLDS [QBX_THRESHOLDS ...]\n If you chose option '--downsample_per_cluster', you may set \n the QBx threshold value(s) here. Default: [40, 30, 20]\n", + "synonyms": [ + [ + "process", + "process" + ], + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "higher", + "lower" + ], + [ + "invalid", + "invalid" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "learning", + "learning" + ], + [ + "bundles", + "bundle" + ], + [ + "possibility", + "possibility" + ], + [ + "random", + "random" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "algorithm", + "algorithms" + ], + [ + "tractography", + "tractography" + ], + [ + "bundles", + "bundles" + ], + [ + "specific", + "specific" + ], + [ + "level", + "level" + ], + [ + "higher", + "higher" + ], + [ + "random", + "randomly" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_resample_nb_points", + "docstring": "Script to resample a set of streamlines to either a new number of points per\nstreamline or to a fixed step size. WARNING: data_per_point is not carried.\n\nFormerly: scil_resample_streamlines.py", + "help": "usage: scil_tractogram_resample_nb_points.py [-h]\n (--nb_pts_per_streamline NB_PTS_PER_STREAMLINE | --step_size STEP_SIZE)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nScript to resample a set of streamlines to either a new number of points per\nstreamline or to a fixed step size. WARNING: data_per_point is not carried.\n\nFormerly: scil_resample_streamlines.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --nb_pts_per_streamline NB_PTS_PER_STREAMLINE\n Number of points per streamline in the output.\n --step_size STEP_SIZE\n Step size in the output (in mm).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
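The noise-based upsampling that scil_tractogram_resample.py describes can be pictured as jittering existing streamlines; an illustrative sketch (std in mm, value hypothetical), leaving out the smoothing the script applies afterwards:

    import numpy as np

    rng = np.random.default_rng(1337)

    def jitter_streamline(points, std=0.5):
        # New candidate streamline: Gaussian noise on every coordinate.
        return points + rng.normal(scale=std, size=points.shape)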
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "step", + "step" + ], + [ + "streamlines", + "streamlines" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "level", + "level" + ], + [ + "streamline", + "streamline" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_seed_density_map", + "docstring": "Compute a density map of seeds saved in .trk file.\n\nFormerly: scil_compute_seed_density_map.py", + "help": "usage: scil_tractogram_seed_density_map.py [-h] [--binary [FIXED_VALUE]]\n [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n tractogram_filename\n seed_density_filename\n\nCompute a density map of seeds saved in .trk file.\n\nFormerly: scil_compute_seed_density_map.py\n\npositional arguments:\n tractogram_filename Tracts filename. Format must be .trk. \n File should contain a \"seeds\" value in the data_per_streamline.\n These seeds must be in space: voxel, origin: corner.\n seed_density_filename\n Output seed density filename. Format must be Nifti.\n\noptions:\n -h, --help show this help message and exit\n --binary [FIXED_VALUE]\n If set, will store the same value for all intersected voxels, creating a binary map.\n When set without a value, 1 is used (and dtype uint8).\n If a value is given, will be used as the stored value.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "maps", + "map" + ], + [ + "invalid", + "invalid" + ], + [ + "space", + "space" + ], + [ + "create", + "creating" + ], + [ + "voxel", + "voxels" + ], + [ + "binary", + "binary" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "tract", + "tracts", + "tracts" + ], + [ + "intersected", + "intersected" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_segment_and_score", + "docstring": "Scores input tractogram overall and bundlewise.\n\nOutputs\n-------\n\n - results.json: Contains a full tractometry report.\n - processing_stats.json: Contains information on the segmentation of\n bundles (ex: the number of wpc per criteria).\n - Splits the input tractogram into\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (if args.compute_ic)\n segmented_WPC/*_wpc.trk (if args.save_wpc_separately)\n IS.trk OR NC.trk (if args.compute_ic)\n\nBy default, if a streamline fits in many bundles, it will be included in every\none. This means a streamline may be a VS for a bundle and an IS for\n(potentially many) others. If you want to assign each streamline to at most one\nbundle, use the `--unique` flag.\n\nConfig file\n-----------\n\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\nMandatory:\n - endpoints OR [head AND tail]: filename for the endpoints ROI.\n If 'endpoints' is used, we will automatically separate the mask into two\n ROIs, acting as head and tail. Quality check is strongly recommended.\n\nOptional:\n Concerning metrics:\n - gt_mask: expected result. 
OL and OR metrics will be computed from this.*\n\n Concerning inclusion criteria (other streamlines will be WPC):\n - all_mask: ROI serving as \"all\" criteria: to be included in the bundle,\n ALL points of a streamline must be inside the mask.*\n - any_mask: ROI serving as \"any\" criteria: streamlines\n must touch that mask in at least one point (\"any\" point) to be included\n in the bundle.\n - angle: angle criteria. Streamlines containing loops and sharp turns above\n given angle will be rejected from the bundle.\n - length: maximum and minimum lengths per bundle.\n - length_x / length_x_abs: maximum and minimum total distance in the x\n direction (i.e. first coordinate).**\n - length_y / length_y_abs: maximum and minimum total distance in the y\n direction (i.e. second coordinate).**\n - length_z / length_z_abs: maximum and minimum total distance in the z\n direction (i.e. third coordinate).**\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n** With absolute values: coming back on yourself will contribute to the total\ndistance instead of cancelling it.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n \"angle\": 300,\n \"length\": [140, 150],\n \"endpoints\": \"PATH/file1.nii.gz\"\n }\n}", + "help": "usage: scil_tractogram_segment_and_score.py [-h] [--json_prefix p]\n [--gt_dir DIR]\n [--use_gt_masks_as_all_masks]\n [--dilate_endpoints NB_PASS]\n [--remove_invalid]\n [--save_wpc_separately]\n [--compute_ic] [--unique]\n [--remove_wpc_belonging_to_another_bundle]\n [--no_empty] [--indent INDENT]\n [--sort_keys] [--no_bbox_check]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram gt_config out_dir\n\nScores input tractogram overall and bundlewise.\n\nOutputs\n-------\n\n - results.json: Contains a full tractometry report.\n - processing_stats.json: Contains information on the segmentation of\n bundles (ex: the number of wpc per criteria).\n - Splits the input tractogram into\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (if args.compute_ic)\n segmented_WPC/*_wpc.trk (if args.save_wpc_separately)\n IS.trk OR NC.trk (if args.compute_ic)\n\nBy default, if a streamline fits in many bundles, it will be included in every\none. This means a streamline may be a VS for a bundle and an IS for\n(potentially many) others. If you want to assign each streamline to at most one\nbundle, use the `--unique` flag.\n\nConfig file\n-----------\n\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\nMandatory:\n - endpoints OR [head AND tail]: filename for the endpoints ROI.\n If 'endpoints' is used, we will automatically separate the mask into two\n ROIs, acting as head and tail. Quality check is strongly recommended.\n\nOptional:\n Concerning metrics:\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n Concerning inclusion criteria (other streamlines will be WPC):\n - all_mask: ROI serving as \"all\" criteria: to be included in the bundle,\n ALL points of a streamline must be inside the mask.*\n - any_mask: ROI serving as \"any\" criteria: streamlines\n must touch that mask in at least one point (\"any\" point) to be included\n in the bundle.\n - angle: angle criteria. 
Streamlines containing loops and sharp turns above\n given angle will be rejected from the bundle.\n - length: maximum and minimum lengths per bundle.\n - length_x / length_x_abs: maximum and minimum total distance in the x\n direction (i.e. first coordinate).**\n - length_y / length_y_abs: maximum and minimum total distance in the y\n direction (i.e. second coordinate).**\n - length_z / length_z_abs: maximum and minimum total distance in the z\n direction (i.e. third coordinate).**\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n** With absolute values: coming back on yourself will contribute to the total\ndistance instead of cancelling it.\n\nExemple config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n \"angle\": 300,\n \"length\": [140, 150],\n \"endpoints\": \"PATH/file1.nii.gz\"\n }\n}\n\nTractometry\n-----------\nGlobal connectivity metrics:\n\n- Computed by default:\n - VS: valid streamlines, belonging to a bundle (i.e. respecting all the\n criteria for that bundle; endpoints, limit_mask, gt_mask.).\n - IS: invalid streamlines. All other streamlines. IS = IC + NC.\n\n- Optional:\n - WPC: wrong path connections, streamlines connecting correct ROIs but not\n respecting the other criteria for that bundle. Such streamlines always\n exist but they are only saved separately if specified in the options.\n Else, they are merged back with the IS.\n By definition. WPC are only computed if \"limits masks\" are provided.\n - IC: invalid connections, streamlines joining an incorrect combination of\n ROIs. Use carefully, quality depends on the quality of your ROIs and no\n analysis is done on the shape of the streamlines.\n - NC: no connections. Invalid streamlines minus invalid connections.\n\n- Fidelity metrics:\n - OL: Overlap. Percentage of ground truth voxels containing streamline(s)\n for a given bundle.\n - OR: Overreach. Amount of voxels containing streamline(s) when they\n shouldn't, for a given bundle. We compute two versions :\n OR_pct_vs = divided by the total number of voxel covered by the bundle.\n (percentage of the voxels touched by VS).\n Values range between 0 and 100%. Values are not defined when we\n recovered no streamline for a bundle, but we set the OR_pct_vs to 0\n in that case.\n OR_pct_gt = divided by the total size of the ground truth bundle mask.\n Values could be higher than 100%.\n - f1 score: which is the same as the Dice score.\n\npositional arguments:\n in_tractogram Input tractogram to score\n gt_config .json dict configured as specified above.\n out_dir Output directory for the resulting segmented bundles.\n\noptions:\n -h, --help show this help message and exit\n --json_prefix p Prefix of the two output json files. Ex: 'study_x_'.Files will be saved inside out_dir.\n Suffixes will be 'processing_stats.json' and 'results.json'.\n --no_empty Do not write file if there is no streamline.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nAdditions to gt_config:\n --gt_dir DIR Root path of the ground truth files listed in the gt_config. 
\n If not set, filenames in the config file are considered \n as absolute paths.\n --use_gt_masks_as_all_masks\n If set, the gt_config's 'gt_mask' will also be used as\n 'all_mask' for each bundle. Note that this means the\n OR will necessarily be 0.\n\nPreprocessing:\n --dilate_endpoints NB_PASS\n Dilate endpoint masks n-times. Default: 0.\n --remove_invalid Remove invalid streamlines before scoring.\n\nTractometry choices:\n --save_wpc_separately\n If set, streamlines rejected from VC based on the config\n file criteria will be saved separately from IS (and IC)\n in one file *_wpc.tck per bundle.\n --compute_ic If set, IS are split into NC + IC, where IC are computed as one bundle per\n pair of ROI not belonging to a true connection, named\n *_*_IC.tck.\n --unique If set, streamlines are assigned to the first bundle they fit in and not to all.\n --remove_wpc_belonging_to_another_bundle\n If set, WPC actually belonging to any VB (in the \n case of overlapping ROIs) will be removed\n from the WPC classification.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "connect", + "connecting", + "connects", + "connecting" + ], + [ + "direction", + "direction" + ], + [ + "connectivity", + "connectivity" + ], + [ + "unique", + "unique" + ], + [ + "based", + "based" + ], + [ + "shape", + "shape" + ], + [ + "considered", + "considered" + ], + [ + "higher", + "higher" + ], + [ + "streamlines", + "streamlines" + ], + [ + "assigned", + "assigned" + ], + [ + "bundles", + "bundle" + ], + [ + "bundles", + "bundles" + ], + [ + "level", + "level" + ], + [ + "analysis", + "analysis" + ], + [ + "invalid", + "invalid" + ], + [ + "connections", + "connections" + ], + [ + "true", + "true" + ], + [ + "result", + "result" + ], + [ + "valid", + "valid" + ], + [ + "connection", + "connection" + ], + [ + "total", + "total" + ], + [ + "size", + "size" + ], + [ + "streamline", + "streamline" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "global", + "global" + ], + [ + "voxel", + "voxels" + ], + [ + "exist", + "exist" + ], + [ + "voxel", + "voxel" + ], + [ + "defined", + "defined" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_segment_bundles", + "docstring": "Compute BundleSeg & supports multi-atlas & multi-parameters (RBx-like).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are not sure about the transformation 'direction' you can try\nscil_tractogram_segment_bundles.py (with the -v option), a warning will popup\nif the provided transformation is not used correctly.\n\nThe number of folders inside 'models_directories' will increase the number of\nruns. Each folder is considered like an atlas and bundles inside will initiate\nmore BundleSeg executions. The more atlases you have, the more robust the\nrecognition will be.\n\n--minimal_vote_ratio is a value between 0 and 1. 
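To make the mandatory 'endpoints' OR 'head'+'tail' rule of the gt_config above concrete, a hedged validation sketch (file name hypothetical; not part of the script):

    import json

    def check_gt_config(path):
        with open(path) as f:
            cfg = json.load(f)
        for bundle, opts in cfg.items():
            if "endpoints" not in opts and not ("head" in opts and "tail" in opts):
                raise ValueError(f"{bundle}: needs 'endpoints' or 'head'+'tail'")
        return cfg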
If you have 5 input model\ndirectories and a minimal_vote_ratio of 0.5, you will need at least 3 votes.\n\nExample data and usage available at: https://zenodo.org/record/7950602\n\nFor RAM usage, it is recommended to use this heuristic:\n (size of input tractograms (GB) * number of processes) < RAM (GB)\nThis is important because many instances of data structures are initialized\nin parallel and can lead to a RAM overflow.\n\nFormerly: scil_recognize_multi_bundles.py", + "help": "usage: scil_tractogram_segment_bundles.py [-h] [--out_dir OUT_DIR]\n [--minimal_vote_ratio MINIMAL_VOTE_RATIO]\n [--seed SEED] [--inverse]\n [--reference REFERENCE]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractograms [in_tractograms ...]\n in_config_file in_directory\n in_transfo\n\nCompute BundleSeg & supports multi-atlas & multi-parameters (RBx-like).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are not sure about the transformation 'direction' you can try\nscil_tractogram_segment_bundles.py (with the -v option), a warning will pop up\nif the provided transformation is not used correctly.\n\nThe number of folders inside 'models_directories' will increase the number of\nruns. Each folder is considered like an atlas and bundles inside will initiate\nmore BundleSeg executions. The more atlases you have, the more robust the\nrecognition will be.\n\n--minimal_vote_ratio is a value between 0 and 1. If you have 5 input model\ndirectories and a minimal_vote_ratio of 0.5, you will need at least 3 votes.\n\nExample data and usage available at: https://zenodo.org/record/7950602\n\nFor RAM usage, it is recommended to use this heuristic:\n (size of input tractograms (GB) * number of processes) < RAM (GB)\nThis is important because many instances of data structures are initialized\nin parallel and can lead to a RAM overflow.\n\nFormerly: scil_recognize_multi_bundles.py\n\npositional arguments:\n in_tractograms Input tractogram filename (.trk or .tck).\n in_config_file Path of the config file (.json)\n in_directory Path of parent folder of models directories.\n Each folder inside will be considered as a different atlas.\n in_transfo Path for the transformation to model space (.txt, .npy or .mat).\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Path for the output directory [voting_results].\n --minimal_vote_ratio MINIMAL_VOTE_RATIO\n Streamlines will only be considered for saving if\n recognized often enough [0.5].\n --seed SEED Random number generator seed 0.\n --inverse Use the inverse transformation.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] St-Onge, Etienne, Kurt G. Schilling, and Francois Rheault.\n\"BundleSeg: A versatile, reliable and reproducible approach to white\nmatter bundle segmentation.\" International Workshop on Computational\nDiffusion MRI. 
Cham: Springer Nature Switzerland (2023)\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "structure", + "structures", + "structures" + ], + [ + "increase", + "increase" + ], + [ + "reliable", + "reliable" + ], + [ + "direction", + "direction" + ], + [ + "white", + "white" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "diffusion", + "diffusion" + ], + [ + "space", + "space" + ], + [ + "random", + "random" + ], + [ + "Data", + "data", + "data" + ], + [ + "processes", + "processes" + ], + [ + "atlas", + "atlas" + ], + [ + "bundles", + "bundles" + ], + [ + "matter", + "matter" + ], + [ + "considered", + "considered" + ], + [ + "level", + "level" + ], + [ + "parameters", + "parameters" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_segment_bundles_for_connectivity", + "docstring": "Compute a connectivity matrix from a tractogram and a parcellation.\n\nCurrent strategy is to keep the longest streamline segment connecting 2\nregions. If the streamline crosses other gray matter regions before reaching\nits final connected region, the kept connection is still the longest. This is\nrobust to compressed streamlines.\n\nThe output file is a hdf5 (.h5) where the keys are 'LABEL1_LABEL2' and each\ngroup is composed of 'data', 'offsets' and 'lengths' from the array_sequence.\nThe 'data' is stored in VOX/CORNER for simplicity and efficiency. See script\nscil_tractogram_convert_hdf5_to_trk.py to convert to a list of .trk bundles.\n\nFor the --outlier_threshold option the default is a recommended good trade-off\nfor a freesurfer parcellation. With smaller parcels (brainnetome, glasser) the\nthreshold should most likely be reduced.\n\nGood candidate connections to QC are the brainstem to precentral gyrus\nconnection and precentral left to precentral right connection, or equivalent\nin your parcellation.\n\nNOTE: this script can take a while to run. Please be patient.\nExample: on a tractogram with 1.8M streamlines, running on a SSD:\n- 15 minutes without post-processing, only saving final bundles.\n- 30 minutes with full post-processing, only saving final bundles.\n- 60 minutes with full post-processing, saving all possible files.\n\nFormerly: scil_decompose_connectivity.py", + "help": "usage: scil_tractogram_segment_bundles_for_connectivity.py [-h] [--no_pruning]\n [--no_remove_loops]\n [--no_remove_outliers]\n [--no_remove_curv_dev]\n [--min_length MIN_LENGTH]\n [--max_length MAX_LENGTH]\n [--outlier_threshold OUTLIER_THRESHOLD]\n [--loop_max_angle LOOP_MAX_ANGLE]\n [--curv_qb_distance CURV_QB_DISTANCE]\n [--out_dir OUT_DIR]\n [--save_raw_connections]\n [--save_intermediate]\n [--save_discarded]\n [--out_labels_list OUT_FILE]\n [--reference REFERENCE]\n [--no_bbox_check]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractograms\n [in_tractograms ...]\n in_labels out_hdf5\n\nCompute a connectivity matrix from a tractogram and a parcellation.\n\nCurrent strategy is to keep the longest streamline segment connecting 2\nregions. If the streamline crosses other gray matter regions before reaching\nits final connected region, the kept connection is still the longest. This is\nrobust to compressed streamlines.\n\nThe output file is a hdf5 (.h5) where the keys are 'LABEL1_LABEL2' and each\ngroup is composed of 'data', 'offsets' and 'lengths' from the array_sequence.\nThe 'data' is stored in VOX/CORNER for simplicity and efficiency. 
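The .h5 layout just described (one 'LABEL1_LABEL2' group per connection, each holding 'data', 'offsets' and 'lengths') can be inspected with h5py; an illustrative reader, file name hypothetical:

    import h5py
    import numpy as np

    with h5py.File("connections.h5", "r") as f:
        for key in list(f)[:3]:
            lengths = np.asarray(f[key]["lengths"])
            data = np.asarray(f[key]["data"])  # flattened points, VOX/corner
            print(key, "->", len(lengths), "streamlines,", data.shape, "points")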
See script\nscil_tractogram_convert_hdf5_to_trk.py to convert to a list of .trk bundles.\n\nFor the --outlier_threshold option the default is a recommended good trade-off\nfor a freesurfer parcellation. With smaller parcels (brainnetome, glasser) the\nthreshold should most likely be reduced.\n\nGood candidate connections to QC are the brainstem to precentral gyrus\nconnection and precentral left to precentral right connection, or equivalent\nin your parcellation.\n\nNOTE: this script can take a while to run. Please be patient.\nExample: on a tractogram with 1.8M streamlines, running on a SSD:\n- 15 minutes without post-processing, only saving final bundles.\n- 30 minutes with full post-processing, only saving final bundles.\n- 60 minutes with full post-processing, saving all possible files.\n\nFormerly: scil_decompose_connectivity.py\n\npositional arguments:\n in_tractograms Tractogram filenames. Format must be one of \n trk, tck, vtk, fib, dpy.\n in_labels Labels file name (nifti). Labels must have 0 as background.\n out_hdf5 Output hdf5 file (.h5).\n\noptions:\n -h, --help show this help message and exit\n --out_labels_list OUT_FILE\n Save the labels list as text file.\n Needed for scil_connectivity_compute_matrices.py and others.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nPost-processing options:\n --no_pruning If set, will NOT prune on length.\n Length criteria in --min_length, --max_length.\n --no_remove_loops If set, will NOT remove streamlines making loops.\n Angle criteria based on --loop_max_angle.\n --no_remove_outliers If set, will NOT remove outliers using QB.\n Criteria based on --outlier_threshold.\n --no_remove_curv_dev If set, will NOT remove streamlines that deviate from the mean curvature.\n Threshold based on --curv_qb_distance.\n\nPruning options:\n --min_length MIN_LENGTH\n Pruning minimal segment length. [20.0]\n --max_length MAX_LENGTH\n Pruning maximal segment length. [200.0]\n\nOutliers and loops options:\n --outlier_threshold OUTLIER_THRESHOLD\n Outlier removal threshold when using hierarchical QB. [0.6]\n --loop_max_angle LOOP_MAX_ANGLE\n Maximal winding angle over which a streamline is considered as looping. [330.0]\n --curv_qb_distance CURV_QB_DISTANCE\n Clustering threshold for centroids curvature filtering with QB. 
[10.0]\n\nSaving options:\n --out_dir OUT_DIR Output directory for each connection as separate file (.trk).\n --save_raw_connections\n If set, will save all raw cut connections in a subdirectory.\n --save_intermediate If set, will save the intermediate results of filtering.\n --save_discarded If set, will save discarded streamlines in subdirectories.\n Includes loops, outliers and qb_loops.\n", + "synonyms": [ + [ + "region", + "regions", + "region" + ], + [ + "dorsolateral", + "gyrus", + "occipital", + "parietal", + "gyrus" + ], + [ + "streamlines", + "streamlines" + ], + [ + "comprised", + "composed" + ], + [ + "connect", + "connecting", + "connects", + "connecting" + ], + [ + "streamline", + "streamline" + ], + [ + "connectivity", + "connectivity" + ], + [ + "invalid", + "invalid" + ], + [ + "post", + "post" + ], + [ + "region", + "regions", + "regions" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "larger", + "smaller" + ], + [ + "parcels", + "parcels" + ], + [ + "Data", + "data", + "data" + ], + [ + "connections", + "connections" + ], + [ + "processes", + "processes" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "left", + "left" + ], + [ + "based", + "based" + ], + [ + "bundles", + "bundles" + ], + [ + "matter", + "matter" + ], + [ + "connection", + "connection" + ], + [ + "considered", + "considered" + ], + [ + "processing", + "processing" + ], + [ + "level", + "level" + ], + [ + "connected", + "connected" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_segment_one_bundle", + "docstring": "Compute a simple Recobundles (single-atlas & single-parameters).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are unsure about the transformation 'direction' use the verbose\noption (-v) and try with and without the --inverse flag. If you are not using\nthe right transformation 'direction' a warning will pop up. If there is no\nwarning in both cases it means the transformation is very close to identity and\nboth 'direction' will work.\n\nFormerly: scil_recognize_single_bundles.py", + "help": "usage: scil_tractogram_segment_one_bundle.py [-h]\n [--tractogram_clustering_thr TRACTOGRAM_CLUSTERING_THR]\n [--model_clustering_thr MODEL_CLUSTERING_THR]\n [--pruning_thr PRUNING_THR]\n [--slr_threads SLR_THREADS]\n [--seed SEED] [--inverse]\n [--no_empty]\n [--in_pickle IN_PICKLE | --out_pickle OUT_PICKLE]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram in_model in_transfo\n out_tractogram\n\nCompute a simple Recobundles (single-atlas & single-parameters).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are unsure about the transformation 'direction' use the verbose\noption (-v) and try with and without the --inverse flag. If you are not using\nthe right transformation 'direction' a warning will pop up. 
If there is no\nwarning in both cases it means the transformation is very close to identity and\nboth 'direction' will work.\n\nFormerly: scil_recognize_single_bundles.py\n\npositional arguments:\n in_tractogram Input tractogram filename.\n in_model Model to use for recognition.\n in_transfo Path for the transformation to model space (.txt, .npy or .mat).\n out_tractogram Output tractogram filename.\n\noptions:\n -h, --help show this help message and exit\n --tractogram_clustering_thr TRACTOGRAM_CLUSTERING_THR\n Clustering threshold used for the whole brain [8mm].\n --model_clustering_thr MODEL_CLUSTERING_THR\n Clustering threshold used for the model [4mm].\n --pruning_thr PRUNING_THR\n MDF threshold used for final streamlines selection [6mm].\n --slr_threads SLR_THREADS\n Number of threads for SLR [1].\n --seed SEED Random number generator seed [None].\n --inverse Use the inverse transformation.\n --no_empty Do not write file if there is no streamline.\n --in_pickle IN_PICKLE\n Input pickle clusters map file.\n Will override the tractogram_clustering_thr parameter.\n --out_pickle OUT_PICKLE\n Output pickle clusters map file.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nGaryfallidis, E., Cote, M. A., Rheault, F., ... &\nDescoteaux, M. (2018). Recognition of white matter\nbundles using local and global streamline-based registration and\nclustering. NeuroImage, 170, 283-295.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "maps", + "map" + ], + [ + "direction", + "direction" + ], + [ + "white", + "white" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "global", + "global" + ], + [ + "work", + "work" + ], + [ + "space", + "space" + ], + [ + "parameter", + "parameter" + ], + [ + "random", + "random" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "atlas", + "atlas" + ], + [ + "based", + "based" + ], + [ + "bundles", + "bundles" + ], + [ + "matter", + "matter" + ], + [ + "level", + "level" + ], + [ + "parameters", + "parameters" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_shuffle", + "docstring": "Shuffle the ordering of streamlines.\n\nFormerly: scil_shuffle_streamlines.py", + "help": "usage: scil_tractogram_shuffle.py [-h] [--seed SEED] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nShuffle the ordering of streamlines.\n\nFormerly: scil_shuffle_streamlines.py\n\npositional arguments:\n in_tractogram Input tractography file.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --seed SEED Random number generator seed [None].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "random", + "random" + ], + [ + "streamlines", + "streamlines" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "level", + "level" + ], + [ + "tractography", + "tractography" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_smooth", + "docstring": "This script will smooth the streamlines, usually to remove the 'wiggles' in\nprobabilistic tracking.\n\nTwo choices of methods are available:\n- Gaussian will use the surrounding coordinates for smoothing. Streamlines are\nresampled to 1mm step-size and the smoothing is performed on the coordinate\narray. The sigma will be indicative of the number of points surrounding the\ncenter points to be used for blurring.\n- Spline will fit a spline curve to every streamline using a sigma and the\nnumber of control points. The sigma represents the allowed distance from the\ncontrol points. The final streamlines are obtained by evaluating the spline at\nconstant intervals so that it will have the same number of points as initially.\n\nThis script enforces endpoints to remain the same.\n\nWARNING:\n- too low of a sigma (e.g: 1) with a lot of control points (e.g: 15)\nwill create crazy streamlines that could end up out of the bounding box.\n- data_per_point will be lost.\n\nFormerly: scil_smooth_streamlines.py", + "help": "usage: scil_tractogram_smooth.py [-h]\n (--gaussian SIGMA | --spline SIGMA NB_CTRL_POINT)\n [--compress [COMPRESS_TH]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script will smooth the streamlines, usually to remove the 'wiggles' in\nprobabilistic tracking.\n\nTwo choices of methods are available:\n- Gaussian will use the surrounding coordinates for smoothing. Streamlines are\nresampled to 1mm step-size and the smoothing is performed on the coordinate\narray. The sigma will be indicative of the number of points surrounding the\ncenter points to be used for blurring.\n- Spline will fit a spline curve to every streamline using a sigma and the\nnumber of control points. The sigma represents the allowed distance from the\ncontrol points. The final streamlines are obtained by evaluating the spline at\nconstant intervals so that it will have the same number of points as initially.\n\nThis script enforces endpoints to remain the same.\n\nWARNING:\n- too low of a sigma (e.g: 1) with a lot of control points (e.g: 15)\nwill create crazy streamlines that could end up out of the bounding box.\n- data_per_point will be lost.\n\nFormerly: scil_smooth_streamlines.py\n\npositional arguments:\n in_tractogram Input tractography file.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --gaussian SIGMA Sigma for smoothing. Use the value of surrounding\n X,Y,Z points on the streamline to blur the streamlines.\n A good sigma choice would be around 5.\n --spline SIGMA NB_CTRL_POINT\n Sigma for smoothing. Model each streamline as a spline.\n A good sigma choice would be around 5 and control point around 10.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm. [0.1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
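For the Gaussian method above, smoothing the coordinate array while keeping the endpoints fixed (which the script enforces) could be sketched as follows; the sigma value is illustrative, not the script's implementation:

    import numpy as np
    from scipy.ndimage import gaussian_filter1d

    def smooth_streamline(points, sigma=5.0):
        out = gaussian_filter1d(points, sigma, axis=0)
        out[0], out[-1] = points[0], points[-1]  # endpoints must not move
        return out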
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "probabilistic", + "probabilistic" + ], + [ + "streamline", + "streamline" + ], + [ + "create", + "create" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "high", + "low" + ], + [ + "step", + "step" + ], + [ + "tractography", + "tractography" + ], + [ + "methods", + "methods" + ], + [ + "tracking", + "tracking" + ], + [ + "level", + "level" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_tractogram_split", + "docstring": "Split a tractogram into multiple files, 2 options available :\nSplit into X files, or split into files of Y streamlines.\n\nBy default, streamlines to add to each chunk will be chosen randomly.\nOptionally, you can split streamlines...\n - sequentially (the first n/nb_chunks streamlines in the first chunk and so\n on).\n - randomly, but per Quickbundles clusters.\n\nFormerly: scil_split_tractogram.py", + "help": "usage: scil_tractogram_split.py [-h] [--out_dir OUT_DIR]\n (--chunk_size CHUNK_SIZE | --nb_chunks NB_CHUNKS)\n [--split_per_cluster | --do_not_randomize]\n [--qbx_thresholds t [t ...]] [--seed SEED]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_prefix\n\nSplit a tractogram into multiple files, 2 options available :\nSplit into X files, or split into files of Y streamlines.\n\nBy default, streamlines to add to each chunk will be chosen randomly.\nOptionally, you can split streamlines...\n - sequentially (the first n/nb_chunks streamlines in the first chunk and so\n on).\n - randomly, but per Quickbundles clusters.\n\nFormerly: scil_split_tractogram.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_prefix Prefix for the output tractogram, index will be appended \n automatically (ex, _0.trk), based on input type.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Put all output tractogram in a specific directory.\n --chunk_size CHUNK_SIZE\n The maximum number of streamlines per file.\n --nb_chunks NB_CHUNKS\n Divide the file in equal parts.\n --split_per_cluster If set, splitting will be done per cluster (computed with \n Quickbundles) to ensure that at least some streamlines are \n kept from each bundle in each chunk. Else, random splitting is\n performed (default).\n --do_not_randomize If set, splitting is done sequentially through the original \n sft instead of using random indices.\n --qbx_thresholds t [t ...]\n If you chose option '--split_per_cluster', you may set the \n QBx threshold value(s) here. Default: [40, 30, 20]\n --seed SEED Use a specific random seed for the subsampling.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "random", + "random" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "based", + "based" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ], + [ + "random", + "randomly" + ] + ], + "keywords": [] + }, + { + "name": "scil_viz_bingham_fit", + "docstring": "Visualize 2-dimensional Bingham volume slice loaded from disk. 
The volume is\nassumed to be saved from scil_fodf_to_bingham.py.\n\nGiven an image of Bingham coefficients, this script displays a slice in a\ngiven orientation.", + "help": "usage: scil_viz_bingham_fit.py [-h] [--slice_index SLICE_INDEX]\n [--win_dims WIDTH HEIGHT]\n [--interactor {image,trackball}]\n [--axis_name {sagittal,coronal,axial}]\n [--silent] [--output OUTPUT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--color_per_lobe]\n in_bingham\n\nVisualize 2-dimensional Bingham volume slice loaded from disk. The volume is\nassumed to be saved from scil_fodf_to_bingham.py.\n\nGiven an image of Bingham coefficients, this script displays a slice in a\ngiven orientation.\n\npositional arguments:\n in_bingham Input SH image file.\n\noptions:\n -h, --help show this help message and exit\n --slice_index SLICE_INDEX\n Index of the slice to visualize along a given axis. Defaults to middle of volume.\n --win_dims WIDTH HEIGHT\n The dimensions for the vtk window. [(768, 768)]\n --interactor {image,trackball}\n Specify interactor mode for vtk window. [trackball]\n --axis_name {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n --silent Disable interactive visualization.\n --output OUTPUT Path to output file.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Name of the sphere used to reconstruct SF. [symmetric362]\n --color_per_lobe Color each bingham distribution with a different color. [False]\n", + "synonyms": [ + [ + "axial", + "axial" + ], + [ + "orientation", + "orientation" + ], + [ + "coronal", + "sagittal", + "coronal" + ], + [ + "image", + "image" + ], + [ + "middle", + "middle" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "coronal", + "sagittal", + "sagittal" + ], + [ + "level", + "level" + ], + [ + "false", + "false" + ] + ], + "keywords": [] + }, + { + "name": "scil_viz_bundle", + "docstring": "Visualize bundles.\n\nExample usages:\n\n# Visualize streamlines as tubes, each bundle with a different color\n>>> scil_viz_bundle.py path_to_bundles/ --shape tube --random_coloring 1337\n\n# Visualize a tractogram with each streamlines drawn as lines, colored with\n# their local orientation, but only load 1 in 10 streamlines\n>>> scil_viz_bundle.py tractogram.trk --shape line --subsample 10\n\n# Visualize CSTs as large tubes and color them from a list of colors in a file\n>>> scil_viz_bundle.py path_to_bundles/CST_* --width 0.5\n --color_dict colors.json", + "help": "usage: scil_viz_bundle.py [-h]\n [--random_coloring SEED | --uniform_coloring R G B | --local_coloring | --color_dict JSON | --color_from_streamlines KEY | --color_from_points KEY]\n [--shape {line,tube}] [--width WIDTH]\n [--subsample SUBSAMPLE] [--downsample DOWNSAMPLE]\n [--background R G B] [-v [{DEBUG,INFO,WARNING}]]\n in_bundles [in_bundles ...]\n\nVisualize bundles.\n\nExample usages:\n\n# Visualize streamlines as tubes, each bundle with a different color\n>>> scil_viz_bundle.py path_to_bundles/ --shape tube --random_coloring 1337\n\n# Visualize a tractogram with each streamlines drawn as lines, colored with\n# their local orientation, but only load 1 in 10 streamlines\n>>> scil_viz_bundle.py tractogram.trk --shape line --subsample 10\n\n# Visualize CSTs as large tubes and color them 
from a list of colors in a file\n>>> scil_viz_bundle.py path_to_bundles/CST_* --width 0.5\n --color_dict colors.json\n\npositional arguments:\n in_bundles List of tractography files supported by nibabel.\n\noptions:\n -h, --help show this help message and exit\n --shape {line,tube} Display streamlines either as lines or tubes.\n [Default: tube]\n --width WIDTH Width of tubes or lines representing streamlines\n [Default: 0.25]\n --subsample SUBSAMPLE\n Only load 1 in N streamlines.\n [Default: 1]\n --downsample DOWNSAMPLE\n Downsample streamlines to N points.\n [Default: None]\n --background R G B RGB values [0, 255] of the color of the background.\n [Default: [0, 0, 0]]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nColouring options:\n --random_coloring SEED\n Assign a random color to bundles.\n --uniform_coloring R G B\n Assign a uniform color to streamlines.\n --local_coloring Assign coloring to streamlines depending on their local orientations.\n --color_dict JSON JSON file containing colors for each bundle.\n Bundle filenames are indicated as keys and colors as values.\n A 'default' key and value can be included.\n --color_from_streamlines KEY\n Extract a color per streamline from the data_per_streamline property of the tractogram at the specified key.\n --color_from_points KEY\n Extract a color per point from the data_per_point property of the tractogram at the specified key.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "streamline", + "streamline" + ], + [ + "key", + "key" + ], + [ + "indicating", + "indicated" + ], + [ + "orientation", + "orientation" + ], + [ + "supported", + "supported" + ], + [ + "bundles", + "bundle" + ], + [ + "random", + "random" + ], + [ + "orientation", + "orientations" + ], + [ + "large", + "large" + ], + [ + "tractography", + "tractography" + ], + [ + "bundles", + "bundles" + ], + [ + "shape", + "shape" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_viz_bundle_screenshot_mni", + "docstring": "Register bundle to a template for screenshots using a reference.\nThe template can be any MNI152 (any resolution, cropped or not).\nIf your in_anat has a skull, select a MNI152 template with a skull and\nvice-versa.\n\nIf the bundle is already in MNI152 space, do not use --target_template.\n\nAxial, coronal and sagittal slices are captured.\nSagittal can be captured from the left (default) or the right.\n\nFor the --roi argument: If 1 value is provided, the ROI will be white,\nif 4 values are provided, the ROI will be colored with the RGB values\nprovided, if 5 values are provided, it is RGBA (values from 0-255).", + "help": "usage: scil_viz_bundle_screenshot_mni.py [-h]\n [--target_template TARGET_TEMPLATE]\n [--local_coloring | --uniform_coloring R G B | --reference_coloring COLORBAR]\n [--roi ROI [ROI ...]] [--right]\n [--anat_opacity ANAT_OPACITY]\n [--output_suffix OUTPUT_SUFFIX]\n [--out_dir OUT_DIR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle in_anat\n\nRegister bundle to a template for screenshots using a reference.\nThe template can be any MNI152 (any resolution, cropped or not).\nIf your in_anat has a skull, select a MNI152 template with a skull and\nvice-versa.\n\nIf the bundle is already in MNI152 space, do not use --target_template.\n\nAxial, coronal and sagittal slices are captured.\nSagittal can be captured from the left (default) or the right.\n\nFor the --roi argument: If 1 value is provided, 
the ROI will be white,\nif 4 values are provided, the ROI will be colored with the RGB values\nprovided, if 5 values are provided, it is RGBA (values from 0-255).\n\npositional arguments:\n in_bundle Path of the input bundle.\n in_anat Path of the reference file (.nii or nii.gz).\n\noptions:\n -h, --help show this help message and exit\n --target_template TARGET_TEMPLATE\n Path to the target MNI152 template for registration. If in_anat has a skull, select a MNI152 template with a skull and vice-versa.\n --local_coloring Color streamlines using local segments orientation.\n --uniform_coloring R G B\n Color streamlines with uniform coloring.\n --reference_coloring COLORBAR\n Color streamlines with reference coloring (0-255).\n --roi ROI [ROI ...] Path to a ROI file (.nii or nii.gz).\n --right Take screenshot from the right instead of the left for the sagittal plane.\n --anat_opacity ANAT_OPACITY\n Set the opacity for the anatomy, use 0 for complete transparency, 1 for opaque. [0.3]\n --output_suffix OUTPUT_SUFFIX\n Add a suffix to the output, else the axis name is used.\n --out_dir OUT_DIR Put all images in a specific directory.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "axial", + "axial" + ], + [ + "streamlines", + "streamlines" + ], + [ + "planes", + "plane" + ], + [ + "white", + "white" + ], + [ + "orientation", + "orientation" + ], + [ + "coronal", + "sagittal", + "coronal" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "bundles", + "bundle" + ], + [ + "space", + "space" + ], + [ + "left", + "left" + ], + [ + "coronal", + "sagittal", + "sagittal" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ] + ], + "keywords": [] + }, + { + "name": "scil_viz_bundle_screenshot_mosaic", + "docstring": "Visualize bundles from a list. The script will output a mosaic (image) with\nscreenshots, 6 views per bundle in the list.", + "help": "usage: scil_viz_bundle_screenshot_mosaic.py [-h] [--uniform_coloring R G B]\n [--random_coloring SEED]\n [--zoom ZOOM] [--ttf TTF]\n [--ttf_size TTF_SIZE]\n [--opacity_background OPACITY_BACKGROUND]\n [--resolution_of_thumbnails RESOLUTION_OF_THUMBNAILS]\n [--light_screenshot]\n [--no_information]\n [--no_bundle_name]\n [--no_streamline_number]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_volume in_bundles\n [in_bundles ...] out_image\n\nVisualize bundles from a list. The script will output a mosaic (image) with\nscreenshots, 6 views per bundle in the list.\n\npositional arguments:\n in_volume Volume used as background (e.g. T1, FA, b0).\n in_bundles List of tractography files supported by nibabel or binary mask files.\n out_image Name of the output image mosaic (e.g. mosaic.jpg, mosaic.png).\n\noptions:\n -h, --help show this help message and exit\n --uniform_coloring R G B\n Assign an uniform color to streamlines (or ROIs).\n --random_coloring SEED\n Assign a random color to streamlines (or ROIs).\n --zoom ZOOM Rendering zoom. 
A value greater than 1 is a zoom-in,\n a value less than 1 is a zoom-out [1.0].\n --ttf TTF Path of the true type font to use for legends.\n --ttf_size TTF_SIZE Font size (int) to use for the legends [35].\n --opacity_background OPACITY_BACKGROUND\n Opacity of background image, between 0 and 1.0 [0.4].\n --resolution_of_thumbnails RESOLUTION_OF_THUMBNAILS\n Resolution of thumbnails used in mosaic [300].\n --light_screenshot Keep only 3 views instead of 6 [False].\n --no_information Don't display axis and bundle information [False].\n --no_bundle_name Don't display bundle name [False].\n --no_streamline_number\n Don't display the number of streamlines per bundle [False].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "greater", + "greater" + ], + [ + "streamlines", + "streamlines" + ], + [ + "views", + "views" + ], + [ + "anatomical", + "anatomy", + "anatomy" + ], + [ + "supported", + "supported" + ], + [ + "bundles", + "bundle" + ], + [ + "image", + "image" + ], + [ + "rendered", + "rendering", + "rendering" + ], + [ + "random", + "random" + ], + [ + "true", + "true" + ], + [ + "tractography", + "tractography" + ], + [ + "bundles", + "bundles" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "level", + "level" + ], + [ + "false", + "false" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_viz_connectivity", + "docstring": "Script to display a connectivity matrix and adjust the desired visualization.\nMade to work with scil_tractogram_segment_bundles_for_connectivity.py and\nscil_connectivity_reorder_rois.py.\n\nThis script can either display the axis labels as:\n- Coordinates (0..N)\n- Labels (using --labels_list)\n- Names (using --labels_list and --lookup_table)\nExamples of labels_list.txt and lookup_table.json can be found in the\nfreesurfer_flow output (https://github.com/scilus/freesurfer_flow)\n\nIf the matrix was made from a bigger matrix using\nscil_connectivity_reorder_rois.py, provide the text file(s), using\n--labels_list and/or --reorder_txt.\n\nThe chord chart always displays parts in the order they are defined\n(clockwise); the color is attributed in that order following a colormap. The\nthickness of the line represents the 'size/intensity': the greater the value is,\nthe thicker the line will be. 
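For the percentile option described just below, the operation amounts to zeroing matrix entries under a percentile cutoff; a sketch assuming a square weighted connectivity matrix (toy data, not the script's code path).

import numpy as np

rng = np.random.default_rng(0)
conn = rng.random((20, 20))
conn = (conn + conn.T) / 2                  # symmetric, as for an undirected graph
cutoff = np.percentile(conn[conn > 0], 80)  # hide connections under the 80th percentile
conn[conn < cutoff] = 0                     # the chord chart would then omit them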
In order to hide the low values, two options are\navailable:\n- Angle threshold + alpha, any connections with a small angle on the chord\n chart will be slightly transparent to increase the focus on bigger\n connections.\n- Percentile, hide any connections with a value below that percentile", + "help": "", + "synonyms": [ + [ + "order", + "order" + ], + [ + "greater", + "greater" + ], + [ + "represent", + "represent" + ], + [ + "increase", + "increase" + ], + [ + "connectivity", + "connectivity" + ], + [ + "high", + "low" + ], + [ + "work", + "work" + ], + [ + "connections", + "connections" + ], + [ + "large", + "small" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "defined", + "defined" + ], + [ + "size", + "size" + ], + [ + "larger", + "bigger" + ] + ], + "keywords": [] + }, + { + "name": "scil_viz_dti_screenshot", + "docstring": "Register DWI to a template for screenshots.\nThe templates are on http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009\n\nFor quick quality control, the MNI template can be downsampled to 2mm iso.\nAxial, coronal and sagittal slices are captured.", + "help": "usage: scil_viz_dti_screenshot.py [-h] [--shells SHELLS [SHELLS ...]]\n [--out_suffix OUT_SUFFIX]\n [--out_dir OUT_DIR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_template\n\nRegister DWI to a template for screenshots.\nThe templates are on http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009\n\nFor quick quality control, the MNI template can be downsampled to 2mm iso.\nAxial, coronal and sagittal slices are captured.\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bval file, in FSL format.\n in_bvec Path of the bvec file, in FSL format.\n in_template Path to the target MNI152 template for \n registration, use the one provided online.\n\noptions:\n -h, --help show this help message and exit\n --shells SHELLS [SHELLS ...]\n Shells to use for DTI fit (usually below 1200), b0 must be listed.\n --out_suffix OUT_SUFFIX\n Add a suffix to the output, else the axis name is used.\n --out_dir OUT_DIR Put all images in a specific directory.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "axial", + "axial" + ], + [ + "coronal", + "sagittal", + "coronal" + ], + [ + "diffusion", + "diffusion" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "coronal", + "sagittal", + "sagittal" + ], + [ + "specific", + "specific" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_viz_fodf", + "docstring": "Visualize 2-dimensional fODF slice loaded from disk.\n\nGiven an image of SH coefficients, this script displays a slice in a\ngiven orientation. The user can also add a background on top of which the\nfODF are to be displayed. Using a full SH basis, the script can be used to\nvisualize asymmetric fODF. The user can supply a peaks image to visualize\npeaks on top of fODF.\n\nIf a transparency_mask is given (e.g. a brain mask), all values outside the\nmask non-zero values are set to full transparency in the saved scene.\n\n!!! CAUTION !!! 
The script is memory intensive (about 9 kB of allocated RAM per\nvoxel, or 9 GB for a 1M-voxel volume) with a sphere interpolated to 362 points.", + "help": "usage: scil_viz_fodf.py [-h] [--slice_index SLICE_INDEX]\n [--win_dims WIDTH HEIGHT]\n [--interactor {image,trackball}]\n [--axis_name {sagittal,coronal,axial}] [--silent]\n [--in_transparency_mask IN_TRANSPARENCY_MASK]\n [--output OUTPUT] [-f]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--sphere {repulsion200,repulsion100,symmetric724,symmetric642,symmetric362,repulsion724}]\n [--sph_subdivide SPH_SUBDIVIDE] [--mask MASK]\n [--colormap COLORMAP | --color_rgb COLOR_RGB COLOR_RGB COLOR_RGB]\n [--scale SCALE] [--radial_scale_off] [--norm_off]\n [-v [{DEBUG,INFO,WARNING}]] [--background BACKGROUND]\n [--bg_range MIN MAX] [--bg_opacity BG_OPACITY]\n [--bg_offset BG_OFFSET]\n [--bg_interpolation {nearest,linear}]\n [--bg_color BG_COLOR BG_COLOR BG_COLOR]\n [--peaks PEAKS]\n [--peaks_color PEAKS_COLOR PEAKS_COLOR PEAKS_COLOR]\n [--peaks_width PEAKS_WIDTH]\n [--peaks_values PEAKS_VALUES | --peaks_length PEAKS_LENGTH]\n [--variance VARIANCE] [--variance_k VARIANCE_K]\n [--var_color VAR_COLOR VAR_COLOR VAR_COLOR]\n in_fodf\n\nVisualize 2-dimensional fODF slice loaded from disk.\n\nGiven an image of SH coefficients, this script displays a slice in a\ngiven orientation. The user can also add a background on top of which the\nfODF are to be displayed. Using a full SH basis, the script can be used to\nvisualize asymmetric fODF. The user can supply a peaks image to visualize\npeaks on top of fODF.\n\nIf a transparency_mask is given (e.g. a brain mask), all values outside the\nmask non-zero values are set to full transparency in the saved scene.\n\n!!! CAUTION !!! The script is memory intensive (about 9 kB of allocated RAM per\nvoxel, or 9 GB for a 1M-voxel volume) with a sphere interpolated to 362 points.\n\npositional arguments:\n in_fodf Input SH image file.\n\noptions:\n -h, --help show this help message and exit\n --slice_index SLICE_INDEX\n Index of the slice to visualize along a given axis. Defaults to middle of volume.\n --win_dims WIDTH HEIGHT\n The dimensions for the vtk window. [(768, 768)]\n --interactor {image,trackball}\n Specify interactor mode for vtk window. [trackball]\n --axis_name {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n --silent Disable interactive visualization.\n --in_transparency_mask IN_TRANSPARENCY_MASK\n Input mask image file.\n --output OUTPUT Path to output file.\n -f Force overwriting of the output files.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sphere {repulsion200,repulsion100,symmetric724,symmetric642,symmetric362,repulsion724}\n Name of the sphere used to reconstruct SF. [symmetric362]\n --sph_subdivide SPH_SUBDIVIDE\n Number of subdivisions for given sphere. If not supplied, use the given sphere as is.\n --mask MASK Optional mask file. 
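The CAUTION above scales linearly with voxel count; a quick back-of-the-envelope check using the quoted 9 kB/voxel figure (the volume dimensions are hypothetical).

import numpy as np

shape = (128, 128, 90)               # hypothetical fODF volume dimensions
n_voxels = int(np.prod(shape))
est_gb = n_voxels * 9 / 1e6          # 9 kB per voxel -> GB (1 GB = 1e6 kB)
print(f"{n_voxels:,} voxels -> ~{est_gb:.1f} GB of RAM")   # ~13.3 GB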
Only fODF inside the mask are displayed.\n --colormap COLORMAP Colormap for the ODF slicer. If None, then an RGB colormap will be used. [None]\n --color_rgb COLOR_RGB COLOR_RGB COLOR_RGB\n Uniform color for the ODF slicer given as RGB, scaled between 0 and 1. [None]\n --scale SCALE Scaling factor for FODF. [0.5]\n --radial_scale_off Disable radial scale for ODF slicer.\n --norm_off Disable normalization of ODF slicer.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nBackground arguments:\n --background BACKGROUND\n Background image file. If RGB, values must be between 0 and 255.\n --bg_range MIN MAX The range of values mapped to range [0, 1] for background image. [(bg.min(), bg.max())]\n --bg_opacity BG_OPACITY\n The opacity of the background image. Opacity of 0.0 means transparent and 1.0 is completely visible. [1.0]\n --bg_offset BG_OFFSET\n The offset of the background image. [0.5]\n --bg_interpolation {nearest,linear}\n Interpolation mode for the background image. [nearest]\n --bg_color BG_COLOR BG_COLOR BG_COLOR\n The color of the overall background, behind everything. Must be RGB values scaled between 0 and 1. [(0, 0, 0)]\n\nPeaks arguments:\n --peaks PEAKS Peaks image file.\n --peaks_color PEAKS_COLOR PEAKS_COLOR PEAKS_COLOR\n Color used for peaks, as RGB values scaled between 0 and 1. If None, then an RGB colormap is used. [None]\n --peaks_width PEAKS_WIDTH\n Width of peaks segments. [1.0]\n\nPeaks scaling arguments:\n Choose between peaks values and arbitrary length.\n\n --peaks_values PEAKS_VALUES\n Peaks values file.\n --peaks_length PEAKS_LENGTH\n Length of the peaks segments. [0.65]\n\nVariance arguments:\n For the visualization of fodf uncertainty, the variance is used as follows: mean + k * sqrt(variance), where mean is the input fodf (in_fodf) and k is the scaling factor (variance_k).\n\n --variance VARIANCE FODF variance file.\n --variance_k VARIANCE_K\n Scaling factor (k) for the computation of the fodf uncertainty. [1]\n --var_color VAR_COLOR VAR_COLOR VAR_COLOR\n Color of variance outline. Must be RGB values scaled between 0 and 1. [(1, 1, 1)]\n", + "synonyms": [ + [ + "axial", + "axial" + ], + [ + "variance", + "variance" + ], + [ + "orientation", + "orientation" + ], + [ + "coronal", + "sagittal", + "coronal" + ], + [ + "image", + "image" + ], + [ + "middle", + "middle" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "coronal", + "sagittal", + "sagittal" + ], + [ + "memory", + "memory" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "brain", + "brain" + ] + ], + "keywords": [] + }, + { + "name": "scil_viz_gradients_screenshot", + "docstring": "Visualization for directions on a sphere, either from a gradient sampling (i.e.\na list of b-vectors) or from a Dipy sphere.", + "help": "usage: scil_viz_gradients_screenshot.py [-h]\n (--in_gradient_scheme IN_GRADIENT_SCHEME [IN_GRADIENT_SCHEME ...] | --dipy_sphere {symmetric362,symmetric642,symmetric724,repulsion724,repulsion100,repulsion200})\n [--dis-sym]\n [--out_basename OUT_BASENAME]\n [--res RES] [--dis-sphere]\n [--dis-proj] [--plot_shells]\n [--same-color] [--opacity OPACITY]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n\nVisualization for directions on a sphere, either from a gradient sampling (i.e.\na list of b-vectors) or from a Dipy sphere.\n\noptions:\n -h, --help show this help message and exit\n --in_gradient_scheme IN_GRADIENT_SCHEME [IN_GRADIENT_SCHEME ...]\n Gradient sampling filename. 
(only accepts .bvec and\n .bval together or only .b).\n --dipy_sphere {symmetric362,symmetric642,symmetric724,repulsion724,repulsion100,repulsion200}\n Dipy sphere choice.\n --dis-sym Disable antipodal symmetry.\n --out_basename OUT_BASENAME\n Output file name picture without extension (will be\n png file(s)).\n --res RES Resolution of the output picture(s).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n\nEnable/Disable renderings.:\n --dis-sphere Disable the rendering of the sphere.\n --dis-proj Disable rendering of the projection supershell.\n --plot_shells Enable rendering each shell individually.\n\nRendering options.:\n --same-color Use same color for all shell.\n --opacity OPACITY Opacity for the shells.\n", + "synonyms": [ + [ + "rendered", + "rendering", + "rendering" + ], + [ + "projection", + "projection" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_viz_tractogram_seeds", + "docstring": "Visualize seeds used to generate the tractogram or bundle.\nWhen tractography was run, each streamline produced by the tracking algorithm\nsaved its seeding point (its origin).\n\nThe tractogram must have been generated from scil_tracking_local.py or\nscil_tracking_pft.py with the --save_seeds option.", + "help": "usage: scil_viz_tractogram_seeds.py [-h] [--save SAVE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n tractogram\n\nVisualize seeds used to generate the tractogram or bundle.\nWhen tractography was run, each streamline produced by the tracking algorithm\nsaved its seeding point (its origin).\n\nThe tractogram must have been generated from scil_tracking_local.py or\nscil_tracking_pft.py with the --save_seeds option.\n\npositional arguments:\n tractogram Tractogram file (must be trk)\n\noptions:\n -h, --help show this help message and exit\n --save SAVE If set, save a screenshot of the result in the specified filename\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "seeding", + "seeding" + ], + [ + "streamline", + "streamline" + ], + [ + "bundles", + "bundle" + ], + [ + "algorithm", + "algorithm" + ], + [ + "result", + "result" + ], + [ + "tractography", + "tractography" + ], + [ + "tracking", + "tracking" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_viz_tractogram_seeds_3d", + "docstring": "Visualize seeds as 3D points, with heatmaps corresponding to seed density\n\nExample usages:\n\n>>> scil_viz_tractogram_seeds_3d.py seeds.nii.gz --tractogram tractogram.trk", + "help": "usage: scil_viz_tractogram_seeds_3d.py [-h] [--tractogram TRACTOGRAM]\n [--colormap COLORMAP]\n [--seed_opacity SEED_OPACITY]\n [--tractogram_shape {line,tube}]\n [--tractogram_opacity TRACTOGRAM_OPACITY]\n [--tractogram_width TRACTOGRAM_WIDTH]\n [--tractogram_color R G B [R G B ...]]\n [--background R G B [R G B ...]]\n [-v [{DEBUG,INFO,WARNING}]]\n in_seed_map\n\nVisualize seeds as 3D points, with heatmaps corresponding to seed density\n\nExample usages:\n\n>>> scil_viz_tractogram_seeds_3d.py seeds.nii.gz --tractogram tractogram.trk\n\npositional arguments:\n in_seed_map Seed density map.\n\noptions:\n -h, --help show this help message and exit\n --tractogram TRACTOGRAM\n Tractogram corresponding to the seeds.\n --colormap COLORMAP Name of the map for the density coloring. Can be any colormap that matplotlib offers.\n [Default: bone]\n --seed_opacity SEED_OPACITY\n Opacity of the contour generated.\n [Default: 0.5]\n --tractogram_shape {line,tube}\n Display streamlines either as lines or tubes.\n [Default: tube]\n --tractogram_opacity TRACTOGRAM_OPACITY\n Opacity of the streamlines.\n [Default: 0.5]\n --tractogram_width TRACTOGRAM_WIDTH\n Width of tubes or lines representing streamlines.\n [Default: 0.05]\n --tractogram_color R G B [R G B ...]\n Color for the tractogram.\n --background R G B [R G B ...]\n RGB values [0, 255] of the color of the background.\n [Default: [0, 0, 0]]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", + "synonyms": [ + [ + "streamlines", + "streamlines" + ], + [ + "maps", + "map" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_viz_volume_histogram", + "docstring": "Script to display a histogram of a metric (FA, MD, etc.) from a binary mask\n(wm mask, vascular mask, etc.).\nThese two images must be coregistered with each other.\n\n>>> scil_viz_volume_histogram.py metric.nii.gz mask_bin.nii.gz 8\n out_filename_image.png", + "help": "usage: scil_viz_volume_histogram.py [-h] [--title TITLE] [--x_label X_LABEL]\n [--colors COLORS] [--show_only]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_metric in_mask n_bins out_png\n\nScript to display a histogram of a metric (FA, MD, etc.) from a binary mask\n(wm mask, vascular mask, etc.).\nThese two images must be coregistered with each other.\n\n>>> scil_viz_volume_histogram.py metric.nii.gz mask_bin.nii.gz 8\n out_filename_image.png\n\npositional arguments:\n in_metric Metric map, e.g. FA, MD, ...\n in_mask Binary mask data to extract value.\n n_bins Number of bins to use for the histogram.\n out_png Output filename for the figure.\n\noptions:\n -h, --help show this help message and exit\n --show_only Do not save the figure, only display.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
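What the histogram script computes is essentially a masked value dump; a sketch with nibabel and matplotlib, assuming coregistered inputs and reusing the defaults listed nearby (8 bins, title 'Histogram', bar color #0504aa). File names are placeholders from the usage example.

import nibabel as nib
import matplotlib.pyplot as plt

metric = nib.load("metric.nii.gz").get_fdata()
mask = nib.load("mask_bin.nii.gz").get_fdata()
values = metric[mask > 0]                    # keep only voxels inside the binary mask
plt.hist(values, bins=8, color="#0504aa")
plt.title("Histogram")
plt.savefig("out_filename_image.png")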
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nHistogram options:\n --title TITLE Use the provided info for the histogram title. [Histogram]\n --x_label X_LABEL Use the provided info for the x axis name.\n --colors COLORS Use the provided info for the bars color. [#0504aa]\n", + "synonyms": [ + [ + "maps", + "map" + ], + [ + "Data", + "data", + "data" + ], + [ + "binary", + "binary" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_viz_volume_scatterplot", + "docstring": "Script to display scatter plot between two maps (ex. FA and MD, ihMT and MT).\nBy default, no mask is applied to the data.\nDifferent options are available to mask or threshold data:\n - a binary mask\n - two probability maps, which can be used to threshold maps with\n --in_prob_maps. The same threshold is applied to both maps (--thr).\n - parcellation, which can be used to plot values for each region of\n an atlas (--in_atlas) or a subset of regions (--specific_label).\n The atlas option requires a json file (--atlas_lut) with indices and\n names of each label corresponding to the atlas, as follows:\n \"1\": \"lh_A8m\",\n \"2\": \"rh_A8m\",\n The numbers must correspond to the label indices in the json file.\n\nBe careful: you cannot use all of them at the same time.\n\nFor general scatter plot without mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n\nFor scatter plot with mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_bin_mask mask_wm.nii.gz\n\nFor tissue probability scatter plot:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_prob_maps wm_map.nii.gz gm_map.nii.gz\n\nFor scatter plot using atlas:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n --specific_label 34 67 87", + "help": "usage: scil_viz_volume_scatterplot.py [-h] [--out_dir OUT_DIR] [--thr THR]\n [--not_exclude_zero]\n [--in_bin_mask IN_BIN_MASK | --in_prob_maps IN_PROB_MAPS IN_PROB_MAPS | --in_atlas IN_ATLAS]\n [--atlas_lut ATLAS_LUT]\n [--specific_label SPECIFIC_LABEL [SPECIFIC_LABEL ...]]\n [--in_folder] [--title TITLE]\n [--x_label X_LABEL] [--y_label Y_LABEL]\n [--label LABEL]\n [--label_prob LABEL_PROB]\n [--marker MARKER]\n [--marker_size MARKER_SIZE]\n [--transparency TRANSPARENCY]\n [--dpi DPI] [--colors color1 color2]\n [--show_only]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_x_map in_y_map out_name\n\nScript to display scatter plot between two maps (ex. FA and MD, ihMT and MT).\nBy default, no mask is applied to the data.\nDifferent options are available to mask or threshold data:\n - a binary mask\n - two probability maps, which can be used to threshold maps with\n --in_prob_maps. 
The same threshold is applied to both maps (--thr).\n - parcellation, which can be used to plot values for each region of\n an atlas (--in_atlas) or a subset of regions (--specific_label).\n The atlas option requires a json file (--atlas_lut) with indices and\n names of each label corresponding to the atlas, as follows:\n \"1\": \"lh_A8m\",\n \"2\": \"rh_A8m\",\n The numbers must correspond to the label indices in the json file.\n\nBe careful: you cannot use all of them at the same time.\n\nFor general scatter plot without mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n\nFor scatter plot with mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_bin_mask mask_wm.nii.gz\n\nFor tissue probability scatter plot:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_prob_maps wm_map.nii.gz gm_map.nii.gz\n\nFor scatter plot using atlas:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n --specific_label 34 67 87\n\npositional arguments:\n in_x_map Map in x axis, FA for example.\n in_y_map Map in y axis, MD for example.\n out_name Output filename for the figure without extension.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Output directory to save scatter plot.\n --thr THR Use to apply threshold only on probability maps (same for both maps) with --in_prob_maps option. [0.9]\n --not_exclude_zero Keep zero value in data.\n --in_bin_mask IN_BIN_MASK\n Binary mask. Use this option to extract x and y map values from a specific mask or region: wm_mask or roi_mask for example.\n --in_prob_maps IN_PROB_MAPS IN_PROB_MAPS\n Probability maps, WM and GM for example.\n --in_atlas IN_ATLAS Path to the input atlas image.\n --show_only Do not save the figure, only display. Not available with the --in_atlas option.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nAtlas options:\n --atlas_lut ATLAS_LUT\n Path of the LUT file corresponding to atlas used to name the regions of interest.\n --specific_label SPECIFIC_LABEL [SPECIFIC_LABEL ...]\n Label list to use to do scatter plot. Labels must correspond to the atlas LUT file. [None]\n --in_folder Save label plots in subfolder \"Label_plots\".\n\nScatter plot options:\n --title TITLE Use the provided info for the title name. [Scatter Plot]\n --x_label X_LABEL Use the provided info for the x axis name. [x]\n --y_label Y_LABEL Use the provided info for the y axis name. [y]\n --label LABEL Use the provided info for the legend box corresponding to mask or first probability map. [None]\n --label_prob LABEL_PROB\n Use the provided info for the legend box corresponding to the second probability map. [Threshold prob_map 2]\n --marker MARKER Use the provided info for the marker shape. [.]\n --marker_size MARKER_SIZE\n Use the provided info for the marker size. [15]\n --transparency TRANSPARENCY\n Use the provided info for the point transparency. [0.4]\n --dpi DPI Use the provided info for the dpi resolution. 
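A sketch of the probability-map path through the scatter plot, assuming coregistered maps; whether the script pools the two thresholded maps or plots them as separate clouds is not restated here, so the pooling below (logical OR) is an assumption, and the plot defaults mirror the options above.

import nibabel as nib
import matplotlib.pyplot as plt

x = nib.load("FA.nii.gz").get_fdata()
y = nib.load("MD.nii.gz").get_fdata()
wm = nib.load("wm_map.nii.gz").get_fdata()
gm = nib.load("gm_map.nii.gz").get_fdata()
thr = 0.9                                   # same threshold for both maps, as with --thr
keep = (wm > thr) | (gm > thr)              # assumption: keep voxels passing either map
plt.scatter(x[keep], y[keep], s=15, alpha=0.4, marker=".")
plt.savefig("out_filename_image.png", dpi=300)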
[300]\n --colors color1 color2\n", + "synonyms": [ + [ + "region", + "regions", + "region" + ], + [ + "maps", + "map" + ], + [ + "region", + "regions", + "regions" + ], + [ + "image", + "image" + ], + [ + "maps", + "maps" + ], + [ + "applied", + "apply" + ], + [ + "probability", + "probability" + ], + [ + "Data", + "data", + "data" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "atlas", + "atlas" + ], + [ + "shape", + "shape" + ], + [ + "applied", + "applied" + ], + [ + "general", + "general" + ], + [ + "binary", + "binary" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_viz_volume_screenshot", + "docstring": "Take screenshot(s) of one or more slices in a given image volume along the\nrequested axis. If slice indices are not provided, all slices in the volume\nare used. The name of the output images are suffixed with _slice_{id}, with\nid being the slice number in the volume. If a labelmap image is provided (e.g.\na tissue segmentation map), it is overlaid on the volume slices. Same goes if\na mask is provided, with the difference that it can be rendered as a\ntransparency overlay as well as a contour.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plot.\n\nExample:\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_annotated.png\n --display_slice_number --display_lr\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_masked.png\n --transparency brainmask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial.png\n --slices 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_sagittal.png --axis sagittal\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_plasma_cmap.png\n --slices 30 40 50 60 70 80 90 100 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_overlay.png\n --slices 30 40 50 60 70 80 90 100 --overlays brain_mask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_contour.png\n --slices 30 40 50 60 70 80 90 100\n --overlays brain_mask.nii.gz --overlays_as_contours\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_map.png\n --slices 30 40 50 60 70 80 90 100 --labelmap tissue_map.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_viridis_cmap.png\n --slices 30 40 50 60 70 80 90 100\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_peaks.png\n --slices 30 40 50 60 70 80 90 100 --peaks peaks.nii.gz --volume_opacity 0.5", + "help": "usage: scil_viz_volume_screenshot.py [-h]\n [--volume_cmap_name VOLUME_CMAP_NAME]\n [--volume_opacity VOLUME_OPACITY]\n [--transparency TRANSPARENCY]\n [--slices SID [SID ...]]\n [--axis {sagittal,coronal,axial}]\n [--size WIDTH HEIGHT]\n [--display_slice_number] [--display_lr]\n [--labelmap LABELMAP]\n [--labelmap_cmap_name LABELMAP_CMAP_NAME]\n [--labelmap_opacity LABELMAP_OPACITY]\n [--overlays OVERLAYS [OVERLAYS ...]]\n [--overlays_as_contours]\n [--overlays_colors R G B [R G B ...]]\n [--overlays_opacity OVERLAYS_OPACITY]\n [--peaks PEAKS [PEAKS ...]]\n [--peaks_width PEAKS_WIDTH]\n [--peaks_opacity PEAKS_OPACITY]\n [-v [{DEBUG,INFO,WARNING}]]\n volume out_fname\n\nTake screenshot(s) of one or more slices in a given image volume along the\nrequested axis. If slice indices are not provided, all slices in the volume\nare used. 
The name of the output images are suffixed with _slice_{id}, with\nid being the slice number in the volume. If a labelmap image is provided (e.g.\na tissue segmentation map), it is overlaid on the volume slices. Same goes if\na mask is provided, with the difference that it can be rendered as a\ntransparency overlay as well as a contour.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plot.\n\nExample:\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_annotated.png\n --display_slice_number --display_lr\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_masked.png\n --transparency brainmask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial.png\n --slices 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_sagittal.png --axis sagittal\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_plasma_cmap.png\n --slices 30 40 50 60 70 80 90 100 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_overlay.png\n --slices 30 40 50 60 70 80 90 100 --overlays brain_mask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_contour.png\n --slices 30 40 50 60 70 80 90 100\n --overlays brain_mask.nii.gz --overlays_as_contours\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_map.png\n --slices 30 40 50 60 70 80 90 100 --labelmap tissue_map.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_viridis_cmap.png\n --slices 30 40 50 60 70 80 90 100\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_peaks.png\n --slices 30 40 50 60 70 80 90 100 --peaks peaks.nii.gz --volume_opacity 0.5\n\npositional arguments:\n volume Input 3D Nifti file (.nii/.nii.gz).\n out_fname Name of the output image(s). If multiple slices are provided (or none), their index will be append to the name (e.g. volume.jpg, volume.png becomes volume_slice_0.jpg, volume_slice_0.png).\n\noptions:\n -h, --help show this help message and exit\n --transparency TRANSPARENCY\n Transparency Nifti image (.nii/.nii.gz). Can either be a binary mask or a scalar image in the range [0, 1].\n --size WIDTH HEIGHT Size of the output image. [(768, 768)]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nSlicing:\n --slices SID [SID ...]\n Slice indices to screenshot. If None are supplied, all slices inside the transparency mask are selected.\n --axis {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n\nInput overlays:\n --labelmap LABELMAP Input labelmap file (.nii/.nii.gz).\n --overlays OVERLAYS [OVERLAYS ...]\n 3D Nifti image(s) to overlay (.nii/.nii.gz).\n --peaks PEAKS [PEAKS ...]\n Peaks Nifti image (.nii/.nii.gz).\n\nVolume rendering:\n --volume_cmap_name VOLUME_CMAP_NAME\n Colormap name for the 3D Nifti image data. [None]\n --volume_opacity VOLUME_OPACITY\n Opacity value for the 3D Nifti image data. [1.0]\n --labelmap_cmap_name LABELMAP_CMAP_NAME\n Colormap name for the labelmap image data. [viridis]\n --labelmap_opacity LABELMAP_OPACITY\n Opacity value for the labelmap image data. [0.5]\n\nPeaks rendering:\n --peaks_width PEAKS_WIDTH\n Width of the peaks lines. [3.0]\n --peaks_opacity PEAKS_OPACITY\n Opacity value for the peaks overlay. 
[1.0]\n\nOverlay rendering:\n --overlays_as_contours\n Display overlays contours and reduce the opacity of their inner region (see the `--overlays_opacity` argument).\n --overlays_colors R G B [R G B ...]\n Colors for the overlays or contours. You may provide a single color, for all overlays/contours, or one color for each. Each color is given as three values: R G B\n --overlays_opacity OVERLAYS_OPACITY\n Opacity value for the masks overlays. When combined with `--overlays_as_contours`, this will be the opacity of the region inside the computed contours. [0.5]\n\nAnnotations:\n --display_slice_number\n If true, displays the slice number in the upper left corner.\n --display_lr If true, add left and right annotations to the images.\n", + "synonyms": [ + [ + "region", + "regions", + "region" + ], + [ + "axial", + "axial" + ], + [ + "maps", + "map" + ], + [ + "coronal", + "sagittal", + "coronal" + ], + [ + "image", + "image" + ], + [ + "rendered", + "rendering", + "rendering" + ], + [ + "rendered", + "rendering", + "rendered" + ], + [ + "Data", + "data", + "data" + ], + [ + "true", + "true" + ], + [ + "left", + "left" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "coronal", + "sagittal", + "sagittal" + ], + [ + "level", + "level" + ], + [ + "size", + "size" + ], + [ + "difference", + "difference" + ] + ], + "keywords": [] + }, + { + "name": "scil_viz_volume_screenshot_mosaic", + "docstring": "Compose a mosaic of screenshots of the given image volume slices along the\nrequested axis. The provided transparency mask (e.g. a brain mask volume) is\nused to set the screenshot values outside the mask non-zero values to full\ntransparency. Additionally, if a labelmap image is provided (e.g. a tissue\nsegmentation map), it is overlaid on the volume slices. 
Also, a series of\nmasks can be provided and will be used to generate contours overlaid on each\nvolume slice.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plot.\n\nThe screenshots are overlapped according to the given factors.\n\nThe mosaic supports either horizontal, vertical or matrix arrangements.\n\nExample:\n>>> scil_viz_volume_screenshot_mosaic.py 1 8 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial.png 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --overlap_factor 0.6 0.5 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 tissues.nii.gz brain_mask.nii.gz\n mosaic_overlap_tissues_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_viridis_cmap.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_contours.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --overlays wm_mask.nii.gz gm_mask.nii.gz csf_mask.nii.gz", + "help": "usage: scil_viz_volume_screenshot_mosaic.py [-h]\n [--volume_cmap_name VOLUME_CMAP_NAME]\n [--volume_opacity VOLUME_OPACITY]\n [--axis {sagittal,coronal,axial}]\n [--size WIDTH HEIGHT]\n [--labelmap LABELMAP]\n [--labelmap_cmap_name LABELMAP_CMAP_NAME]\n [--labelmap_opacity LABELMAP_OPACITY]\n [--overlays OVERLAYS [OVERLAYS ...]]\n [--overlays_as_contours]\n [--overlays_colors R G B [R G B ...]]\n [--overlays_opacity OVERLAYS_OPACITY]\n [--overlap rWIDTH rHEIGHT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n rows cols volume transparency\n out_fname SID [SID ...]\n\nCompose a mosaic of screenshots of the given image volume slices along the\nrequested axis. The provided transparency mask (e.g. a brain mask volume) is\nused to set the screenshot values outside the mask non-zero values to full\ntransparency. Additionally, if a labelmap image is provided (e.g. a tissue\nsegmentation map), it is overlaid on the volume slices. 
Also, a series of\nmasks can be provided and will be used to generate contours overlaid on each\nvolume slice.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plot.\n\nThe screenshots are overlapped according to the given factors.\n\nThe mosaic supports either horizontal, vertical or matrix arrangements.\n\nExample:\n>>> scil_viz_volume_screenshot_mosaic.py 1 8 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial.png 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --overlap_factor 0.6 0.5 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 tissues.nii.gz brain_mask.nii.gz\n mosaic_overlap_tissues_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_viridis_cmap.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_contours.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --overlays wm_mask.nii.gz gm_mask.nii.gz csf_mask.nii.gz\n\npositional arguments:\n rows The mosaic row count.\n cols The mosaic column count.\n volume Input 3D Nifti file (.nii/.nii.gz).\n transparency Transparency Nifti image (.nii/.nii.gz). Can either be a binary mask or a scalar image in the range [0, 1].\n out_fname Name of the output image (e.g. img.jpg, img.png).\n SID Slice indices to screenshot.\n\noptions:\n -h, --help show this help message and exit\n --axis {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n --size WIDTH HEIGHT Size of the output image. [(768, 768)]\n --overlap rWIDTH rHEIGHT\n The overlap factor as a ratio of each image dimension. [(0.6, 0.0)]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nInput overlays:\n --labelmap LABELMAP Input labelmap file (.nii/.nii.gz).\n --overlays OVERLAYS [OVERLAYS ...]\n 3D Nifti image(s) to overlay (.nii/.nii.gz).\n\nVolume rendering:\n --volume_cmap_name VOLUME_CMAP_NAME\n Colormap name for the 3D Nifti image data. [None]\n --volume_opacity VOLUME_OPACITY\n Opacity value for the 3D Nifti image data. [1.0]\n --labelmap_cmap_name LABELMAP_CMAP_NAME\n Colormap name for the labelmap image data. [viridis]\n --labelmap_opacity LABELMAP_OPACITY\n Opacity value for the labelmap image data. [0.5]\n\nOverlay rendering:\n --overlays_as_contours\n Display overlays contours and reduce the opacity of their inner region (see the `--overlays_opacity` argument).\n --overlays_colors R G B [R G B ...]\n Colors for the overlays or contours. You may provide a single color, for all overlays/contours, or one color for each. Each color is given as three values: R G B\n --overlays_opacity OVERLAYS_OPACITY\n Opacity value for the masks overlays. When combined with `--overlays_as_contours`, this will be the opacity of the region inside the computed contours. 
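The overlap factors above shrink the stride between neighbouring mosaic cells; a sketch of that placement arithmetic, assuming top-left anchored cells of equal size (the helper name is hypothetical).

def cell_origin(row, col, width, height, overlap=(0.6, 0.0)):
    # Neighbouring cells overlap by the given ratio of each dimension.
    step_x = width * (1.0 - overlap[0])
    step_y = height * (1.0 - overlap[1])
    return col * step_x, row * step_y

print(cell_origin(0, 1, 768, 768))   # (307.2, 0.0): second column starts 307.2 px in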
[0.5]\n", + "synonyms": [ + [ + "region", + "regions", + "region" + ], + [ + "axial", + "axial" + ], + [ + "maps", + "map" + ], + [ + "coronal", + "sagittal", + "coronal" + ], + [ + "image", + "image" + ], + [ + "rendered", + "rendering", + "rendering" + ], + [ + "Data", + "data", + "data" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "coronal", + "sagittal", + "sagittal" + ], + [ + "level", + "level" + ], + [ + "brain", + "brain" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_volume_apply_transform", + "docstring": "Transform Nifti (.nii.gz) using an affine/rigid transformation.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_apply_transform_to_image.py.", + "help": "usage: scil_volume_apply_transform.py [-h] [--inverse] [--keep_dtype]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file in_target_file in_transfo\n out_name\n\nTransform Nifti (.nii.gz) using an affine/rigid transformation.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_apply_transform_to_image.py.\n\npositional arguments:\n in_file Path of the file to be transformed (nii or nii.gz)\n in_target_file Path of the reference target file (.nii.gz).\n in_transfo Path of the file containing the 4x4 \n transformation, matrix (.txt, .npy or .mat).\n out_name Output filename of the transformed data.\n\noptions:\n -h, --help show this help message and exit\n --inverse Apply the inverse transformation.\n --keep_dtype If True, keeps the data_type of the input image (in_file) when saving the output image (out_name).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
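A sketch of what applying a 4x4 world-space transform to a volume involves, using the standard pull-resampling recipe with nibabel and scipy; this is the textbook composition under assumed placeholder file names, not necessarily scilpy's exact code path.

import numpy as np
import nibabel as nib
from scipy.ndimage import affine_transform

img = nib.load("in_file.nii.gz")
ref = nib.load("in_target_file.nii.gz")
transfo = np.loadtxt("in_transfo.txt")           # 4x4 world-space transformation
# Pull resampling: map every output voxel (reference grid) back to an input voxel.
vox2vox = np.linalg.inv(img.affine) @ np.linalg.inv(transfo) @ ref.affine
data = affine_transform(img.get_fdata(), vox2vox[:3, :3],
                        offset=vox2vox[:3, 3], output_shape=ref.shape)
nib.save(nib.Nifti1Image(data, ref.affine), "out_name.nii.gz")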
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "image", + "image" + ], + [ + "applied", + "apply" + ], + [ + "Data", + "data", + "data" + ], + [ + "true", + "true" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_volume_b0_synthesis", + "docstring": "Wrapper for SyNb0 available in Dipy, to run it on a single subject.\nRequires skull-stripped b0 and t1w images as input; the script will normalize the\nt1w's WM to 110, co-register both images, then register it to the appropriate\ntemplate, run SyNb0 and then transform the result back to the original space.\n\nSyNb0 is a deep learning model that predicts a synthetic, distortion-free\nb0 image from a distorted b0 and T1w.\n\nThis script must be used carefully, as it is meant to be used in an\nenvironment with the following dependencies already installed (not installed by\ndefault in Scilpy):\n- tensorflow-addons\n- tensorrt\n- tensorflow", + "help": "usage: scil_volume_b0_synthesis.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_b0 in_b0_mask in_t1 in_t1_mask out_b0\n\nWrapper for SyNb0 available in Dipy, to run it on a single subject.\nRequires skull-stripped b0 and t1w images as input; the script will normalize the\nt1w's WM to 110, co-register both images, then register it to the appropriate\ntemplate, run SyNb0 and then transform the result back to the original space.\n\nSyNb0 is a deep learning model that predicts a synthetic, distortion-free\nb0 image from a distorted b0 and T1w.\n\nThis script must be used carefully, as it is meant to be used in an\nenvironment with the following dependencies already installed (not installed by\ndefault in Scilpy):\n- tensorflow-addons\n- tensorrt\n- tensorflow\n\npositional arguments:\n in_b0 Input b0 image.\n in_b0_mask Input b0 mask.\n in_t1 Input t1w image.\n in_t1_mask Input t1w mask.\n out_b0 Output b0 image without distortion.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Schilling, Kurt G., et al. 
\"Synthesized b0 for diffusion distortion\n correction (Synb0-DisCo).\" Magnetic resonance imaging 64 (2019): 62-70.\n", + "synonyms": [ + [ + "subject", + "subject" + ], + [ + "imaging", + "imaging" + ], + [ + "learning", + "learning" + ], + [ + "image", + "image" + ], + [ + "diffusion", + "diffusion" + ], + [ + "space", + "space" + ], + [ + "result", + "result" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_volume_count_non_zero_voxels", + "docstring": "Count the number of non-zero voxels in an image file.\n\nIf you give it an image with more than 3 dimensions, it will summarize the 4th\n(or more) dimension to one voxel, and then find non-zero voxels over this.\nThis means that if there is at least one non-zero voxel in the 4th dimension,\nthis voxel of the 3D volume will be considered as non-zero.\n\nFormerly: scil_count_non_zero_voxels.py", + "help": "usage: scil_volume_count_non_zero_voxels.py [-h] [--out OUT_FILE] [--stats]\n [--id VALUE_ID]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n IN_FILE\n\nCount the number of non-zero voxels in an image file.\n\nIf you give it an image with more than 3 dimensions, it will summarize the 4th\n(or more) dimension to one voxel, and then find non-zero voxels over this.\nThis means that if there is at least one non-zero voxel in the 4th dimension,\nthis voxel of the 3D volume will be considered as non-zero.\n\nFormerly: scil_count_non_zero_voxels.py\n\npositional arguments:\n IN_FILE Input file name, in nifti format.\n\noptions:\n -h, --help show this help message and exit\n --out OUT_FILE Name of the output file, which will be saved as a text file.\n --stats If set, output the value using a stats format. Using this synthax will append\n a line to the output file, instead of creating a file with only one line.\n This is useful to create a file to be used as the source of data for a graph.\n Can be combined with --id\n --id VALUE_ID Id of the current count. If used, the value of this argument will be\n output (followed by a \":\") before the count value.\n Mostly useful with --stats.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "create", + "create" + ], + [ + "image", + "image" + ], + [ + "Data", + "data", + "data" + ], + [ + "create", + "creating" + ], + [ + "voxel", + "voxels" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "considered", + "considered" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_volume_crop", + "docstring": "Crop a volume using a given or an automatically computed bounding box. If a\npreviously computed bounding box file is given, the cropping will be applied\nand the affine fixed accordingly.\n\nWarning: This works well on masked images (like with FSL-Bet) volumes since\nit's looking for non-zero data. Therefore, you should validate the results on\nother types of images that haven't been masked.\n\nFormerly: scil_crop_volume.py", + "help": "usage: scil_volume_crop.py [-h] [--ignore_voxel_size]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--input_bbox INPUT_BBOX | --output_bbox OUTPUT_BBOX]\n in_image out_image\n\nCrop a volume using a given or an automatically computed bounding box. 
If a\npreviously computed bounding box file is given, the cropping will be applied\nand the affine fixed accordingly.\n\nWarning: This works well on masked volumes (like with FSL-BET) since\nit's looking for non-zero data. Therefore, you should validate the results on\nother types of images that haven't been masked.\n\nFormerly: scil_crop_volume.py\n\npositional arguments:\n in_image Path of the nifti file to crop.\n out_image Path of the cropped nifti file to write.\n\noptions:\n -h, --help show this help message and exit\n --ignore_voxel_size Ignore voxel size compatibility test between input bounding box and data. Warning, use only if you know what you are doing.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --input_bbox INPUT_BBOX\n Path of the pickle file from which to take the bounding box to crop input file.\n --output_bbox OUTPUT_BBOX\n Path of the pickle file where to write the computed bounding box. (.pickle extension)\n", + "synonyms": [ + [ + "volume", + "volumes", + "volumes" + ], + [ + "Data", + "data", + "data" + ], + [ + "applied", + "applied" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_volume_flip", + "docstring": "Flip the volume according to the specified axis.\n\nFormerly: scil_flip_volume.py", + "help": "usage: scil_volume_flip.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image dimension [dimension ...]\n\nFlip the volume according to the specified axis.\n\nFormerly: scil_flip_volume.py\n\npositional arguments:\n in_image Path of the input volume (nifti).\n out_image Path of the output volume (nifti).\n dimension The axes you want to flip. e.g.: to flip the x and y axes use: x y.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "volume", + "volumes", + "volume" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_volume_math", + "docstring": "Performs an operation on a list of images. The supported operations are\nlisted below.\n\nThis script loads all images into memory and will often crash after a few\nhundred images.\n\nSome operations such as multiplication or addition accept float values as\nparameters instead of images.\n> scil_volume_math.py multiplication img.nii.gz 10 mult_10.nii.gz\n\nFormerly: scil_image_math.py", + "help": "usage: scil_volume_math.py [-h] [--data_type DATA_TYPE] [--exclude_background]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference,concatenate,dilation,erosion,closing,opening,blur}\n in_args [in_args ...] out_image\n\nPerforms an operation on a list of images. 
The supported operations are\nlisted below.\n\nThis script loads all images into memory and will often crash after a few\nhundred images.\n\nSome operations such as multiplication or addition accept float values as\nparameters instead of images.\n> scil_volume_math.py multiplication img.nii.gz 10 mult_10.nii.gz\n\nFormerly: scil_image_math.py\n\n lower_threshold: IMG THRESHOLD\n All values below the threshold will be set to zero.\n All values above the threshold will be set to one.\n \n upper_threshold: IMG THRESHOLD\n All values below the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_eq: IMG THRESHOLD\n All values below the threshold will be set to zero.\n All values above or equal to the threshold will be set to one.\n \n upper_threshold_eq: IMG THRESHOLD\n All values below or equal to the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_otsu: IMG\n All values below or equal to the Otsu threshold will be set to zero.\n All values above the Otsu threshold will be set to one.\n (Otsu's method is an algorithm to perform automatic image thresholding\n of the background.)\n \n upper_threshold_otsu: IMG\n All values below the Otsu threshold will be set to one.\n All values above or equal to the Otsu threshold will be set to zero.\n Equivalent to lower_threshold_otsu followed by an inversion.\n \n lower_clip: IMG THRESHOLD\n All values below the threshold will be set to threshold.\n \n upper_clip: IMG THRESHOLD\n All values above the threshold will be set to threshold.\n \n absolute_value: IMG\n All negative values will become positive.\n \n round: IMG\n Round all decimal values to the closest integer.\n \n ceil: IMG\n Ceil all decimal values to the next integer.\n \n floor: IMG\n Floor all decimal values to the previous integer.\n \n normalize_sum: IMG\n Normalize the image so the sum of all values is one.\n \n normalize_max: IMG\n Normalize the image so the maximum value is one.\n \n log_10: IMG\n Apply a log (base 10) to all non-zero values of an image.\n \n log_e: IMG\n Apply a natural log to all non-zero values of an image.\n \n convert: IMG\n Perform no operation, but simply change the data type.\n \n invert: IMG\n Operation on binary image to interchange 0s and 1s in a binary mask.\n \n addition: IMGs\n Add multiple images together.\n \n subtraction: IMG_1 IMG_2\n Subtract the second image from the first (IMG_1 - IMG_2).\n \n multiplication: IMGs\n Multiply multiple images together (danger of underflow and overflow).\n \n division: IMG_1 IMG_2\n Divide the first image by the second (danger of underflow and overflow).\n Zero values are ignored and excluded from the operation.\n \n mean: IMGs\n Compute the mean of images.\n If a single 4D image is provided, average along the last dimension.\n \n std: IMGs\n Compute the standard deviation of multiple images.\n If a single 4D image is provided, compute the STD along the last\n dimension.\n \n correlation: IMGs\n Computes the correlation of the 3x3x3 neighborhood of each voxel, for\n all pairs of input images. The final image is the average correlation\n (through all pairs).\n For a given pair of images:\n - Background is considered as 0. May lead to very high correlations\n close to the border of the background regions, or very poor ones if the\n background in both images differs.\n - Images are zero-padded. 
For the same reason as above, this may lead to\n very high correlations if you have data close to the border of the\n image.\n - NaN values (if a voxel's neighborhood is entirely uniform; std 0) are\n replaced by\n - 0 if at least one neighborhood entirely contained background.\n - 1 if the voxel's neighborhoods are uniform in both images.\n - 0 if the voxel's neighborhood is uniform in one image, but not\n the other.\n\n UPDATE AS OF VERSION 2.0: Random noise was previously added in the\n process to help avoid NaN values. Now replaced by either 0 or 1 as\n explained above.\n \n union: IMGs\n Operation on binary image to keep voxels that are non-zero in at\n least one file.\n \n intersection: IMGs\n Operation on binary image to keep the voxels that are non-zero and\n present in all files.\n \n difference: IMG_1 IMG_2\n Operation on binary image to keep voxels from the first file that are\n not in the second file (non-zeros).\n \n concatenate: IMGs\n Concatenate a list of 3D and 4D images into a single 4D image.\n \n dilation: IMG, VALUE\n Binary morphological operation to spatially extend the values of an\n image to their neighbors. VALUE is in voxels: an integer > 0.\n \n erosion: IMG, VALUE\n Binary morphological operation to spatially shrink the volume contained\n in a binary image. VALUE is in voxels: an integer > 0.\n \n closing: IMG, VALUE\n Binary morphological operation, dilation followed by an erosion.\n \n opening: IMG, VALUE\n Binary morphological operation, erosion followed by a dilation.\n \n blur: IMG, VALUE\n Apply a Gaussian blur to a single image. VALUE is sigma, the standard\n deviation of the Gaussian kernel.\n \n\npositional arguments:\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference,concatenate,dilation,erosion,closing,opening,blur}\n The type of operation to be performed on the images.\n in_args The list of image files or parameters. Refer to each operation's documentation for the expected arguments.\n out_image Output image path.\n\noptions:\n -h, --help show this help message and exit\n --data_type DATA_TYPE\n Data type of the output image. Use the format: \n uint8, int16, int/float32, int/float64.\n --exclude_background Does not affect the background of the original images.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
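The correlation rules above can be restated as a short numpy sketch (an illustrative reimplementation of the documented behaviour, not the scilpy code; radius=1 gives the 3x3x3 neighborhood):

    import numpy as np

    def neighborhood_correlation(img1, img2, radius=1):
        # Zero-pad so border voxels keep a full neighborhood, as documented.
        pad = [(radius, radius)] * 3
        a = np.pad(img1.astype(float), pad)
        b = np.pad(img2.astype(float), pad)
        out = np.zeros(img1.shape)
        w = 2 * radius + 1
        for x in range(img1.shape[0]):
            for y in range(img1.shape[1]):
                for z in range(img1.shape[2]):
                    na = a[x:x + w, y:y + w, z:z + w].ravel()
                    nb = b[x:x + w, y:y + w, z:z + w].ravel()
                    if na.std() == 0 and nb.std() == 0:
                        # Both neighborhoods uniform: 1, unless at least one
                        # contains only background (all zeros).
                        only_bg = np.all(na == 0) or np.all(nb == 0)
                        out[x, y, z] = 0.0 if only_bg else 1.0
                    elif na.std() == 0 or nb.std() == 0:
                        # Uniform in one image but not the other.
                        out[x, y, z] = 0.0
                    else:
                        out[x, y, z] = np.corrcoef(na, nb)[0, 1]
        return out

Averaging this map over all pairs of input images gives the final image described in the help text.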
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "process", + "process" + ], + [ + "methods", + "method" + ], + [ + "region", + "regions", + "regions" + ], + [ + "positive", + "negative" + ], + [ + "supported", + "supported" + ], + [ + "image", + "image" + ], + [ + "high", + "high" + ], + [ + "algorithm", + "algorithm" + ], + [ + "applied", + "apply" + ], + [ + "positive", + "positive" + ], + [ + "random", + "random" + ], + [ + "Data", + "data", + "data" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "voxel", + "voxels" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "average", + "average" + ], + [ + "considered", + "considered" + ], + [ + "memory", + "memory" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "higher", + "higher" + ], + [ + "parameters", + "parameters" + ], + [ + "difference", + "difference" + ] + ], + "keywords": [] + }, + { + "name": "scil_volume_remove_outliers_ransac", + "docstring": "Remove outliers from image using the RANSAC algorithm.\nThe RANSAC algorithm parameters are sensitive to the input data.\n\nNOTE: Current default parameters are tuned for ad/md/rd images only.\n\nFormerly: scil_remove_outliers_ransac.py", + "help": "usage: scil_volume_remove_outliers_ransac.py [-h] [--min_fit MIN_FIT]\n [--max_iter MAX_ITER]\n [--fit_thr FIT_THR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image\n\nRemove outliers from image using the RANSAC algorithm.\nThe RANSAC algorithm parameters are sensitive to the input data.\n\nNOTE: Current default parameters are tuned for ad/md/rd images only.\n\nFormerly: scil_remove_outliers_ransac.py\n\npositional arguments:\n in_image Nifti image.\n out_image Corrected Nifti image.\n\noptions:\n -h, --help show this help message and exit\n --min_fit MIN_FIT The minimum number of data values required to fit the model. [50]\n --max_iter MAX_ITER The maximum number of iterations allowed in the algorithm. [1000]\n --fit_thr FIT_THR Threshold value for determining when a data point fits a model. [0.01]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "image", + "image" + ], + [ + "algorithm", + "algorithm" + ], + [ + "Data", + "data", + "data" + ], + [ + "threshold", + "thresholds", + "threshold" + ], + [ + "level", + "level" + ], + [ + "parameters", + "parameters" + ] + ], + "keywords": [] + }, + { + "name": "scil_volume_resample", + "docstring": "Script to resample a dataset to match the resolution of another\nreference dataset or to the resolution specified as argument.\n\nFormerly: scil_resample_volume.py", + "help": "usage: scil_volume_resample.py [-h]\n (--ref REF | --volume_size VOLUME_SIZE [VOLUME_SIZE ...] | --voxel_size VOXEL_SIZE [VOXEL_SIZE ...] 
| --iso_min)\n [--interp {nn,lin,quad,cubic}]\n [--enforce_dimensions]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image\n\nScript to resample a dataset to match the resolution of another\nreference dataset or to the resolution specified as argument.\n\nFormerly: scil_resample_volume.py\n\npositional arguments:\n in_image Path of the input volume.\n out_image Path of the resampled volume.\n\noptions:\n -h, --help show this help message and exit\n --ref REF Reference volume to resample to.\n --volume_size VOLUME_SIZE [VOLUME_SIZE ...]\n Sets the size for the volume. If the value is set to Y, it will resample to a shape of Y x Y x Y.\n --voxel_size VOXEL_SIZE [VOXEL_SIZE ...]\n Sets the voxel size. If the value is set to Y, it will set a voxel size of Y x Y x Y.\n --iso_min Resample the volume to R x R x R with R being the smallest current voxel dimension.\n --interp {nn,lin,quad,cubic}\n Interpolation mode.\n nn: nearest neighbour\n lin: linear\n quad: quadratic\n cubic: cubic\n Defaults to linear.\n --enforce_dimensions Enforce the reference volume dimension.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "shape", + "shape" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ], + [ + "size", + "size" + ] + ], + "keywords": [] + }, + { + "name": "scil_volume_reshape_to_reference", + "docstring": "Reshape / reslice / resample *.nii or *.nii.gz using a reference.\nThis script can be used to align freesurfer/civet output, as .mgz,\nto the original input image.\n\n>>> scil_volume_reshape_to_reference.py wmparc.mgz t1.nii.gz wmparc_t1.nii.gz\\\n --interpolation nearest\n\nFormerly: scil_reshape_to_reference.py", + "help": "usage: scil_volume_reshape_to_reference.py [-h]\n [--interpolation {linear,nearest}]\n [--keep_dtype]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file in_ref_file out_file\n\nReshape / reslice / resample *.nii or *.nii.gz using a reference.\nThis script can be used to align freesurfer/civet output, as .mgz,\nto the original input image.\n\n>>> scil_volume_reshape_to_reference.py wmparc.mgz t1.nii.gz wmparc_t1.nii.gz\\\n --interpolation nearest\n\nFormerly: scil_reshape_to_reference.py\n\npositional arguments:\n in_file Path of the image (.nii or .mgz) to be reshaped.\n in_ref_file Path of the reference image (.nii).\n out_file Output filename of the reshaped image (.nii).\n\noptions:\n -h, --help show this help message and exit\n --interpolation {linear,nearest}\n Interpolation: \"linear\" or \"nearest\". [linear]\n --keep_dtype If True, keeps the data_type of the input image (in_file) when saving the output image (out_file).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "true", + "true" + ], + [ + "level", + "level" + ], + [ + "image", + "image" + ] + ], + "keywords": [] + }, + { + "name": "scil_volume_stats_in_ROI", + "docstring": "Compute the statistics (mean, std) of scalar maps, which can represent\ndiffusion metrics, in a ROI. Prints the results.\n\nThe mask can either be a binary mask, or a weighting mask. If the mask is\na weighting mask, it should either contain floats between 0 and 1 or should be\nnormalized with --normalize_weights. 
IMPORTANT: if the mask contains weights\n(and not 0 and 1 exclusively), the standard deviation will also be weighted.", + "help": "usage: scil_volume_stats_in_ROI.py [-h]\n (--metrics_dir dir | --metrics file [file ...])\n [--bin] [--normalize_weights]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_mask\n\nCompute the statistics (mean, std) of scalar maps, which can represent\ndiffusion metrics, in a ROI. Prints the results.\n\nThe mask can either be a binary mask, or a weighting mask. If the mask is\na weighting mask, it should either contain floats between 0 and 1 or should be\nnormalized with --normalize_weights. IMPORTANT: if the mask contains weights\n(and not 0 and 1 exclusively), the standard deviation will also be weighted.\n\npositional arguments:\n in_mask Mask volume filename.\n Can be a binary mask or a weighted mask.\n\noptions:\n -h, --help show this help message and exit\n --bin If set, will consider every value of the mask higher than 0 to be \n part of the mask (equivalent weighting for every voxel).\n --normalize_weights If set, the weights will be normalized to the [0,1] range.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics input options:\n --metrics_dir dir Name of the directory containing metrics files: we will \n load all nifti files.\n --metrics file [file ...]\n Metrics nifti filename. List of the names of the metrics file, \n in nifti format.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", + "synonyms": [ + [ + "represent", + "represent" + ], + [ + "weighted", + "weighted" + ], + [ + "diffusion", + "diffusion" + ], + [ + "maps", + "maps" + ], + [ + "volume", + "volumes", + "volume" + ], + [ + "binary", + "binary" + ], + [ + "voxel", + "voxel" + ], + [ + "level", + "level" + ] + ], + "keywords": [] + }, + { + "name": "scil_volume_stats_in_labels", + "docstring": "Computes the information from the input map for each cortical region\n(corresponding to an atlas).\n\nHint: For instance, this script could be useful if you have a seed map from a\nspecific bundle, to know from which regions it originated.\n\nFormerly: scil_compute_seed_by_labels.py", + "help": "usage: scil_volume_stats_in_labels.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_labels in_labels_lut in_map\n\nComputes the information from the input map for each cortical region\n(corresponding to an atlas).\n\nHint: For instance, this script could be useful if you have a seed map from a\nspecific bundle, to know from which regions it originated.\n\nFormerly: scil_compute_seed_by_labels.py\n\npositional arguments:\n in_labels Path of the input label file.\n in_labels_lut Path of the LUT file corresponding to labels, used to name the regions of interest.\n in_map Path of the input map file. Expecting a 3D file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
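For scil_volume_stats_in_ROI above, the weighting rule can be summarized in a few lines of numpy (a minimal sketch of the stated behaviour, not the scilpy implementation; metric and mask are assumed to be same-shaped arrays):

    import numpy as np

    def weighted_mean_std(metric, mask):
        # Binary masks are a special case of weights; weights are
        # normalized to sum to one, and the standard deviation is
        # weighted as well, as the docstring warns.
        w = mask.astype(float).ravel()
        w = w / w.sum()
        m = metric.astype(float).ravel()
        mean = float(np.sum(w * m))
        std = float(np.sqrt(np.sum(w * (m - mean) ** 2)))
        return mean, std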
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", + "synonyms": [ + [ + "region", + "regions", + "region" + ], + [ + "maps", + "map" + ], + [ + "region", + "regions", + "regions" + ], + [ + "bundles", + "bundle" + ], + [ + "atlas", + "atlas" + ], + [ + "cortex", + "cortical", + "parietal", + "cortical" + ], + [ + "level", + "level" + ], + [ + "specific", + "specific" + ] + ], + "keywords": [] + } + ] +} \ No newline at end of file diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index b4cf77575..21c294271 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -23,7 +23,8 @@ import nltk from nltk.stem import PorterStemmer from colorama import init, Fore, Style - +import json +from pathlib import Path from scilpy.io.utils import add_verbose_arg nltk.download('punkt', quiet=True) @@ -36,6 +37,9 @@ SPACING_CHAR = '=' SPACING_LEN = 80 +BASE_DIR = Path(__file__).parent.parent +JSON_FILE_PATH = BASE_DIR / 'scilpy-bot-scripts'/'json_files'/'knowledge_base.json' + stemmer = PorterStemmer() def _build_arg_parser(): @@ -148,7 +152,19 @@ def main(): if not matches: - logging.info(_make_title(' No results found! ')) + logging.info(_make_title(' No such keyword found! Let\'s look for synonyms... ')) + scripts = load_json(JSON_FILE_PATH) + # Search for synonyms if no matches found + matches = search_keywords_in_synonyms(scripts, args.keywords) + if matches: + logging.info(f"Found {len(matches)} scripts with matching synonyms. Displaying first 5:") + for match in matches[:5]: + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match['name']}{Style.RESET_ALL}") + display_short_info, _ = _split_first_sentence(match.get('docstring', 'No docstring available!')) + display_short_info = _highlight_keywords(display_short_info, stemmed_keywords) + logging.info(display_short_info) + else: + logging.info(_make_title(' No results found in synonyms either! 
')) def _make_title(text): @@ -264,5 +280,18 @@ def _highlight_keywords(text, stemmed_keywords): highlighted_text.append(word) return ' '.join(highlighted_text) +def load_json(json_filepath): + with open(json_filepath, 'r', encoding='utf-8') as file: + return json.load(file) + +def search_keywords_in_synonyms(scripts, keywords): + matches = [] + for script in scripts['scripts']: + for synonym_list in script.get('synonyms', []): + if any(keyword in synonym_list for keyword in keywords): + matches.append(script) + break + return matches + if __name__ == '__main__': main() From 872e5a4e476ee605cb4a2e8016ac7f46b12bec15 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Fri, 21 Jun 2024 12:48:14 -0400 Subject: [PATCH 10/69] add synonyms, keywords and acronyms json files --- scripts/scil_search_keywords.py | 133 +++++++++++++++----------------- 1 file changed, 64 insertions(+), 69 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 21c294271..6d6c4e541 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -17,14 +17,11 @@ import ast import logging import pathlib -import re import subprocess -import numpy as np import nltk from nltk.stem import PorterStemmer from colorama import init, Fore, Style -import json -from pathlib import Path + from scilpy.io.utils import add_verbose_arg nltk.download('punkt', quiet=True) @@ -37,9 +34,6 @@ SPACING_CHAR = '=' SPACING_LEN = 80 -BASE_DIR = Path(__file__).parent.parent -JSON_FILE_PATH = BASE_DIR / 'scilpy-bot-scripts'/'json_files'/'knowledge_base.json' - stemmer = PorterStemmer() def _build_arg_parser(): @@ -68,7 +62,6 @@ def main(): stemmed_keywords = _stem_keywords(args.keywords) - # Use directory of this script, should work with most installation setups script_dir = pathlib.Path(__file__).parent hidden_dir = script_dir / '.hidden' @@ -83,9 +76,6 @@ def main(): matches = [] - keywords_regexes = [re.compile('(' + re.escape(kw) + ')', re.IGNORECASE) - for kw in args.keywords] - # Search through the argparser instead of the docstring if args.search_parser: #Use precomputed help files @@ -106,7 +96,7 @@ def main(): display_short_info, display_long_info = _split_first_sentence( search_text) - # Highlight found keywords using colorama + # Highlight found keywords for keyword in args.keywords: display_short_info = display_short_info.replace(keyword, f'{Fore.RED}{Style.BRIGHT}{keyword}{Style.RESET_ALL}') display_long_info = display_long_info.replace(keyword, f'{Fore.RED}{Style.BRIGHT}{keyword}{Style.RESET_ALL}') @@ -152,53 +142,13 @@ def main(): if not matches: - logging.info(_make_title(' No such keyword found! Let\'s look for synonyms... ')) - scripts = load_json(JSON_FILE_PATH) - # Search for synonyms if no matches found - matches = search_keywords_in_synonyms(scripts, args.keywords) - if matches: - logging.info(f"Found {len(matches)} scripts with matching synonyms. Displaying first 5:") - for match in matches[:5]: - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match['name']}{Style.RESET_ALL}") - display_short_info, _ = _split_first_sentence(match.get('docstring', 'No docstring available!')) - display_short_info = _highlight_keywords(display_short_info, stemmed_keywords) - logging.info(display_short_info) - else: - logging.info(_make_title(' No results found in synonyms either! ')) + logging.info(_make_title(' No results found! 
')) def _make_title(text): return f'{Fore.BLUE}{Style.BRIGHT}{text.center(SPACING_LEN, SPACING_CHAR)}{Style.RESET_ALL}' -def _test_matching_keywords(keywords, texts): - """Test multiple texts for matching keywords. Returns True only if all - keywords are present in any of the texts. - - Parameters - ---------- - keywords : Iterable of str - Keywords to test for. - texts : Iterable of str - Strings that should contain the keywords. - - Returns - ------- - True if all keywords were found in at least one of the texts. - - """ - matches = [] - for key in keywords: - key_match = False - for text in texts: - if key.lower() in text.lower(): - key_match = True - break - matches.append(key_match) - - return np.all(matches) - - def _get_docstring_from_script_path(script): """Extract a python file's docstring from a filepath. @@ -250,26 +200,84 @@ def _split_first_sentence(text): return sentence, remaining def _stem_keywords(keywords): + """ + Stem a list of keywords using PorterStemmer. + + Parameters + ---------- + keywords : list of str + Keywords to be stemmed. + + Returns + ------- + list of str + Stemmed keywords. + """ return [stemmer.stem(keyword) for keyword in keywords] def _stem_text(text): + """ + Stem all words in a text using PorterStemmer. + + Parameters + ---------- + text : str + Text to be stemmed. + + Returns + ------- + str + Stemmed text. + """ words = nltk.word_tokenize(text) return ' '.join([stemmer.stem(word) for word in words]) def _contains_stemmed_keywords(stemmed_keywords, text, filename): + """ + Check if stemmed keywords are present in the text or filename. + + Parameters + ---------- + stemmed_keywords : list of str + Stemmed keywords to search for. + text : str + Text to search within. + filename : str + Filename to search within. + + Returns + ------- + bool + True if all stemmed keywords are found in the text or filename, False otherwise. + """ stemmed_text = _stem_text(text) stemmed_filename = _stem_text(filename) return all([stem in stemmed_text or stem in stemmed_filename for stem in stemmed_keywords]) def _generate_help_files(): - """Call the external script generate_help_files to generate help files + """ + Call the external script generate_help_files to generate help files """ script_path = pathlib.Path(__file__).parent.parent / 'scilpy-bot-scripts'/'generate_help_files.py' # calling the external script generate_help_files subprocess.run(['python', script_path], check=True) def _highlight_keywords(text, stemmed_keywords): - """Highlight the stemmed keywords in the given text using colorama.""" + """ + Highlight the stemmed keywords in the given text using colorama. + + Parameters + ---------- + text : str + Text to highlight keywords in. + stemmed_keywords : list of str + Stemmed keywords to highlight. + + Returns + ------- + str + Text with highlighted keywords. 
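A quick illustration of why the helpers above compare Porter stems rather than raw words: inflected forms collapse to a single stem, so a query like "tracking" can match "track" in a docstring. A hypothetical REPL session:

    >>> from nltk.stem import PorterStemmer
    >>> stemmer = PorterStemmer()
    >>> stemmer.stem('tracking')
    'track'
    >>> stemmer.stem('filtered')
    'filter'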
+ """ words = text.split() highlighted_text = [] for word in words: @@ -280,18 +288,5 @@ def _highlight_keywords(text, stemmed_keywords): highlighted_text.append(word) return ' '.join(highlighted_text) -def load_json(json_filepath): - with open(json_filepath, 'r', encoding='utf-8') as file: - return json.load(file) - -def search_keywords_in_synonyms(scripts, keywords): - matches = [] - for script in scripts['scripts']: - for synonym_list in script.get('synonyms', []): - if any(keyword in synonym_list for keyword in keywords): - matches.append(script) - break - return matches - if __name__ == '__main__': - main() + main() \ No newline at end of file From 0ea271ae3bacba42c0575808e0b73a430d56b5c6 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Wed, 26 Jun 2024 11:50:30 -0400 Subject: [PATCH 11/69] Add the search in keywords json file --- scripts/scil_search_keywords.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 6d6c4e541..5aad235e2 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -21,6 +21,7 @@ import nltk from nltk.stem import PorterStemmer from colorama import init, Fore, Style +import json from scilpy.io.utils import add_verbose_arg @@ -36,6 +37,9 @@ stemmer = PorterStemmer() +# Path to the JSON file containing script information and keywords +JSON_FILE_PATH = pathlib.Path(__file__).parent.parent / 'scilpy-bot-scripts'/'Vocabulary'/'Keywords.json' + def _build_arg_parser(): p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) @@ -140,7 +144,19 @@ def main(): logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") logging.info("\n") - + # If no matches found, check in the keywords file + # Load keywords from the JSON file + with open(JSON_FILE_PATH, 'r') as f: + keywords_data = json.load(f) + + if not matches: + for script in keywords_data['scripts']: + script_name = script['name'] + script_keywords = script['keywords'] + if all([stem in _stem_text(' '.join(script_keywords)) for stem in stemmed_keywords]): + matches.append(script_name) + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{script_name}{Style.RESET_ALL}") + if not matches: logging.info(_make_title(' No results found! 
')) From 7c8c7ee077cb91239c56d1d5361ac5a8073a0001 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Mon, 1 Jul 2024 21:29:46 -0400 Subject: [PATCH 12/69] clean code --- scripts/scil_search_keywords.py | 51 ++++++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 4 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 5aad235e2..b6dac857d 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -38,7 +38,8 @@ stemmer = PorterStemmer() # Path to the JSON file containing script information and keywords -JSON_FILE_PATH = pathlib.Path(__file__).parent.parent / 'scilpy-bot-scripts'/'Vocabulary'/'Keywords.json' +KEYWORDS_FILE_PATH = pathlib.Path(__file__).parent.parent / 'scilpy-bot-scripts'/'Vocabulary'/'Keywords.json' +SYNONYMS_FILE_PATH = pathlib.Path(__file__).parent.parent / 'scilpy-bot-scripts'/'Vocabulary'/'Synonyms.json' def _build_arg_parser(): p = argparse.ArgumentParser(description=__doc__, @@ -145,18 +146,39 @@ def main(): logging.info("\n") # If no matches found, check in the keywords file - # Load keywords from the JSON file - with open(JSON_FILE_PATH, 'r') as f: + with open(KEYWORDS_FILE_PATH, 'r') as f: keywords_data = json.load(f) if not matches: + print("search by scripts keywords") for script in keywords_data['scripts']: script_name = script['name'] script_keywords = script['keywords'] if all([stem in _stem_text(' '.join(script_keywords)) for stem in stemmed_keywords]): matches.append(script_name) logging.info(f"{Fore.BLUE}{Style.BRIGHT}{script_name}{Style.RESET_ALL}") - + + + + # If still no matches found, check for synonyms in the synonyms file + with open(SYNONYMS_FILE_PATH, 'r') as f: + synonyms_data = json.load(f) + + if not matches: + for keyword in args.keywords: + synonyms = _get_synonyms(keyword, synonyms_data) + for script in sorted(script_dir.glob('*.py')): + filename = script.stem + if filename == '__init__' or filename == 'scil_search_keywords': + continue + search_text = _get_docstring_from_script_path(str(script)) + if any(synonym in search_text for synonym in synonyms): + matches.append(filename) + first_sentence, _ = _split_first_sentence(search_text) + logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{filename}{Style.RESET_ALL}: {first_sentence}") + logging.info("\n") + if not matches: logging.info(_make_title(' No results found! ')) @@ -304,5 +326,26 @@ def _highlight_keywords(text, stemmed_keywords): highlighted_text.append(word) return ' '.join(highlighted_text) +def _get_synonyms(keyword, synonyms_data): + """ + Get synonyms for a given keyword from the synonyms data. + + Parameters + ---------- + keyword : str + Keyword to find synonyms for. + synonyms_data : dict + Dictionary containing synonyms data. + + Returns + ------- + list of str + List of synonyms for the given keyword. 
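A hypothetical round-trip for _get_synonyms, assuming the Synonyms.json layout added later in this patch series (each entry is one list of interchangeable terms):

    >>> data = {'synonyms': [['shell', 'bval', 'b-value', 'bvals']]}
    >>> _get_synonyms('bval', data)
    ['shell', 'bval', 'b-value', 'bvals']
    >>> _get_synonyms('fodf', data)
    []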
+ """ + for synonym_set in synonyms_data['synonyms']: + if keyword in synonym_set: + return synonym_set + return [] + if __name__ == '__main__': main() \ No newline at end of file From 542e8c8ee12cc654adaa254ac047d533bd2c9547 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Mon, 1 Jul 2024 21:30:22 -0400 Subject: [PATCH 13/69] clean code --- scilpy-bot-scripts/Vocabulary/Keywords.json | 1061 + scilpy-bot-scripts/Vocabulary/Synonyms.json | 55 + .../{json_files => Vocabulary}/acronyms.json | 0 scilpy-bot-scripts/contextual_search.py | 60 - scilpy-bot-scripts/generate_help_files.py | 19 +- scilpy-bot-scripts/generate_json_files.py | 84 - scilpy-bot-scripts/generate_synonyms.py | 90 - .../json_files/Scilpy_vocabulary.txt | 431 - .../json_files/knowledge_base.json | 44237 ---------------- .../json_files/knowledge_base_word2vec.json | 9715 ---- 10 files changed, 1127 insertions(+), 54625 deletions(-) create mode 100644 scilpy-bot-scripts/Vocabulary/Keywords.json create mode 100644 scilpy-bot-scripts/Vocabulary/Synonyms.json rename scilpy-bot-scripts/{json_files => Vocabulary}/acronyms.json (100%) delete mode 100644 scilpy-bot-scripts/contextual_search.py mode change 100755 => 100644 scilpy-bot-scripts/generate_help_files.py delete mode 100644 scilpy-bot-scripts/generate_json_files.py delete mode 100644 scilpy-bot-scripts/generate_synonyms.py delete mode 100644 scilpy-bot-scripts/json_files/Scilpy_vocabulary.txt delete mode 100644 scilpy-bot-scripts/json_files/knowledge_base.json delete mode 100644 scilpy-bot-scripts/json_files/knowledge_base_word2vec.json diff --git a/scilpy-bot-scripts/Vocabulary/Keywords.json b/scilpy-bot-scripts/Vocabulary/Keywords.json new file mode 100644 index 000000000..98875e3bd --- /dev/null +++ b/scilpy-bot-scripts/Vocabulary/Keywords.json @@ -0,0 +1,1061 @@ +{ + "scripts": [ + { + "name": "scil_bids_validate.py", + "keywords": [] + }, + { + "name": "scil_bingham_metrics.py", + "keywords": [ + "fiber density", + "fiber spread", + "fiber fraction", + "fixel" + ] + }, + { + "name": "scil_btensor_metrics.py", + "keywords": [ + "b-tensor", + "b-tensor encoding", + "tensor-valued diffusion MRI", + "micro-FA", + "uFA", + "order parameter", + "OP", + "DIVIDE", + "microstructure", + "linear tensor encoding (LTE)", + "planar tensor encoding (PTE)", + "spherical tensor encoding (STE)", + "multidimensional diffusion MRI" + ] + }, + { + "name": "scil_bundle_clean_qbx_clusters.py", + "keywords": [] + }, + { + "name": "scil_bundle_compute_centroid.py", + "keywords": [] + }, + { + "name": "scil_bundle_compute_endpoints_map.py", + "keywords": [] + }, + { + "name": "scil_bundle_diameter.py", + "keywords": [] + }, + { + "name": "scil_bundle_filter_by_occurence.py", + "keywords": [] + }, + { + "name": "scil_bundle_generate_priors.py", + "keywords": [] + }, + { + "name": "scil_bundle_label_map.py", + "keywords": [ + "parcellate", + "subdivide", + "split" + ] + }, + { + "name": "scil_bundle_mean_fixel_afd_from_hdf5.py", + "keywords": [] + }, + { + "name": "scil_bundle_mean_fixel_afd.py", + "keywords": [] + }, + { + "name": "scil_bundle_mean_fixel_bingham_metric.py", + "keywords": [ + "tractometry", + "lobe metrics", + "fiber density", + "fiber spread", + "fiber fraction", + "mean along bundle" + ] + }, + { + "name": "scil_bundle_mean_std.py", + "keywords": [] + }, + { + "name": "scil_bundle_pairwise_comparison.py", + "keywords": [] + }, + { + "name": "scil_bundle_reject_outliers.py", + "keywords": [] + }, + { + "name": "scil_bundle_score_many_bundles_one_tractogram.py", + "keywords": [] + }, 
+ { + "name": "scil_bundle_score_same_bundle_many_segmentations.py", + "keywords": [] + }, + { + "name": "scil_bundle_shape_measures.py", + "keywords": [ + "geometry" + ] + }, + { + "name": "scil_bundle_uniformize_endpoints.py", + "keywords": [] + }, + { + "name": "scil_bundle_volume_per_label.py", + "keywords": [] + }, + { + "name": "scil_connectivity_compare_populations.py", + "keywords": [] + }, + { + "name": "scil_connectivity_compute_matrices.py", + "keywords": [] + }, + { + "name": "scil_connectivity_compute_pca.py", + "keywords": [] + }, + { + "name": "scil_connectivity_filter.py", + "keywords": [] + }, + { + "name": "scil_connectivity_graph_measures.py", + "keywords": [] + }, + { + "name": "scil_connectivity_hdf5_average_density_map.py", + "keywords": [] + }, + { + "name": "scil_connectivity_math.py", + "keywords": [] + }, + { + "name": "scil_connectivity_normalize.py", + "keywords": [] + }, + { + "name": "scil_connectivity_pairwise_agreement.py", + "keywords": [] + }, + { + "name": "scil_connectivity_print_filenames.py", + "keywords": [] + }, + { + "name": "scil_connectivity_reorder_rois.py", + "keywords": [] + }, + { + "name": "scil_denoising_nlmeans.py", + "keywords": [] + }, + { + "name": "scil_dki_metrics.py", + "keywords": [] + }, + { + "name": "scil_dti_convert_tensors.py", + "keywords": [ + "tensor", + "tensors", + "triangular matrix", + "fsl format", + "nifti format", + "mrtrix format", + "dipy format" + ] + }, + { + "name": "scil_dti_metrics.py", + "keywords": [ + "dti", + "metrics", + "diffusion tensor", + "FA", + "MD", + "AD", + "RD", + "RGB", + "eigenvector", + "eigenvalue", + "diffusivity" + ] + }, + { + "name": "scil_dwi_apply_bias_field.py", + "keywords": [] + }, + { + "name": "scil_dwi_compute_snr.py", + "keywords": [] + }, + { + "name": "scil_dwi_concatenate.py", + "keywords": [ + "merge", + "fuse", + "concatenate", + "diffusion data", + "DWI" + ] + }, + { + "name": "scil_dwi_convert_FDF.py", + "keywords": [] + }, + { + "name": "scil_dwi_detect_volume_outliers.py", + "keywords": [] + }, + { + "name": "scil_dwi_extract_b0.py", + "keywords": [ + "b0 extraction", + "b0", + "b-value 0", + "extract" + ] + }, + { + "name": "scil_dwi_extract_shell.py", + "keywords": [ + "shell extraction", + "b-value extraction", + "DWI", + "shell/b-value selection", + "extract", + "DWI split", + "DWI splitting", + "multiple shells" + ] + }, + { + "name": "scil_dwi_powder_average.py", + "keywords": [ + "powder average", + "DWI" + ] + }, + { + "name": "scil_dwi_prepare_eddy_command.py", + "keywords": [] + }, + { + "name": "scil_dwi_prepare_topup_command.py", + "keywords": [] + }, + { + "name": "scil_dwi_reorder_philips.py", + "keywords": [ + "Philips", + "DWI reorder", + "original gradient table" + ] + }, + { + "name": "scil_dwi_split_by_indices.py", + "keywords": [ + "DWI splitting", + "DWI split", + "indices" + ] + }, + { + "name": "scil_dwi_to_sh.py", + "keywords": [ + "signal", + "spherical harmonics" + ] + }, + { + "name": "scil_fodf_max_in_ventricles.py", + "keywords": [ + "ventricles", + "maximum fODF", + "absolute threshold" + ] + }, + { + "name": "scil_fodf_memsmt.py", + "keywords": [ + "b-tensor", + "b-tensor encoding", + "multi-encoding", + "multi-shell", + "multi-tissue", + "memsmt", + "linear tensor encoding (LTE)", + "planar tensor encoding (PTE)", + "spherical tensor encoding (STE)", + "multidimensional diffusion MRI", + "volume fraction", + "CSD", + "constrained spherical deconvolution", + "fODF" + ] + }, + { + "name": "scil_fodf_metrics.py", + "keywords": [ + "fODF 
metrics", + "NuFO", + "peaks", + "directions", + "peak values", + "peak indices", + "rgb", + "afd" + ] + }, + { + "name": "scil_fodf_msmt.py", + "keywords": [ + "CSD", + "constrained spherical deconvolution", + "multi-shell", + "multi-tissue", + "msmt", + "volume fraction", + "fODF" + ] + }, + { + "name": "scil_fodf_ssst.py", + "keywords": [ + "CSD", + "constrained spherical deconvolution", + "single-shell", + "single-tissue", + "ssst", + "fODF" + ] + }, + { + "name": "scil_fodf_to_bingham.py", + "keywords": [ + "lobe", + "lobe-specific", + "bingham-odf" + ] + }, + { + "name": "scil_freewater_maps.py", + "keywords": [] + }, + { + "name": "scil_freewater_priors.py", + "keywords": [] + }, + { + "name": "scil_frf_mean.py", + "keywords": [ + "fiber response function", + "response function", + "RF", + "FRF", + "mean", + "mean FRF" + ] + }, + { + "name": "scil_frf_memsmt.py", + "keywords": [ + "fiber response function", + "response function", + "RF", + "FRF", + "b-tensor", + "b-tensor encoding", + "multi-encoding", + "multi-shell", + "multi-tissue", + "memsmt", + "linear tensor encoding (LTE)", + "planar tensor encoding (PTE)", + "spherical tensor encoding (STE)", + "multidimensional diffusion MRI" + ] + }, + { + "name": "scil_frf_msmt.py", + "keywords": [ + "fiber response function", + "response function", + "RF", + "FRF", + "multi-shell", + "multi-tissue", + "msmt" + ] + }, + { + "name": "scil_frf_set_diffusivities.py", + "keywords": [ + "fiber response function", + "response function", + "RF", + "FRF", + "diffusivity", + "diffusivities", + "fixed FRF" + ] + }, + { + "name": "scil_frf_ssst.py", + "keywords": [ + "fiber response function", + "response function", + "RF", + "FRF", + "single-shell", + "single-tissue", + "ssst" + ] + }, + { + "name": "scil_get_version.py", + "keywords": [] + }, + { + "name": "scil_gradients_apply_transform.py", + "keywords": [ + "gradients", + "bvecs", + "b-vectors", + "transformation", + "transform" + ] + }, + { + "name": "scil_gradients_convert.py", + "keywords": [ + "gradients", + "gradient table", + "fsl format", + "mrtrix format", + "bval", + "bvec" + ] + }, + { + "name": "scil_gradients_generate_sampling.py", + "keywords": [ + "gradients", + "gradient table", + "sampling scheme", + "sampling", + "hardi", + "multi-shell", + "caruyer", + "optimized gradients" + ] + }, + { + "name": "scil_gradients_modify_axes.py", + "keywords": [] + }, + { + "name": "scil_gradients_round_bvals.py", + "keywords": [ + "bvals", + "b-value", + "round bvals", + "shell" + ] + }, + { + "name": "scil_gradients_validate_correct_eddy.py", + "keywords": [] + }, + { + "name": "scil_gradients_validate_correct.py", + "keywords": [ + "fiber coherence index", + "coherence" + ] + }, + { + "name": "scil_header_print_info.py", + "keywords": [] + }, + { + "name": "scil_header_validate_compatibility.py", + "keywords": [] + }, + { + "name": "scil_json_convert_entries_to_xlsx.py", + "keywords": [] + }, + { + "name": "scil_json_harmonize_entries.py", + "keywords": [] + }, + { + "name": "scil_json_merge_entries.py", + "keywords": [] + }, + { + "name": "scil_labels_combine.py", + "keywords": [] + }, + { + "name": "scil_labels_dilate.py", + "keywords": [] + }, + { + "name": "scil_labels_remove.py", + "keywords": [] + }, + { + "name": "scil_labels_split_volume_by_ids.py", + "keywords": [] + }, + { + "name": "scil_labels_split_volume_from_lut.py", + "keywords": [] + }, + { + "name": "scil_lesions_info.py", + "keywords": [] + }, + { + "name": "scil_mti_adjust_B1_header.py", + "keywords": [ + "MTI", + 
"magnetization transfer", + "MT", + "B1 map", + "header", + "B1" + ] + }, + { + "name": "scil_mti_maps_ihMT.py", + "keywords": [ + "MTI", + "magnetization transfer", + "MT", + "ihMT", + "ihMTR", + "ihMTsat", + "myelin", + "MTR", + "MTsat" + ] + }, + { + "name": "scil_mti_maps_MT.py", + "keywords": [ + "MTI", + "magnetization transfer", + "MT", + "MTR", + "MTsat", + "myelin" + ] + }, + { + "name": "scil_NODDI_maps.py", + "keywords": [] + }, + { + "name": "scil_NODDI_priors.py", + "keywords": [] + }, + { + "name": "scil_plot_stats_per_point.py", + "keywords": [] + }, + { + "name": "scil_qball_metrics.py", + "keywords": [ + "CSA", + "QBI", + "q-ball imaging", + "diffusion odf" + ] + }, + { + "name": "scil_rgb_convert.py", + "keywords": [] + }, + { + "name": "scil_sh_convert.py", + "keywords": [ + "spherical harmonics", + "tournier", + "mrtrix", + "descoteaux", + "dipy", + "modern", + "legacy" + ] + }, + { + "name": "scil_sh_fusion.py", + "keywords": [ + "spherical harmonics", + "SH", + "fusion", + "largest magnitude", + "merge", + "coefficients" + ] + }, + { + "name": "scil_sh_to_aodf.py", + "keywords": [ + "asymmetric", + "asymmetries", + "filtering", + "full basis" + ] + }, + { + "name": "scil_sh_to_rish.py", + "keywords": [ + "rotation invariant spherical harmonics", + "features" + ] + }, + { + "name": "scil_sh_to_sf.py", + "keywords": [ + "spherical harmonics", + "spherical functions", + "SH", + "SF", + "convertion", + "conversion" + ] + }, + { + "name": "scil_stats_group_comparison.py", + "keywords": [] + }, + { + "name": "scil_surface_apply_transform.py", + "keywords": [ + "registration", + "warp", + "transformation", + "surface", + "mesh", + "vtk FreeSurfer" + ] + }, + { + "name": "scil_surface_convert.py", + "keywords": [ + "surface", + "mesh", + "vtk FreeSurfer" + ] + }, + { + "name": "scil_surface_flip.py", + "keywords": [ + "surface", + "mesh", + "vtk FreeSurfer" + ] + }, + { + "name": "scil_surface_smooth.py", + "keywords": [ + "surface", + "mesh", + "vtk FreeSurfer" + ] + }, + { + "name": "scil_tracking_local_dev.py", + "keywords": [ + "development", + "runge-kutta", + "pure-python", + "onboarding", + "tractography", + "dipy" + ] + }, + { + "name": "scil_tracking_local.py", + "keywords": [ + "eudx", + "tractography", + "tracking", + "peak tracking", + "local tracking", + "probabilistic", + "deterministic", + "prob", + "det" + ] + }, + { + "name": "scil_tracking_pft_maps_edit.py", + "keywords": [ + "particule filtering tractography", + "cmc" + ] + }, + { + "name": "scil_tracking_pft_maps.py", + "keywords": [ + "particle filter tractography", + "continuous map criterion", + "tracking", + "fodf", + "cmc", + "particle filtering tractography" + ] + }, + { + "name": "scil_tracking_pft.py", + "keywords": [ + "particle filter tractography", + "continuous map criterion", + "tracking", + "fodf" + ] + }, + { + "name": "scil_tractogram_alter.py", + "keywords": [] + }, + { + "name": "scil_tractogram_apply_transform.py", + "keywords": [ + "ants", + "registration", + "affine", + "linear", + "nonlinear" + ] + }, + { + "name": "scil_tractogram_apply_transform_to_hdf5.py", + "keywords": [] + }, + { + "name": "scil_tractogram_assign_custom_color.py", + "keywords": [] + }, + { + "name": "scil_tractogram_assign_uniform_color.py", + "keywords": [] + }, + { + "name": "scil_tractogram_commit.py", + "keywords": [ + "microstructure informed", + "filtering", + "mit" + ] + }, + { + "name": "scil_tractogram_compress.py", + "keywords": [] + }, + { + "name": "scil_tractogram_compute_density_map.py", + 
"keywords": [ + "TDI", + "track density imaging", + "streamline count" + ] + }, + { + "name": "scil_tractogram_compute_TODI.py", + "keywords": [ + "track orientation density imaging", + "track density imaging", + "TDI" + ] + }, + { + "name": "scil_tractogram_convert_hdf5_to_trk.py", + "keywords": [] + }, + { + "name": "scil_tractogram_convert.py", + "keywords": [] + }, + { + "name": "scil_tractogram_count_streamlines.py", + "keywords": [] + }, + { + "name": "scil_tractogram_cut_streamlines.py", + "keywords": [] + }, + { + "name": "scil_tractogram_detect_loops.py", + "keywords": [] + }, + { + "name": "scil_tractogram_dpp_math.py", + "keywords": [ + "tractogram", + "data per point" + ] + }, + { + "name": "scil_tractogram_extract_ushape.py", + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_anatomy.py", + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_length.py", + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_orientation.py", + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_roi.py", + "keywords": [ + "segment", + "atlas" + ] + }, + { + "name": "scil_tractogram_fix_trk.py", + "keywords": [] + }, + { + "name": "scil_tractogram_flip.py", + "keywords": [] + }, + { + "name": "scil_tractogram_math.py", + "keywords": [] + }, + { + "name": "scil_tractogram_pairwise_comparison.py", + "keywords": [] + }, + { + "name": "scil_tractogram_print_info.py", + "keywords": [] + }, + { + "name": "scil_tractogram_project_map_to_streamlines.py", + "keywords": [] + }, + { + "name": "scil_tractogram_project_streamlines_to_map.py", + "keywords": [] + }, + { + "name": "scil_tractogram_qbx.py", + "keywords": [ + "clustering" + ] + }, + { + "name": "scil_tractogram_register.py", + "keywords": [] + }, + { + "name": "scil_tractogram_remove_invalid.py", + "keywords": [] + }, + { + "name": "scil_tractogram_resample_nb_points.py", + "keywords": [] + }, + { + "name": "scil_tractogram_resample.py", + "keywords": [] + }, + { + "name": "scil_tractogram_seed_density_map.py", + "keywords": [] + }, + { + "name": "scil_tractogram_segment_and_score.py", + "keywords": [] + }, + { + "name": "scil_tractogram_segment_bundles_for_connectivity.py", + "keywords": [] + }, + { + "name": "scil_tractogram_segment_bundles.py", + "keywords": [] + }, + { + "name": "scil_tractogram_segment_one_bundle.py", + "keywords": [] + }, + { + "name": "scil_tractogram_shuffle.py", + "keywords": [] + }, + { + "name": "scil_tractogram_smooth.py", + "keywords": [] + }, + { + "name": "scil_tractogram_split.py", + "keywords": [] + }, + { + "name": "scil_viz_bingham_fit.py", + "keywords": [ + "visualisation", + "bingham distributions", + "bingham odf" + ] + }, + { + "name": "scil_viz_bundle.py", + "keywords": [ + "visualisation", + "bundle", + "tractogram", + "streamlines" + ] + }, + { + "name": "scil_viz_bundle_screenshot_mni.py", + "keywords": [] + }, + { + "name": "scil_viz_bundle_screenshot_mosaic.py", + "keywords": [] + }, + { + "name": "scil_viz_connectivity.py", + "keywords": [] + }, + { + "name": "scil_viz_dti_screenshot.py", + "keywords": [] + }, + { + "name": "scil_viz_fodf.py", + "keywords": [ + "visualize", + "fiber odf", + "odf", + "sh", + "peaks", + "background" + ] + }, + { + "name": "scil_viz_gradients_screenshot.py", + "keywords": [] + }, + { + "name": "scil_viz_tractogram_seeds_3d.py", + "keywords": [ + "visualize", + "seed", + "density", + "3D", + "seed density" + ] + }, + { + "name": "scil_viz_tractogram_seeds.py", + "keywords": [ + "visualize", + "seed", + "streamline", + "streamline 
origin" + ] + }, + { + "name": "scil_viz_volume_histogram.py", + "keywords": [ + "visualize", + "histogram", + "metric" + ] + }, + { + "name": "scil_viz_volume_scatterplot.py", + "keywords": [ + "visualize", + "scatterplot", + "distribution", + "metric" + ] + }, + { + "name": "scil_viz_volume_screenshot_mosaic.py", + "keywords": [] + }, + { + "name": "scil_viz_volume_screenshot.py", + "keywords": [] + }, + { + "name": "scil_volume_apply_transform.py", + "keywords": [] + }, + { + "name": "scil_volume_b0_synthesis.py", + "keywords": [] + }, + { + "name": "scil_volume_count_non_zero_voxels.py", + "keywords": [] + }, + { + "name": "scil_volume_crop.py", + "keywords": [] + }, + { + "name": "scil_volume_flip.py", + "keywords": [] + }, + { + "name": "scil_volume_math.py", + "keywords": [] + }, + { + "name": "scil_volume_remove_outliers_ransac.py", + "keywords": [] + }, + { + "name": "scil_volume_resample.py", + "keywords": [] + }, + { + "name": "scil_volume_reshape_to_reference.py", + "keywords": [] + }, + { + "name": "scil_volume_stats_in_labels.py", + "keywords": [] + }, + { + "name": "scil_volume_stats_in_ROI.py", + "keywords": [] + } + ] +} \ No newline at end of file diff --git a/scilpy-bot-scripts/Vocabulary/Synonyms.json b/scilpy-bot-scripts/Vocabulary/Synonyms.json new file mode 100644 index 000000000..ffad9b9d8 --- /dev/null +++ b/scilpy-bot-scripts/Vocabulary/Synonyms.json @@ -0,0 +1,55 @@ +{ + "synonyms": [ + [ + "Bundle", + "tract", + "pathway", + "fasciculus" + ], + [ + "multi-shells", + "multishell", + "multi shell", + "msmt" + ], + [ + "single-shell", + "single shell", + "ssst" + ], + [ + "ODF", + "Orientation Distribution Function" + ], + [ + "DWI", + "Diffusion Weighted Imaging" + ], + [ + "shell", + "bval", + "b-value", + "bvals" + ], + [ + "b-tensor encoding", + "tensor-valued" + ], + [ + "surface", + "mesh" + ], + [ + "merge", + "fuse", + "concatenate", + "add" + ], + [ + "parcellate", + "subdivide", + "split", + "divide" + ] + ] + } \ No newline at end of file diff --git a/scilpy-bot-scripts/json_files/acronyms.json b/scilpy-bot-scripts/Vocabulary/acronyms.json similarity index 100% rename from scilpy-bot-scripts/json_files/acronyms.json rename to scilpy-bot-scripts/Vocabulary/acronyms.json diff --git a/scilpy-bot-scripts/contextual_search.py b/scilpy-bot-scripts/contextual_search.py deleted file mode 100644 index 5890f80aa..000000000 --- a/scilpy-bot-scripts/contextual_search.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import json -import spacy -from pathlib import Path - -# Initialize SpaCy -nlp = spacy.load('en_core_web_md') - -def load_knowledge_base(json_file): - """Load the knowledge base from a JSON file.""" - with open(json_file, 'r') as f: - knowledge_base = json.load(f) - return knowledge_base - -def contextual_search(query, knowledge_base, threshold=0.1): - """Perform a contextual search based on the user query.""" - query_doc = nlp(query) - best_match = None - highest_similarity = 0 - - for script in knowledge_base['scripts']: - # Combine docstring, help text, synonyms, and keywords for better matching - description = ( - script['docstring'] + ' ' + script['help'] + ' ' + - ' '.join(script['synonyms']) + ' ' + ' '.join(script['keywords']) - ) - description_doc = nlp(description) - similarity = query_doc.similarity(description_doc) - if similarity > highest_similarity and similarity>threshold: - highest_similarity = similarity - best_match = script - - return best_match, highest_similarity - -def main(): - base_dir = 
Path(__file__).parent - - json_file = base_dir / 'json_files' / 'knowledge_base.json' - - # Load the knowledge base from JSON file - knowledge_base = load_knowledge_base(json_file) - - # Example user query - query = "I need a script that computes the SH coefficient directly on the raw DWI signal." - - - # Perform contextual search - best_match, similarity = contextual_search(query, knowledge_base) - - if best_match: - print(f"The best match is {best_match['name']} with a similarity score of {similarity:.2f}") - print(f"Docstring: {best_match['docstring']}") - print(f"Help: {best_match['help']}") - else: - print("No relevant script found.") - -if __name__ == '__main__': - main() diff --git a/scilpy-bot-scripts/generate_help_files.py b/scilpy-bot-scripts/generate_help_files.py old mode 100755 new mode 100644 index ee281a343..5776c24fb --- a/scilpy-bot-scripts/generate_help_files.py +++ b/scilpy-bot-scripts/generate_help_files.py @@ -1,25 +1,24 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ -This script iterates over all Python scripts in the 'scripts' directory, runs each script with the '--h' flag to generate help text, and saves the output to corresponding hidden files in the '.hidden' directory. +This script iterates over all Python scripts in the 'scripts' directory, +runs each script with the '--h' flag to generate help text, +and saves the output to corresponding hidden files in the '.hidden' directory. -By doing this, we can precompute the help outputs for each script, which can be useful for faster searches or documentation purposes. +By doing this, we can precompute the help outputs for each script, +which can be useful for faster searches. -Scripts that should be skipped: - -- '__init__.py' -- 'scil_search_keywords.py' +If a help file already exists for a script, the script is skipped, +and the existing help file is left unchanged. The help output is saved in a hidden directory to avoid clutter in the main scripts directory. """ -import os import subprocess from pathlib import Path -# Directory where your scripts are located scripts_dir = Path('scripts/') # Hidden directory to store help files @@ -31,6 +30,10 @@ if script.name == '__init__.py' or script.name == 'scil_search_keywords.py': continue help_file = hidden_dir / f'{script.name}.help' + # Check if help file already exists + if help_file.exists(): + print(f'Help file for {script.name} already exists. Skipping.') + continue # Run the script with --h and capture the output result = subprocess.run(['python', script, '--h'], capture_output=True, text=True) diff --git a/scilpy-bot-scripts/generate_json_files.py b/scilpy-bot-scripts/generate_json_files.py deleted file mode 100644 index 2c2a1c63b..000000000 --- a/scilpy-bot-scripts/generate_json_files.py +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import os -import json -import ast -from pathlib import Path - - - -def _get_docstring_from_script_path(script_path): - """Extract a python file's docstring from a filepath. - - Parameters - ---------- - script : str - Path to python file - - Returns - ------- - docstring : str - The file docstring, or an empty string if there was no docstring. 
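The ast-based docstring extraction removed here (the same pattern is still used in scil_search_keywords.py) in miniature, as a hypothetical REPL session:

    >>> import ast
    >>> module = ast.parse("'''Top-level docstring.'''\nx = 1\n")
    >>> ast.get_docstring(module)
    'Top-level docstring.'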
diff --git a/scilpy-bot-scripts/generate_json_files.py b/scilpy-bot-scripts/generate_json_files.py
deleted file mode 100644
index 2c2a1c63b..000000000
--- a/scilpy-bot-scripts/generate_json_files.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-import os
-import json
-import ast
-from pathlib import Path
-
-
-
-def _get_docstring_from_script_path(script_path):
-    """Extract a python file's docstring from a filepath.
-
-    Parameters
-    ----------
-    script_path : str
-        Path to python file
-
-    Returns
-    -------
-    docstring : str
-        The file docstring, or an empty string if there was no docstring.
-    """
-    with open(script_path, 'r') as reader:
-        file_contents = reader.read()
-    module = ast.parse(file_contents)
-    docstring = ast.get_docstring(module) or ''
-    return docstring
-
-
-def _get_help_text_from_file(help_file_path):
-    with open(help_file_path, 'r') as f:
-        help_text = f.read()
-    return help_text
-
-
-def generate_json(knowledge_base_dir, hidden_dir, output_json_dir):
-    knowledge_base = {'scripts': []}
-
-    for script in sorted(Path(knowledge_base_dir).glob('*.py')):
-        script_name = script.stem
-        if script_name in ('__init__', 'scil_search_keywords'):
-            continue
-
-        docstring = _get_docstring_from_script_path(str(script))
-        help_file_path = Path(hidden_dir) / f'{script_name}.py.help'
-
-        if not help_file_path.exists():
-            print(f"Warning: Help file for {script_name} not found in {hidden_dir}")
-            help_text = ''
-        else:
-            help_text = _get_help_text_from_file(help_file_path)
-
-        script_info = {
-            'name': script_name,
-            'docstring': docstring,
-            'help': help_text,
-            'synonyms': [],  # This can be filled later by lab members
-            'keywords': []  # This can be filled later by lab members
-        }
-
-        knowledge_base['scripts'].append(script_info)
-
-    # Ensure the output directory exists
-    output_json_dir.mkdir(parents=True, exist_ok=True)
-    output_json_path = output_json_dir / 'knowledge_base.json'
-
-    with open(output_json_path, 'w') as json_file:
-        json.dump(knowledge_base, json_file, indent=4)
-
-    print(f"Knowledge base JSON has been generated at {output_json_path}")
-
-
-def main():
-    base_dir = Path(__file__).parent.parent
-    knowledge_base_dir = base_dir / 'scripts/'
-    hidden_dir = knowledge_base_dir / '.hidden'
-    output_json_dir = base_dir / 'scilpy-bot-scripts' / 'json_files'
-
-    generate_json(knowledge_base_dir, hidden_dir, output_json_dir)
-
-
-if __name__ == '__main__':
-    main()
\ No newline at end of file
diff --git a/scilpy-bot-scripts/generate_synonyms.py b/scilpy-bot-scripts/generate_synonyms.py
deleted file mode 100644
index a7354ea4f..000000000
--- a/scilpy-bot-scripts/generate_synonyms.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import json
-import gensim.downloader as api
-from scipy.spatial.distance import cosine
-import re
-from pathlib import Path
-
-# Load vocabulary and acronyms
-def load_vocabulary(vocab_file_path):
-    with open(vocab_file_path, 'r', encoding='utf-8') as file:
-        vocabulary = [line.strip() for line in file]
-    return vocabulary
-
-def load_acronyms(acronyms_file_path):
-    with open(acronyms_file_path, 'r', encoding='utf-8') as file:
-        acronyms = json.load(file)
-    return {entry['abbreviation']: entry['Description'] for entry in acronyms}
-
-# Load pre-trained word vectors
-word_vectors = api.load("word2vec-google-news-300")
-
-# Calculate similarity and find synonyms
-def get_word_embedding(word):
-    if word in word_vectors:
-        return word_vectors[word]
-    return None
-
-def calculate_similarity(word1, word2):
-    embedding1 = get_word_embedding(word1)
-    embedding2 = get_word_embedding(word2)
-    if embedding1 is not None and embedding2 is not None:
-        return 1 - cosine(embedding1, embedding2)
-    return 0
-
-def find_synonyms(word, vocabulary, acronyms_dict, threshold=0.7):
-    synonyms = []
-    for vocab_word in vocabulary:
-        # Check if it's an acronym
-        if vocab_word.startswith('*'):
-            acronym = vocab_word[1:]
-            if acronym in acronyms_dict:
-                description = acronyms_dict[acronym]
-                description_words = description.split()
-                for desc_word in description_words:
-                    similarity = calculate_similarity(word, desc_word)
-                    if similarity >= threshold:
-                        synonyms.append(vocab_word)
-                        break
-        else:
-
similarity = calculate_similarity(word, vocab_word) - if similarity >= threshold: - synonyms.append(vocab_word) - return synonyms - -def extract_words(text): - return re.findall(r'\w+', text.lower()) - - -def generate_synonyms(script_entry, vocabulary, acronyms_dict): - words = set(extract_words(script_entry["docstring"]) + extract_words(script_entry["help"])) - synonyms_dict = {} - for word in words: - synonyms = find_synonyms(word, vocabulary, acronyms_dict) - if len(synonyms) != 0: - synonyms.append(word) - #synonyms_dict[word] = synonyms - script_entry['synonyms'].append(synonyms) - return script_entry - -def update_scripts_with_synonyms(json_filepath, vocabulary, acronyms_dict): - with open(json_filepath, 'r', encoding='utf-8') as file: - data = json.load(file) - - for script_entry in data['scripts']: - script_entry['synonyms'] = [] # Initialize the synonyms list - updated_script = generate_synonyms(script_entry, vocabulary, acronyms_dict) - - with open(json_filepath, 'w', encoding='utf-8') as file: - json.dump(data, file, indent=4) - - -base_dir = Path(__file__).parent - -vocab_filepath = base_dir/'json_files'/'Scilpy_vocabulary.txt' -acronyms_filepath = base_dir/'json_files'/'acronyms.json' -json_filepath = base_dir/'json_files'/'knowledge_base_word2vec.json' -vocabulary = load_vocabulary(vocab_filepath) -acronyms_dict = load_acronyms(acronyms_filepath) - -update_scripts_with_synonyms(json_filepath, vocabulary, acronyms_dict) -print(f"Scripts in {json_filepath} have been updated with synonyms.") diff --git a/scilpy-bot-scripts/json_files/Scilpy_vocabulary.txt b/scilpy-bot-scripts/json_files/Scilpy_vocabulary.txt deleted file mode 100644 index 299367400..000000000 --- a/scilpy-bot-scripts/json_files/Scilpy_vocabulary.txt +++ /dev/null @@ -1,431 +0,0 @@ -*ATS -Allin -*BLS -BrainMap -Brainnetome -*CDL -*CIN -*CVL -Catani -Conclusion -Connections -*DDI -Data -Dejerine -Diffusion -Experiment -Fan -Figure -Figures -Frontal -Functionally -Gage -Gyrus -*HCP -*IFG -*INS -*IPL -*ITG -Imaging -Laird -Lancaster -Lobe -Longitudinal -*MFG -*MME -*MRI -*MTG -Maier -Mandonnet -MesL -*MesLS -Middle -Neher -*OFG -*PCG -*PCUN -*PaG -Pandya -*PoCG -*ROIs -*RVA -*SFG -*SLS -*SPL -Sarubbo -Schotten -Sporns -Structurally -*TOIs -TractoFind -Tractofind -Trends -acted -action -active -affective -algorithm -analysis -anatomical -anatomically -anatomy -animal -anterior -application -applied -arcuate -area -areas -articulating -assigned -association -atlas -attention -atypical -average -awareness -axial -axon -axonal -axons -basal -based -binary -blue -body -brain -brainnetome -branch -bundles -callosum -capsule -categorized -caudal -cell -cingulum -clear -clinical -cognition -commissural -complex -complexes -comprised -comprising -compromised -conditions -connect -connected -connecting -connection -connectional -connections -connectivity -connectome -connects -considered -consistently -contrast -coronal -corpus -cortex -cortical -create -*dMRI -dark -data -database -defined -degeneration -degree -depicted -desires -difference -differences -diffusion -direction -discovery -disease -disorder -dissection -dissimilar -divergence -dominant -dorsal -dorsally -dorsolateral -echoed -emotion -emotional -episodic -error -examine -examined -examining -executing -exist -experience -experiment -exploration -false -fasciculus -fibre -fibres -form -forms -frontal -fronto -function -functional -functionally -functions -fundamental -future -general -genu -global -greater -green -grey -gyrus -healthy -held -hemisphere 
-hemispheres -hemispheric -high -higher -highest -highly -homologous -human -humans -image -imaging -imagining -implicated -inconsistent -increase -indicating -individual -individuals -inferior -inferiorly -inhibiting -insight -insights -insula -insular -integrate -integrated -integrating -internal -intersected -invalid -involved -key -keywords -knowledge -lack -language -large -larger -lateral -learning -left -level -limb -limbic -limitation -limitations -limiting -literature -lobe -lobes -lobule -long -longitudinal -loss -main -manner -maps -matrices -matter -meaning -medial -memory -mesial -meta -methodologies -methodology -methods -midbody -middle -mique -mirror -mortem -motion -motor -movement -movements -mémique -méthique -naming -network -networks -neuroanatomy -neuron -nuclei -nucleus -observation -observed -observing -occipital -occurrence -occurrent -orbital -order -orientation -oriented -overt -pain -papers -paracentral -parameter -parameters -parcels -parietal -participant -participants -pass -pathological -pathway -pathways -perception -planes -population -positive -positives -possibility -post -postcentral -posterior -potential -precentral -precision -presented -primary -principal -probabilistic -probability -process -processes -processing -project -projecting -projection -proposed -question -random -ranged -reasoning -red -region -regions -reliability -reliable -rendered -rendering -reported -represent -resolved -result -role -rooted -rostroventral -*sTOI -sTOIs -sagittal -scientific -seeding -sensory -sexual -shape -signal -similarity -size -social -somatosensation -space -spatial -specific -step -streamline -streamlines -structural -structurally -structure -structures -studies -study -subject -subjects -sublobe -sublobes -subsequently -sulcus -superior -superiorly -supported -surviving -systematic -systems -tasting -taxonomy -temporal -terminations -thalamus -thinking -threshold -thresholds -tool -total -tracing -tracking -tract -tractogram -tractograms -tractography -tracts -traditionally -trajectory -transverse -traverse -trend -true -understanding -unique -unknown -utilized -vTOI -*vTOIs -valid -validation -variability -variable -variance -variety -ventral -ventrolateral -vertically -view -views -visual -vivo -voies -volume -volumes -voxel -weighted -white -work -working diff --git a/scilpy-bot-scripts/json_files/knowledge_base.json b/scilpy-bot-scripts/json_files/knowledge_base.json deleted file mode 100644 index ee5f949f3..000000000 --- a/scilpy-bot-scripts/json_files/knowledge_base.json +++ /dev/null @@ -1,44237 +0,0 @@ -{ - "scripts": [ - { - "name": "scil_NODDI_maps", - "docstring": "Compute NODDI [1] maps using AMICO.\nMulti-shell DWI necessary.\n\nFormerly: scil_compute_NODDI.py", - "help": "usage: scil_NODDI_maps.py [-h] [--mask MASK] [--out_dir OUT_DIR]\n [--tolerance tol] [--skip_b0_check]\n [--para_diff PARA_DIFF] [--iso_diff ISO_DIFF]\n [--lambda1 LAMBDA1] [--lambda2 LAMBDA2]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec\n\nCompute NODDI [1] maps using AMICO.\nMulti-shell DWI necessary.\n\nFormerly: scil_compute_NODDI.py\n\npositional arguments:\n in_dwi DWI file acquired with a NODDI compatible protocol (single-shell data not suited).\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-vectors filename, in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Brain mask filename.\n --out_dir OUT_DIR 
Output directory for the NODDI results. [results]\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nModel options:\n --para_diff PARA_DIFF\n Axial diffusivity (AD) in the CC. [0.0017]\n --iso_diff ISO_DIFF Mean diffusivity (MD) in ventricles. [0.003]\n --lambda1 LAMBDA1 First regularization parameter. [0.5]\n --lambda2 LAMBDA2 Second regularization parameter. [0.001]\n\nKernels options:\n --save_kernels DIRECTORY\n Output directory for the COMMIT kernels.\n --load_kernels DIRECTORY\n Input directory where the COMMIT kernels are located.\n --compute_only Compute kernels only, --save_kernels must be used.\n\nReference:\n [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n NODDI: practical in vivo neurite orientation dispersion\n and density imaging of the human brain.\n NeuroImage. 2012 Jul 16;61:1000-16.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "animal", - "human", - "human" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "matter", - "question", - "thinking", - "understand" - ], - [ - "orientation", - "orientation" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "parameter", - "parameters", - "parameter" - ], - [ - "methods", - "use" - ], - [ - "work", - "find" - ], - [ - "thinking", - "you" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "area", - "located" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "work", - "even" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "thinking", - "i" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "axial", - "axial" - ], - [ - "methods", - "using" - ], - [ - "future", - "current" - ], - [ - "exist", - "exists" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "vivo", - "vivo" - ], - [ - "imaging", - "imaging" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "area", - "work", - "where" - ], - [ - "result", - "results" - ], - [ - "order", - "allow" - ], - [ - 
"difference", - "meaning", - "result", - "mean" - ], - [ - "step", - "start" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "maps", - "maps" - ], - [ - "order", - "necessary" - ] - ], - "keywords": [] - }, - { - "name": "scil_NODDI_priors", - "docstring": "Compute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py", - "help": "usage: scil_NODDI_priors.py [-h] [--fa_min_single_fiber FA_MIN_SINGLE_FIBER]\n [--fa_max_ventricles FA_MAX_VENTRICLES]\n [--md_min_ventricles MD_MIN_VENTRICLES]\n [--roi_radius ROI_RADIUS]\n [--roi_center pos pos pos]\n [--out_txt_1fiber_para FILE]\n [--out_txt_1fiber_perp FILE]\n [--out_mask_1fiber FILE]\n [--out_txt_ventricles FILE]\n [--out_mask_ventricles FILE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_FA in_AD in_RD in_MD\n\nCompute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py\n\npositional arguments:\n in_FA Path to the FA volume.\n in_AD Path to the axial diffusivity (AD) volume.\n in_RD Path to the radial diffusivity (RD) volume.\n in_MD Path to the mean diffusivity (MD) volume.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics options:\n --fa_min_single_fiber FA_MIN_SINGLE_FIBER\n Minimal threshold of FA (voxels above that threshold are considered in \n the single fiber mask). [0.7]\n --fa_max_ventricles FA_MAX_VENTRICLES\n Maximal threshold of FA (voxels under that threshold are considered in \n the ventricles). [0.1]\n --md_min_ventricles MD_MIN_VENTRICLES\n Minimal threshold of MD in mm2/s (voxels above that threshold are considered \n for in the ventricles). [0.003]\n\nRegions options:\n --roi_radius ROI_RADIUS\n Radius of the region used to estimate the priors. The roi will be a cube spanning \n from ROI_CENTER in each direction. [20]\n --roi_center pos pos pos\n Center of the roi of size roi_radius used to estimate the priors; a 3-value coordinate. \n If not set, uses the center of the 3D volume.\n\nOutputs:\n --out_txt_1fiber_para FILE\n Output path for the text file containing the single fiber average value of AD.\n If not set, the file will not be saved.\n --out_txt_1fiber_perp FILE\n Output path for the text file containing the single fiber average value of RD.\n If not set, the file will not be saved.\n --out_mask_1fiber FILE\n Output path for single fiber mask. If not set, the mask will not be saved.\n --out_txt_ventricles FILE\n Output path for the text file containing the ventricles average value of MD.\n If not set, the file will not be saved.\n --out_mask_ventricles FILE\n Output path for the ventricule mask.\n If not set, the mask will not be saved.\n\nReference:\n [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n NODDI: practical in vivo neurite orientation dispersion and density\n imaging of the human brain. NeuroImage. 
2012 Jul 16;61:1000-16.\n", - "synonyms": [ - [ - "animal", - "human", - "human" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "orientation", - "orientation" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "fibre", - "fiber" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "areas", - "region", - "regions", - "regions" - ], - [ - "direction", - "direction" - ], - [ - "order", - "set" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "average", - "average" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "axial", - "axial" - ], - [ - "methods", - "using" - ], - [ - "vivo", - "vivo" - ], - [ - "imaging", - "imaging" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "individual", - "each" - ], - [ - "larger", - "size", - "size" - ], - [ - "lack", - "minimal" - ], - [ - "level", - "above" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "area", - "areas", - "region", - "regions", - "region" - ] - ], - "keywords": [] - }, - { - "name": "scil_aodf_metrics", - "docstring": "Script to compute various metrics derivated from asymmetric ODF.\n\nThese metrics include the asymmetric peak directions image, a number of fiber\ndirections (nufid) map [1], the asymmetry index (ASI) map [2] and an odd-power\nmap [3].\n\nThe asymmetric peak directions image contains peaks per hemisphere, considering\nantipodal sphere directions as distinct. On a symmetric signal, the number of\nasymmetric peaks extracted is then twice the number of symmetric peaks.\n\nThe nufid map is the asymmetric alternative to NuFO maps. It counts the\nnumber of asymmetric peaks extracted and ranges in [0..N] with N the maximum\nnumber of peaks.\n\nThe asymmetric index is a cosine-based metric in the range [0..1], with 0\ncorresponding to a perfectly symmetric signal and 1 to a perfectly asymmetric\nsignal.\n\nThe odd-power map is also in the range [0..1], with 0 corresponding to a\nperfectly symmetric signal and 1 to a perfectly anti-symmetric signal. 
It is\ngiven as the ratio of the L2-norm of odd SH coefficients on the L2-norm of all\nSH coefficients.\n\nFormerly: scil_compute_asym_odf_metrics.py", - "help": "usage: scil_aodf_metrics.py [-h] [--mask MASK] [--asi_map ASI_MAP]\n [--odd_power_map ODD_POWER_MAP] [--peaks PEAKS]\n [--peak_values PEAK_VALUES]\n [--peak_indices PEAK_INDICES] [--nufid NUFID]\n [--not_all] [--at A_THRESHOLD] [--rt R_THRESHOLD]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--processes NBR]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh\n\nScript to compute various metrics derivated from asymmetric ODF.\n\nThese metrics include the asymmetric peak directions image, a number of fiber\ndirections (nufid) map [1], the asymmetry index (ASI) map [2] and an odd-power\nmap [3].\n\nThe asymmetric peak directions image contains peaks per hemisphere, considering\nantipodal sphere directions as distinct. On a symmetric signal, the number of\nasymmetric peaks extracted is then twice the number of symmetric peaks.\n\nThe nufid map is the asymmetric alternative to NuFO maps. It counts the\nnumber of asymmetric peaks extracted and ranges in [0..N] with N the maximum\nnumber of peaks.\n\nThe asymmetric index is a cosine-based metric in the range [0..1], with 0\ncorresponding to a perfectly symmetric signal and 1 to a perfectly asymmetric\nsignal.\n\nThe odd-power map is also in the range [0..1], with 0 corresponding to a\nperfectly symmetric signal and 1 to a perfectly anti-symmetric signal. It is\ngiven as the ratio of the L2-norm of odd SH coefficients on the L2-norm of all\nSH coefficients.\n\nFormerly: scil_compute_asym_odf_metrics.py\n\npositional arguments:\n in_sh Input SH image.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Optional mask.\n --asi_map ASI_MAP Output asymmetry index (ASI) map.\n --odd_power_map ODD_POWER_MAP\n Output odd power map.\n --peaks PEAKS Output filename for the extracted peaks.\n --peak_values PEAK_VALUES\n Output filename for the extracted peaks values.\n --peak_indices PEAK_INDICES\n Output filename for the generated peaks indices on the sphere.\n --nufid NUFID Output filename for the nufid file.\n --not_all If set, only saves the files specified using the file flags [False].\n --at A_THRESHOLD Absolute threshold on fODF amplitude. This value should be set to\n approximately 1.5 to 2 times the maximum fODF amplitude in isotropic voxels\n (ie. ventricles).\n Use scil_fodf_max_in_ventricles.py to find the maximal value.\n See [Dell'Acqua et al HBM 2013] [0.0].\n --rt R_THRESHOLD Relative threshold on fODF amplitude in percentage [0.1].\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere to use for peak directions estimation [symmetric724].\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. 
MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] C. Poirier and M. Descoteaux, \"Filtering Methods for Asymmetric ODFs:\nWhere and How Asymmetry Occurs in the White Matter.\" bioRxiv. 2022 Jan 1;\n2022.12.18.520881. doi: https://doi.org/10.1101/2022.12.18.520881\n\n[2] S. Cetin Karayumak, E. \u00d6zarslan, and G. Unal,\n\"Asymmetric Orientation Distribution Functions (AODFs) revealing intravoxel\ngeometry in diffusion MRI,\" Magnetic Resonance Imaging, vol. 49, pp. 145-158,\nJun. 2018, doi: https://doi.org/10.1016/j.mri.2018.03.006.\n\n[3] C. Poirier, E. St-Onge, and M. Descoteaux, \"Investigating the Occurence of\nAsymmetric Patterns in White Matter Fiber Orientation Distribution Functions\"\n[Abstract], In: Proc. Intl. Soc. Mag. Reson. Med. 29 (2021), 2021 May 15-20,\nVancouver, BC, Abstract number 0865.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "average", - "per" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "orientation", - "orientation" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "fibre", - "fiber" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "papers", - "paper" - ], - [ - "left", - "from" - ], - [ - "matter", - "question", - "subject", - "matter" - ], - [ - "left", - "result", - "when" - ], - [ - "examining", - "involved", - "investigating" - ], - [ - "unique", - "distinct" - ], - [ - "methods", - "use" - ], - [ - "work", - "find" - ], - [ - "order", - "set" - ], - [ - "direction", - "directions" - ], - [ - "long", - "a" - ], - [ - "average", - "percentage" - ], - [ - "held", - "on" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "specific", - "variety", - "various" - ], - [ - "diffusion", - "diffusion" - ], - [ - "imaging", - "mri" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "variety", - "include" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "matter", - "question", - "thinking", - "true", - "work", - "working", - "how" - ], - [ - "long", - "with" - ], - [ - "possibility", - "considering" - ], - [ - "parameters", - "specified" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "future", - "result", - "specific", - "variety", - "work", - "these" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "signal", - "signal" - ], - [ - "imaging", - "imaging" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "result", - "may" - ], - [ - "blue", - "green", - "red", - "white", - "white" - ], - [ - "area", - "work", - "where" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - 
], - [ - "analysis", - "methodology", - "methods", - "processes", - "methods" - ], - [ - "step", - "start" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "false", - "false" - ], - [ - "maps", - "maps" - ], - [ - "function", - "functions", - "functions" - ], - [ - "based", - "based" - ], - [ - "maps", - "map" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "hemisphere", - "hemisphere" - ] - ], - "keywords": [] - }, - { - "name": "scil_bids_validate", - "docstring": "Create a json file from a BIDS dataset detailling all info\nneeded for tractoflow\n- DWI/rev_DWI\n- T1\n- fmap/sbref (based on IntendedFor entity)\n- Freesurfer (optional - could be one per participant\n or one per participant/session)\n\nThe BIDS dataset MUST be homogeneous.\nThe metadata need to be uniform across all participants/sessions/runs\n\nMandatory entity: IntendedFor\nSensitive entities: PhaseEncodingDirection, TotalReadoutTime, direction\n\nFormerly: scil_validate_bids.py", - "help": "usage: scil_bids_validate.py [-h] [--bids_ignore BIDS_IGNORE] [--fs FS]\n [--clean] [--readout READOUT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bids out_json\n\nCreate a json file from a BIDS dataset detailling all info\nneeded for tractoflow\n- DWI/rev_DWI\n- T1\n- fmap/sbref (based on IntendedFor entity)\n- Freesurfer (optional - could be one per participant\n or one per participant/session)\n\nThe BIDS dataset MUST be homogeneous.\nThe metadata need to be uniform across all participants/sessions/runs\n\nMandatory entity: IntendedFor\nSensitive entities: PhaseEncodingDirection, TotalReadoutTime, direction\n\nFormerly: scil_validate_bids.py\n\npositional arguments:\n in_bids Input BIDS folder.\n out_json Output json file.\n\noptions:\n -h, --help show this help message and exit\n --bids_ignore BIDS_IGNORE\n If you want to ignore some subjects or some files, you\n can provide an extra bidsignore file.Check:\n https://github.com/bids-standard/bids-\n validator#bidsignore\n --fs FS Output freesurfer path. It will add keys wmparc and\n aparc+aseg.\n --clean If set, it will remove all the participants that are\n missing any information.\n --readout READOUT Default total readout time value [0.062].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. 
Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "average", - "per" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "subject", - "subjects", - "subjects" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "participant", - "participant" - ], - [ - "direction", - "direction" - ], - [ - "order", - "set" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "create" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "action", - "clear", - "considered", - "future", - "matter", - "possibility", - "potential", - "question", - "result", - "specific", - "any" - ], - [ - "long", - "result", - "work", - "working", - "time" - ], - [ - "create", - "provide" - ], - [ - "methods", - "using" - ], - [ - "areas", - "across" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "participants", - "participants" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "create", - "lack", - "step", - "work", - "working", - "need" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "based", - "based" - ], - [ - "clear", - "result", - "work", - "could" - ], - [ - "increase", - "total", - "total" - ], - [ - "large", - "work", - "some" - ] - ], - "keywords": [] - }, - { - "name": "scil_bingham_metrics", - "docstring": "Script to compute fODF lobe-specific metrics derived from a Bingham\ndistribution fit, as described in [1]. Resulting metrics are fiber density\n(FD), fiber spread (FS) and fiber fraction (FF) [2].\n\nThe Bingham coefficients volume comes from scil_fodf_to_bingham.py.\n\nA lobe's FD is the integral of the Bingham function on the sphere. It\nrepresents the density of fibers going through a given voxel for a given\nfODF lobe (fixel). A lobe's FS is the ratio of its FD on its maximum AFD. It\nis at its minimum for a sharp lobe and at its maximum for a wide lobe. A lobe's\nFF is the ratio of its FD on the total FD in the voxel.\n\nUsing 12 threads, the execution takes 10 minutes for FD estimation for a brain\nwith 1mm isotropic resolution. Other metrics take less than a second.\n\nFormerly: scil_compute_lobe_specific_fodf_metrics.py", - "help": "usage: scil_bingham_metrics.py [-h] [--out_fd OUT_FD] [--out_fs OUT_FS]\n [--out_ff OUT_FF] [--not_all] [--mask MASK]\n [--nbr_integration_steps NBR_INTEGRATION_STEPS]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR]\n [-f]\n in_bingham\n\nScript to compute fODF lobe-specific metrics derived from a Bingham\ndistribution fit, as described in [1]. Resulting metrics are fiber density\n(FD), fiber spread (FS) and fiber fraction (FF) [2].\n\nThe Bingham coefficients volume comes from scil_fodf_to_bingham.py.\n\nA lobe's FD is the integral of the Bingham function on the sphere. 
It\nrepresents the density of fibers going through a given voxel for a given\nfODF lobe (fixel). A lobe's FS is the ratio of its FD on its maximum AFD. It\nis at its minimum for a sharp lobe and at its maximum for a wide lobe. A lobe's\nFF is the ratio of its FD on the total FD in the voxel.\n\nUsing 12 threads, the execution takes 10 minutes for FD estimation for a brain\nwith 1mm isotropic resolution. Other metrics take less than a second.\n\nFormerly: scil_compute_lobe_specific_fodf_metrics.py\n\npositional arguments:\n in_bingham Input Bingham nifti image.\n\noptions:\n -h, --help show this help message and exit\n --out_fd OUT_FD Path to output fiber density. [fd.nii.gz]\n --out_fs OUT_FS Path to output fiber spread. [fs.nii.gz]\n --out_ff OUT_FF Path to fiber fraction file. [ff.nii.gz]\n --not_all Do not compute all metrics. Then, please provide the output paths of the files you need.\n --mask MASK Optional mask image. Only voxels inside the mask are computed.\n --nbr_integration_steps NBR_INTEGRATION_STEPS\n Number of integration steps along the theta axis for fiber density estimation. [50]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -f Force overwriting of the output files.\n\n[1] T. W. Riffert, J. Schreiber, A. Anwander, and T. R. Kn\u00f6sche, \u201cBeyond\n fractional anisotropy: Extraction of bundle-specific structural metrics\n from crossing fiber models,\u201d NeuroImage, vol. 100, pp. 176-191, Oct. 2014,\n doi: 10.1016/j.neuroimage.2014.06.015.\n\n[2] J. Schreiber, T. Riffert, A. Anwander, and T. R. Kn\u00f6sche, \u201cPlausibility\n Tracking: A method to evaluate anatomical connectivity and microstructural\n properties along fiber pathways,\u201d NeuroImage, vol. 90, pp. 
163-178, Apr.\n 2014, doi: 10.1016/j.neuroimage.2014.01.002.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "methods", - "method" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "total", - "90" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "brain", - "brain" - ], - [ - "meaning", - "derived" - ], - [ - "fibre", - "fiber" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "specific", - "specific" - ], - [ - "thinking", - "you" - ], - [ - "step", - "steps" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "anatomical", - "anatomical" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "defined", - "function", - "functional", - "functions", - "function" - ], - [ - "clear", - "order", - "step", - "work", - "take" - ], - [ - "pathway", - "pathways", - "pathways" - ], - [ - "long", - "with" - ], - [ - "total", - "50" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "create", - "provide" - ], - [ - "represent", - "represents" - ], - [ - "methods", - "using" - ], - [ - "structural", - "structural" - ], - [ - "variety", - "work", - "other" - ], - [ - "area", - "main", - "along" - ], - [ - "long", - "than" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "highly", - "less" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "result", - "resulting" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "fibres", - "fibers" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "create", - "lack", - "step", - "work", - "working", - "need" - ], - [ - "connectivity", - "connectivity" - ], - [ - "step", - "start" - ], - [ - "work", - "all" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "total", - "100" - ], - [ - "lobe", - "lobes", - "occipital", - "parietal", - "lobe" - ], - [ - "attention", - "comes" - ], - [ - "step", - "thinking", - "going" - ], - [ - "bundles", - "bundle" - ], - [ - "data", - "tracking", - "tracking" - ], - [ - "examine", - "evaluate" - ], - [ - "increase", - "total", - "total" - ], - [ - "function", - "integral" - ] - ], - "keywords": [] - }, - { - "name": "scil_btensor_metrics", - "docstring": "Script to compute microstructure metrics using the DIVIDE method. In order to\noperate, the script needs at leats two different types of b-tensor encodings.\nNote that custom encodings are not yet supported, so that only the linear\ntensor encoding (LTE, b_delta = 1), the planar tensor encoding\n(PTE, b_delta = -0.5), the spherical tensor encoding (STE, b_delta = 0) and\nthe cigar shape tensor encoding (b_delta = 0.5) are available. Moreover, all\nof `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. 
Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names. Thus, this\nscript outputs the results from the DIVIDE fit or direct derivatives:\nmean diffusivity (MD), isotropic mean kurtosis (mk_i), anisotropic mean\nkurtosis (mk_a), total mean kurtosis (mk_t) and finally micro-FA (uFA).\nSpecific names can be specified using the\nfile flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output. The order parameter can also be computed from the uFA and a\nprecomputed FA, using separate input parameters.\n\n>>> scil_btensor_metrics.py --in_dwis LTE.nii.gz PTE.nii.gz STE.nii.gz\n --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs LTE.bvec PTE.bvec STE.bvec\n --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nIMPORTANT: If the script does not converge to a solution, it is probably due to\nnoise outside the brain. Thus, it is strongly recommanded to provide a brain\nmask with --mask.\n\nBased on Markus Nilsson, Filip Szczepankiewicz, Bj\u00f6rn Lampinen, Andr\u00e9 Ahlgren,\nJo\u00e3o P. de Almeida Martins, Samo Lasic, Carl-Fredrik Westin,\nand Daniel Topgaard. An open-source framework for analysis of multidimensional\ndiffusion MRI data implemented in MATLAB.\nProc. Intl. Soc. Mag. Reson. Med. (26), Paris, France, 2018.\n\nFormerly: scil_compute_divide.py", - "help": "usage: scil_btensor_metrics.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--mask MASK]\n [--tolerance tol] [--skip_b0_check]\n [--fit_iters FIT_ITERS]\n [--random_iters RANDOM_ITERS]\n [--do_weight_bvals] [--do_weight_pa]\n [--do_multiple_s0] [--op OP] [--fa FA]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n [-f] [--not_all] [--md file] [--ufa file]\n [--mk_i file] [--mk_a file] [--mk_t file]\n\nScript to compute microstructure metrics using the DIVIDE method. In order to\noperate, the script needs at leats two different types of b-tensor encodings.\nNote that custom encodings are not yet supported, so that only the linear\ntensor encoding (LTE, b_delta = 1), the planar tensor encoding\n(PTE, b_delta = -0.5), the spherical tensor encoding (STE, b_delta = 0) and\nthe cigar shape tensor encoding (b_delta = 0.5) are available. Moreover, all\nof `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names. Thus, this\nscript outputs the results from the DIVIDE fit or direct derivatives:\nmean diffusivity (MD), isotropic mean kurtosis (mk_i), anisotropic mean\nkurtosis (mk_a), total mean kurtosis (mk_t) and finally micro-FA (uFA).\nSpecific names can be specified using the\nfile flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output. 
The order parameter can also be computed from the uFA and a\nprecomputed FA, using separate input parameters.\n\n>>> scil_btensor_metrics.py --in_dwis LTE.nii.gz PTE.nii.gz STE.nii.gz\n --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs LTE.bvec PTE.bvec STE.bvec\n --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nIMPORTANT: If the script does not converge to a solution, it is probably due to\nnoise outside the brain. Thus, it is strongly recommanded to provide a brain\nmask with --mask.\n\nBased on Markus Nilsson, Filip Szczepankiewicz, Bj\u00f6rn Lampinen, Andr\u00e9 Ahlgren,\nJo\u00e3o P. de Almeida Martins, Samo Lasic, Carl-Fredrik Westin,\nand Daniel Topgaard. An open-source framework for analysis of multidimensional\ndiffusion MRI data implemented in MATLAB.\nProc. Intl. Soc. Mag. Reson. Med. (26), Paris, France, 2018.\n\nFormerly: scil_compute_divide.py\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as dwi, bval and bvec inputs.\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --fit_iters FIT_ITERS\n The number of time the gamma fit will be done [1]\n --random_iters RANDOM_ITERS\n The number of iterations for the initial parameters search. [50]\n --do_weight_bvals If set, does not do a weighting on the bvalues in the gamma fit.\n --do_weight_pa If set, does not do a powder averaging weighting in the gamma fit.\n --do_multiple_s0 If set, does not take into account multiple baseline signals.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags. (Default: False)\n\nOrder parameter (OP):\n --op OP Output filename for the order parameter. The OP will not be output if this is not given. Computation of the OP also requires a precomputed FA map (given using --fa).\n --fa FA Path to a FA map. 
Needed for calculating the OP.\n\nFile flags:\n --md file Output filename for the MD.\n --ufa file Output filename for the microscopic FA.\n --mk_i file Output filename for the isotropic mean kurtosis.\n --mk_a file Output filename for the anisotropic mean kurtosis.\n --mk_t file Output filename for the total mean kurtosis.\n", - "synonyms": [ - [ - "individual", - "specific", - "unique", - "variety", - "different" - ], - [ - "total", - "number" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "methods", - "method" - ], - [ - "lack", - "result", - "due" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "matter", - "question", - "thinking", - "understand" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "considered", - "are" - ], - [ - "order", - "order" - ], - [ - "left", - "into" - ], - [ - "brain", - "brain" - ], - [ - "result", - "moreover" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "parameter", - "parameters", - "parameter" - ], - [ - "methods", - "use" - ], - [ - "work", - "find" - ], - [ - "order", - "set" - ], - [ - "long", - "have" - ], - [ - "specific", - "specific" - ], - [ - "thinking", - "you" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "work", - "even" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "analysis", - "data", - "methods", - "study", - "analysis" - ], - [ - "diffusion", - "diffusion" - ], - [ - "imaging", - "mri" - ], - [ - "thinking", - "i" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "probably" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "clear", - "order", - "step", - "work", - "take" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "process", - "solution" - ], - [ - "long", - "with" - ], - [ - "parameters", - "specified" - ], - [ - "clear", - "long", - "matter", - "result", - "work", - "so" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "long", - "result", - "work", - "working", - "time" - ], - [ - "total", - "50" - ], - [ - "create", - "provide" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "future", - "current" - ], - [ - "clear", - "future", - "possibility", - "potential", - "question", - "result", - "specific", - "step", - "possible" - ], - [ - "exist", - "exists" - ], - [ - "signal", - "signals" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "future", - "result", - "specific", - "variety", - "work", - "these" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "supported", - "supported" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - 
"considered", - "is" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "matter", - "question", - "does" - ], - [ - "result", - "results" - ], - [ - "considered", - "form", - "meaning", - "order", - "result", - "thus" - ], - [ - "order", - "allow" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "long", - "two" - ], - [ - "work", - "working", - "done" - ], - [ - "step", - "start" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "step", - "true", - "work", - "yet" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "false", - "false" - ], - [ - "based", - "based" - ], - [ - "forms", - "specific", - "variety", - "types" - ], - [ - "maps", - "map" - ], - [ - "clear", - "matter", - "question", - "thinking", - "sure" - ], - [ - "supported", - "strongly" - ], - [ - "increase", - "total", - "total" - ], - [ - "shape", - "structure", - "shape" - ], - [ - "considered", - "key", - "work", - "important" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_clean_qbx_clusters", - "docstring": "Render clusters sequentially to either accept or reject them based on\nvisual inspection. Useful for cleaning bundles for RBx, BST or for figures.\nThe VTK window does not handle well opacity of streamlines, this is a\nnormal rendering behavior.\nOften use in pair with scil_tractogram_qbx.py.\n\nKey mapping:\n- a/A: accept displayed clusters\n- r/R: reject displayed clusters\n- z/Z: Rewing one element\n- c/C: Stop rendering of the background concatenation of streamlines\n- q/Q: Early window exist, everything remaining will be rejected", - "help": "usage: scil_bundle_clean_qbx_clusters.py [-h]\n [--out_accepted_dir OUT_ACCEPTED_DIR]\n [--out_rejected_dir OUT_REJECTED_DIR]\n [--min_cluster_size MIN_CLUSTER_SIZE]\n [--background_opacity BACKGROUND_OPACITY]\n [--background_linewidth BACKGROUND_LINEWIDTH]\n [--clusters_linewidth CLUSTERS_LINEWIDTH]\n [--reference REFERENCE]\n [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...]\n out_accepted out_rejected\n\n Render clusters sequentially to either accept or reject them based on\n visual inspection. 
Useful for cleaning bundles for RBx, BST or for figures.\n The VTK window does not handle well opacity of streamlines, this is a\n normal rendering behavior.\n Often use in pair with scil_tractogram_qbx.py.\n\n Key mapping:\n - a/A: accept displayed clusters\n - r/R: reject displayed clusters\n - z/Z: Rewing one element\n - c/C: Stop rendering of the background concatenation of streamlines\n - q/Q: Early window exist, everything remaining will be rejected\n\npositional arguments:\n in_bundles List of the clusters filename.\n out_accepted Filename of the concatenated accepted clusters.\n out_rejected Filename of the concatenated rejected clusters.\n\noptions:\n -h, --help show this help message and exit\n --out_accepted_dir OUT_ACCEPTED_DIR\n Directory to save all accepted clusters separately.\n --out_rejected_dir OUT_REJECTED_DIR\n Directory to save all rejected clusters separately.\n --min_cluster_size MIN_CLUSTER_SIZE\n Minimum cluster size for consideration [1].Must be at least 1.\n --background_opacity BACKGROUND_OPACITY\n Opacity of the background streamlines.Keep low between 0 and 0.5 [0.1].\n --background_linewidth BACKGROUND_LINEWIDTH\n Linewidth of the background streamlines [1].\n --clusters_linewidth CLUSTERS_LINEWIDTH\n Linewidth of the current cluster [1].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundles" - ], - [ - "work", - "and" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "proposed", - "rejected" - ], - [ - "left", - "result", - "when" - ], - [ - "invalid", - "valid", - "invalid" - ], - [ - "rendered", - "rendering", - "rendering" - ], - [ - "methods", - "use" - ], - [ - "exist", - "exist" - ], - [ - "maps", - "mapping" - ], - [ - "long", - "a" - ], - [ - "key", - "main", - "key" - ], - [ - "held", - "on" - ], - [ - "tool", - "useful" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "traditionally", - "often" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "rendered", - "rendering", - "render" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "visual", - "visual" - ], - [ - "methods", - "using" - ], - [ - "future", - "current" - ], - [ - "thinking", - "everything" - ], - [ - "streamlines", - "streamlines" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "high", - "higher", - "level", - "low" - ], - [ - "larger", - "size", - "size" - ], - [ - "matter", - "question", - "does" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "based", - "based" - ], - [ - "considered", - "experience", - "large", - "long", - 
"result", - "variety", - "work", - "working", - "well" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_compute_centroid", - "docstring": "Compute a single bundle centroid, using an 'infinite' QuickBundles threshold.\n\nFormerly: scil_compute_centroid.py", - "help": "usage: scil_bundle_compute_centroid.py [-h] [--nb_points NB_POINTS]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_centroid\n\nCompute a single bundle centroid, using an 'infinite' QuickBundles threshold.\n\nFormerly: scil_compute_centroid.py\n\npositional arguments:\n in_bundle Fiber bundle file.\n out_centroid Output centroid streamline filename.\n\noptions:\n -h, --help show this help message and exit\n --nb_points NB_POINTS\n Number of points defining the centroid streamline[20].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "anatomy", - "anatomy" - ], - [ - "streamline", - "streamline" - ], - [ - "long", - "a" - ], - [ - "work", - "and" - ], - [ - "methods", - "using" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "fibre", - "fiber" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "bundles", - "bundle" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "is" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_compute_endpoints_map", - "docstring": "Computes the endpoint map of a bundle. The endpoint map is simply a count of\nthe number of streamlines that start or end in each voxel.\n\nThe idea is to estimate the cortical area affected by the bundle (assuming\nstreamlines start/end in the cortex).\n\nNote: If the streamlines are not ordered the head/tail are random and not\nreally two coherent groups. Use the following script to order streamlines:\nscil_tractogram_uniformize_endpoints.py\n\nFormerly: scil_compute_endpoints_map.py", - "help": "usage: scil_bundle_compute_endpoints_map.py [-h] [--swap] [--binary]\n [--nb_points NB_POINTS]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle endpoints_map_head\n endpoints_map_tail\n\nComputes the endpoint map of a bundle. The endpoint map is simply a count of\nthe number of streamlines that start or end in each voxel.\n\nThe idea is to estimate the cortical area affected by the bundle (assuming\nstreamlines start/end in the cortex).\n\nNote: If the streamlines are not ordered the head/tail are random and not\nreally two coherent groups. Use the following script to order streamlines:\nscil_tractogram_uniformize_endpoints.py\n\nFormerly: scil_compute_endpoints_map.py\n\npositional arguments:\n in_bundle Fiber bundle filename.\n endpoints_map_head Output endpoints map head filename.\n endpoints_map_tail Output endpoints map tail filename.\n\noptions:\n -h, --help show this help message and exit\n --swap Swap head<->tail convention. 
{
    "name": "scil_bundle_compute_endpoints_map",
    "docstring": "Computes the endpoint map of a bundle. The endpoint map is simply a count of\nthe number of streamlines that start or end in each voxel.\n\nThe idea is to estimate the cortical area affected by the bundle (assuming\nstreamlines start/end in the cortex).\n\nNote: If the streamlines are not ordered, the head/tail are random and not\nreally two coherent groups. Use the following script to order streamlines:\nscil_tractogram_uniformize_endpoints.py\n\nFormerly: scil_compute_endpoints_map.py",
    "help": "usage: scil_bundle_compute_endpoints_map.py [-h] [--swap] [--binary]\n [--nb_points NB_POINTS]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle endpoints_map_head\n endpoints_map_tail\n\nComputes the endpoint map of a bundle. The endpoint map is simply a count of\nthe number of streamlines that start or end in each voxel.\n\nThe idea is to estimate the cortical area affected by the bundle (assuming\nstreamlines start/end in the cortex).\n\nNote: If the streamlines are not ordered, the head/tail are random and not\nreally two coherent groups. Use the following script to order streamlines:\nscil_tractogram_uniformize_endpoints.py\n\nFormerly: scil_compute_endpoints_map.py\n\npositional arguments:\n  in_bundle             Fiber bundle filename.\n  endpoints_map_head    Output endpoints map head filename.\n  endpoints_map_tail    Output endpoints map tail filename.\n\noptions:\n  -h, --help            show this help message and exit\n  --swap                Swap head<->tail convention.\n                        Can be useful when the reference is not in RAS.\n  --binary              Save outputs as a binary mask instead of a heat map.\n  --nb_points NB_POINTS\n                        Number of points to consider at the extremities of the streamlines. [1]\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nJson options:\n  --indent INDENT       Indent for json pretty print.\n  --sort_keys           Sort keys in output json.\n",
    "synonyms": [],
    "keywords": []
},
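A minimal sketch of the voxel-wise counting described above, assuming the streamlines are already in voxel space; the helper name is illustrative, not the script's internals:

    import numpy as np

    def endpoints_maps(streamlines_vox, shape):
        # Count, per voxel, how many streamlines start (head) or end (tail) there.
        head = np.zeros(shape, dtype=np.int32)
        tail = np.zeros(shape, dtype=np.int32)
        for s in streamlines_vox:
            i, j, k = np.floor(s[0]).astype(int)
            head[i, j, k] += 1
            i, j, k = np.floor(s[-1]).astype(int)
            tail[i, j, k] += 1
        return head, tail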
{
    "name": "scil_bundle_diameter",
    "docstring": "Script to estimate the diameter of bundle(s) along their length.\nSee also scil_bundle_shape_measures.py, which prints a quick estimate of\nthe diameter (volume / length). The computation here is more complex and done\nfor each section of the bundle.\n\nThe script expects:\n- bundles with coherent endpoints from scil_tractogram_uniformize_endpoints.py\n- label maps with around 5-50 points, from scil_bundle_label_map.py\n <5 is not enough, high risk of bad fit\n >50 is too much, high risk of bad fit\n- bundles that are close to a tube\n without major fanning in a single axis\n fanning in 2 directions (uniform dispersion) is a good approximation\n\nThe script prints a JSON file with mean/std to be compatible with tractometry.\nWARNING: STD is in fact an ERROR measure from the fit and NOT an STD.\n\nSince the estimation and fit quality is not always intuitive for some bundles\nand the tube with varying diameter is not easy to color/visualize,\nthe script comes with its own optional VTK rendering to allow exploration of\nthe data.\n\nFormerly: scil_estimate_bundles_diameter.py",
    "help": "usage: scil_bundle_diameter.py [-h]\n [--fitting_func {lin_up,lin_down,exp,inv,log}]\n [--show_rendering | --save_rendering OUT_FOLDER]\n [--wireframe] [--error_coloring]\n [--width WIDTH] [--opacity OPACITY]\n [--win_dims WIDTH HEIGHT] [--background R G B]\n [--reference REFERENCE] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...] in_labels\n [in_labels ...]\n\nScript to estimate the diameter of bundle(s) along their length.\nSee also scil_bundle_shape_measures.py, which prints a quick estimate of\nthe diameter (volume / length). The computation here is more complex and done\nfor each section of the bundle.\n\nThe script expects:\n- bundles with coherent endpoints from scil_tractogram_uniformize_endpoints.py\n- label maps with around 5-50 points, from scil_bundle_label_map.py\n <5 is not enough, high risk of bad fit\n >50 is too much, high risk of bad fit\n- bundles that are close to a tube\n without major fanning in a single axis\n fanning in 2 directions (uniform dispersion) is a good approximation\n\nThe script prints a JSON file with mean/std to be compatible with tractometry.\nWARNING: STD is in fact an ERROR measure from the fit and NOT an STD.\n\nSince the estimation and fit quality is not always intuitive for some bundles\nand the tube with varying diameter is not easy to color/visualize,\nthe script comes with its own optional VTK rendering to allow exploration of\nthe data.\n\nFormerly: scil_estimate_bundles_diameter.py\n\npositional arguments:\n  in_bundles            List of tractography files.\n  in_labels             List of label maps that match the bundles.\n\noptions:\n  -h, --help            show this help message and exit\n  --fitting_func {lin_up,lin_down,exp,inv,log}\n                        Function to weigh points using their distance.\n                        [Default: None]\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nVisualization options:\n  --show_rendering      Display VTK window (optional).\n  --save_rendering OUT_FOLDER\n                        Save VTK render in the specified folder (optional).\n  --wireframe           Use wireframe for the tube rendering.\n  --error_coloring      Use the fitting error to color the tube.\n  --width WIDTH         Width of tubes or lines representing streamlines\n                        [Default: 0.2]\n  --opacity OPACITY     Opacity for the streamlines rendered with the tube.\n                        [Default: 0.2]\n  --win_dims WIDTH HEIGHT\n                        The dimensions for the vtk window. [(1920, 1080)]\n  --background R G B    RGB values [0, 255] of the color of the background.\n                        [Default: [1, 1, 1]]\n\nJson options:\n  --indent INDENT       Indent for json pretty print.\n  --sort_keys           Sort keys in output json.\n",
    "synonyms": [],
    "keywords": []
},
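The quick estimate mentioned above (diameter from volume / length) follows from a tube assumption: volume = pi * (d/2)^2 * length, solved for d. A one-line sketch with a hypothetical helper, not part of the script:

    import numpy as np

    def quick_diameter(volume_mm3, length_mm):
        # Tube model: volume = pi * (diameter / 2) ** 2 * length.
        return 2.0 * np.sqrt(volume_mm3 / (np.pi * length_mm))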
{
    "name": "scil_bundle_filter_by_occurence",
    "docstring": "Use multiple versions of the same bundle and detect the most probable voxels by\nusing a threshold on the occurrence, voxel-wise. With threshold 0.5, this is\na majority vote. This is useful to generate an average representation from\nbundles of a given population.\n\nIf streamlines originate from the same tractogram (ex, to compare various\nbundle clustering techniques), a streamline-wise vote is available to find the\nstreamlines most often included in the bundle.\n\nFormerly: scil_perform_majority_vote.py",
    "help": "usage: scil_bundle_filter_by_occurence.py [-h] [--ratio_voxels [RATIO_VOXELS]]\n [--ratio_streamlines [RATIO_STREAMLINES]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...]\n output_prefix\n\nUse multiple versions of the same bundle and detect the most probable voxels by\nusing a threshold on the occurrence, voxel-wise. With threshold 0.5, this is\na majority vote. This is useful to generate an average representation from\nbundles of a given population.\n\nIf streamlines originate from the same tractogram (ex, to compare various\nbundle clustering techniques), a streamline-wise vote is available to find the\nstreamlines most often included in the bundle.\n\nFormerly: scil_perform_majority_vote.py\n\npositional arguments:\n  in_bundles            Input bundles filename(s). All tractograms must have identical headers.\n  output_prefix         Output prefix. Ex: my_path/voting_. The suffixes will be: streamlines.trk and voxels.nii.gz\n\noptions:\n  -h, --help            show this help message and exit\n  --ratio_voxels [RATIO_VOXELS]\n                        Threshold on the ratio of bundles with at least one streamline in a \n                        given voxel to consider it as part of the 'gold standard'. Default if set: 0.5.\n  --ratio_streamlines [RATIO_STREAMLINES]\n                        If all bundles come from the same tractogram, use this to generate \n                        a voting for streamlines too. The associated value is the threshold on the ratio of \n                        bundles including the streamline to consider it as part of the 'gold standard'. [0.5]\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n",
    "synonyms": [],
    "keywords": []
},
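A minimal sketch of the voxel-wise vote described above, assuming one density map per version of the bundle; the helper name is illustrative:

    import numpy as np

    def occurrence_mask(density_maps, ratio_voxels=0.5):
        # A voxel is kept if at least `ratio_voxels` of the bundles have one
        # or more streamlines there; 0.5 corresponds to a majority vote.
        hits = np.stack([np.asarray(d) > 0 for d in density_maps])
        return hits.mean(axis=0) >= ratio_voxels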
{
    "name": "scil_bundle_generate_priors",
    "docstring": "Generation of priors and enhanced-FOD from an example/template bundle.\nThe bundle must have been cleaned thoroughly before use. The E-FOD can then\nbe used for bundle-specific tractography, but not for FOD metrics.\n\nFormerly: scil_generate_priors_from_bundle.py",
    "help": "usage: scil_bundle_generate_priors.py [-h]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--todi_sigma {0,1,2,3,4}]\n [--sf_threshold SF_THRESHOLD]\n [--out_prefix OUT_PREFIX]\n [--out_dir OUT_DIR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle in_fodf in_mask\n\nGeneration of priors and enhanced-FOD from an example/template bundle.\nThe bundle must have been cleaned thoroughly before use. The E-FOD can then\nbe used for bundle-specific tractography, but not for FOD metrics.\n\nFormerly: scil_generate_priors_from_bundle.py\n\npositional arguments:\n  in_bundle             Input bundle filename.\n  in_fodf               Input FOD filename.\n  in_mask               Mask to constrain the TODI spatial smoothing,\n                        for example a WM mask.\n\noptions:\n  -h, --help            show this help message and exit\n  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n                        Spherical harmonics basis used for the SH coefficients. \n                        Must be either 'descoteaux07', 'tournier07', \n                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n                        'descoteaux07'       : SH basis from the Descoteaux et al.\n                        MRM 2007 paper\n                        'tournier07'         : SH basis from the new Tournier et al.\n                        NeuroImage 2019 paper, as in MRtrix 3.\n                        'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n                        of the Descoteaux et al. MRM 2007 paper\n                        'tournier07_legacy'  : SH basis from the legacy Tournier et al.\n                        NeuroImage 2007 paper.\n  --todi_sigma {0,1,2,3,4}\n                        Smooth the orientation histogram.\n  --sf_threshold SF_THRESHOLD\n                        Relative threshold for sf masking (0.0-1.0).\n  --out_prefix OUT_PREFIX\n                        Add a prefix to all output filenames, \n                        default is no prefix.\n  --out_dir OUT_DIR     Output directory for all generated files,\n                        default is current directory.\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\n References:\n [1] Rheault, Francois, et al. \"Bundle-specific tractography with\n incorporated anatomical and orientational priors.\"\n NeuroImage 186 (2019): 382-398\n \n",
    "synonyms": [],
    "keywords": []
},
{
    "name": "scil_bundle_label_map",
    "docstring": "Compute the label image (Nifti) from a centroid and tractograms (all\nrepresenting the same bundle). The label image represents the coverage of\nthe bundle, segmented into regions labelled from 0 to --nb_pts, starting from\nthe head, ending in the tail.\n\nEach voxel will have the label of its nearest centroid point.\n\nThe number of labels will be the same as the centroid's number of points.\n\nFormerly: scil_compute_bundle_voxel_label_map.py",
    "help": "usage: scil_bundle_label_map.py [-h] [--nb_pts NB_PTS] [--colormap COLORMAP]\n [--new_labelling] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...] in_centroid\n out_dir\n\nCompute the label image (Nifti) from a centroid and tractograms (all\nrepresenting the same bundle). The label image represents the coverage of\nthe bundle, segmented into regions labelled from 0 to --nb_pts, starting from\nthe head, ending in the tail.\n\nEach voxel will have the label of its nearest centroid point.\n\nThe number of labels will be the same as the centroid's number of points.\n\nFormerly: scil_compute_bundle_voxel_label_map.py\n\npositional arguments:\n  in_bundles            Fiber bundle file.\n  in_centroid           Centroid streamline corresponding to bundle.\n  out_dir               Directory to save all mapping and coloring files:\n                        - correlation_map.nii.gz\n                        - session_x/labels_map.nii.gz\n                        - session_x/distance_map.nii.gz\n                        - session_x/correlation_map.nii.gz\n                        - session_x/labels.trk\n                        - session_x/distance.trk\n                        - session_x/correlation.trk\n                        Where session_x is numbered with each bundle.\n\noptions:\n  -h, --help            show this help message and exit\n  --nb_pts NB_PTS       Number of divisions for the bundles.\n                        Default is the number of points of the centroid.\n  --colormap COLORMAP   Select the colormap for colored trk (data_per_point) [jet].\n  --new_labelling       Use the new labelling method (multi-centroids).\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n",
    "synonyms": [],
    "keywords": []
},
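A sketch of the nearest-centroid-point labelling described above, under the default (single-centroid) scheme; coordinates are assumed to be in the same space, and the helper name is illustrative:

    import numpy as np
    from scipy.spatial import cKDTree

    def label_voxels(voxel_coords, centroid_points):
        # Each voxel gets the 1-based label of its nearest centroid point,
        # so labels run from head (1) to tail (len(centroid_points)).
        _, idx = cKDTree(centroid_points).query(voxel_coords)
        return idx + 1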
{
    "name": "scil_bundle_mean_fixel_afd",
    "docstring": "Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps along a bundle.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof the bundle provided, averaged at every voxel.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_fixel_afd_from_bundles.py",
    "help": "usage: scil_bundle_mean_fixel_afd.py [-h] [--length_weighting]\n [--reference REFERENCE]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle in_fodf afd_mean_map\n\nCompute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps along a bundle.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof the bundle provided, averaged at every voxel.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_fixel_afd_from_bundles.py\n\npositional arguments:\n  in_bundle             Path of the bundle file.\n  in_fodf               Path of the fODF volume in spherical harmonics (SH).\n  afd_mean_map          Path of the output mean AFD map.\n\noptions:\n  -h, --help            show this help message and exit\n  --length_weighting    If set, will weigh the AFD values according to segment lengths. [False]\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n                        Spherical harmonics basis used for the SH coefficients. \n                        Must be either 'descoteaux07', 'tournier07', \n                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n                        'descoteaux07'       : SH basis from the Descoteaux et al.\n                        MRM 2007 paper\n                        'tournier07'         : SH basis from the new Tournier et al.\n                        NeuroImage 2019 paper, as in MRtrix 3.\n                        'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n                        of the Descoteaux et al. MRM 2007 paper\n                        'tournier07_legacy'  : SH basis from the legacy Tournier et al.\n                        NeuroImage 2007 paper.\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nReference:\n [1] Raffelt, D., Tournier, JD., Rose, S., Ridgway, GR., Henderson, R.,\n Crozier, S., Salvado, O., & Connelly, A. (2012).\n Apparent Fibre Density: a novel measure for the analysis of\n diffusion-weighted magnetic resonance images. NeuroImage, 59(4),\n 3976--3994.\n",
    "synonyms": [],
    "keywords": []
},
{
    "name": "scil_bundle_mean_fixel_afd_from_hdf5",
    "docstring": "Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps for every connection within a hdf5 (.h5) file.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof each connection, averaged at every voxel.\n\nPlease use a hdf5 (.h5) file containing decomposed connections.\n\nFormerly: scil_compute_fixel_afd_from_hdf5.py",
    "help": "usage: scil_bundle_mean_fixel_afd_from_hdf5.py [-h] [--length_weighting]\n [--processes NBR]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_hdf5 in_fodf out_hdf5\n\nCompute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps for every connection within a hdf5 (.h5) file.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof each connection, averaged at every voxel.\n\nPlease use a hdf5 (.h5) file containing decomposed connections.\n\nFormerly: scil_compute_fixel_afd_from_hdf5.py\n\npositional arguments:\n  in_hdf5               HDF5 filename (.h5) containing decomposed connections.\n  in_fodf               Path of the fODF volume in spherical harmonics (SH).\n  out_hdf5              Path of the output HDF5 filenames (.h5).\n\noptions:\n  -h, --help            show this help message and exit\n  --length_weighting    If set, will weigh the AFD values according to segment lengths. [False]\n  --processes NBR       Number of sub-processes to start. \n                        Default: [1]\n  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n                        Spherical harmonics basis used for the SH coefficients. \n                        Must be either 'descoteaux07', 'tournier07', \n                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n                        'descoteaux07'       : SH basis from the Descoteaux et al.\n                        MRM 2007 paper\n                        'tournier07'         : SH basis from the new Tournier et al.\n                        NeuroImage 2019 paper, as in MRtrix 3.\n                        'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n                        of the Descoteaux et al. MRM 2007 paper\n                        'tournier07_legacy'  : SH basis from the legacy Tournier et al.\n                        NeuroImage 2007 paper.\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nReference:\n [1] Raffelt, D., Tournier, JD., Rose, S., Ridgway, GR., Henderson, R.,\n Crozier, S., Salvado, O., & Connelly, A. (2012).\n Apparent Fibre Density: a novel measure for the analysis of\n diffusion-weighted magnetic resonance images. NeuroImage,\n 59(4), 3976--3994.\n",
    "synonyms": [],
    "keywords": []
},
{
    "name": "scil_bundle_mean_fixel_bingham_metric",
    "docstring": "Given a bundle and Bingham coefficients, compute the average Bingham\nmetric at each voxel intersected by the bundle. Intersected voxels are\nfound by computing the intersection between the voxel grid and each streamline\nin the input tractogram.\n\nThis script behaves like scil_compute_mean_fixel_afd_from_bundles.py for fODFs,\nbut here for Bingham distributions. These add the unique possibility to capture\nfixel-based fiber spread (FS) and fiber fraction (FF). FD from the Bingham\nshould be \"equivalent\" to the AFD_fixel we are used to.\n\nBingham coefficients volume must come from scil_fodf_to_bingham.py\nand Bingham metrics come from scil_bingham_metrics.py.\n\nBingham metrics are extracted from Bingham distributions fitted to fODF. There\nare as many values per voxel as there are lobes extracted. The value chosen\nfor a given voxel is the one belonging to the lobe best aligned with the\ncurrent streamline segment.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_mean_fixel_obe_metric_from_bundles.py",
    "help": "usage: scil_bundle_mean_fixel_bingham_metric.py [-h] [--length_weighting]\n [--max_theta MAX_THETA]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bundle in_bingham\n in_bingham_metric out_mean_map\n\nGiven a bundle and Bingham coefficients, compute the average Bingham\nmetric at each voxel intersected by the bundle. Intersected voxels are\nfound by computing the intersection between the voxel grid and each streamline\nin the input tractogram.\n\nThis script behaves like scil_compute_mean_fixel_afd_from_bundles.py for fODFs,\nbut here for Bingham distributions. These add the unique possibility to capture\nfixel-based fiber spread (FS) and fiber fraction (FF). FD from the Bingham\nshould be \"equivalent\" to the AFD_fixel we are used to.\n\nBingham coefficients volume must come from scil_fodf_to_bingham.py\nand Bingham metrics come from scil_bingham_metrics.py.\n\nBingham metrics are extracted from Bingham distributions fitted to fODF. There\nare as many values per voxel as there are lobes extracted. The value chosen\nfor a given voxel is the one belonging to the lobe best aligned with the\ncurrent streamline segment.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_mean_fixel_obe_metric_from_bundles.py\n\npositional arguments:\n  in_bundle             Path of the bundle file.\n  in_bingham            Path of the Bingham volume.\n  in_bingham_metric     Path of the Bingham metric (FD, FS, or FF) volume.\n  out_mean_map          Path of the output mean map.\n\noptions:\n  -h, --help            show this help message and exit\n  --length_weighting    If set, will weigh the FD values according to segment lengths.\n  --max_theta MAX_THETA\n                        Maximum angle (in degrees) condition on lobe alignment. [60]\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n",
    "synonyms": [],
    "keywords": []
},
{
    "name": "scil_bundle_mean_std",
    "docstring": "Compute mean and std for each metric.\n\n- Default: For the whole bundle. This is achieved by averaging the metric\n values of all voxels occupied by the bundle.\n- Option --per_point: For all streamline points in the bundle for each metric\n combination, along the bundle, i.e. for each point.\n **To create label_map and distance_map, see\n scil_bundle_label_map.py\n\nDensity weighting modifies the contribution of voxels with lower/higher\nstreamline count to reduce the influence of spurious streamlines.\n\nFormerly: scil_compute_bundle_mean_std_per_point.py or\nscil_compute_bundle_mean_std.py",
    "help": "usage: scil_bundle_mean_std.py [-h] [--per_point in_labels | --include_dps]\n [--density_weighting]\n [--distance_weighting DISTANCE_NII]\n [--correlation_weighting CORRELATION_NII]\n [--out_json OUT_JSON] [--reference REFERENCE]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n in_bundle in_metrics [in_metrics ...]\n\nCompute mean and std for each metric.\n\n- Default: For the whole bundle. This is achieved by averaging the metric\n values of all voxels occupied by the bundle.\n- Option --per_point: For all streamline points in the bundle for each metric\n combination, along the bundle, i.e. for each point.\n **To create label_map and distance_map, see\n scil_bundle_label_map.py\n\nDensity weighting modifies the contribution of voxels with lower/higher\nstreamline count to reduce the influence of spurious streamlines.\n\nFormerly: scil_compute_bundle_mean_std_per_point.py or\nscil_compute_bundle_mean_std.py\n\npositional arguments:\n  in_bundle             Fiber bundle file to compute statistics on.\n  in_metrics            Nifti file to compute statistics on. Probably some tractometry measure(s) such as FA, MD, RD, ...\n\noptions:\n  -h, --help            show this help message and exit\n  --per_point in_labels\n                        If set, computes the metrics per point instead of on the whole bundle.\n                        You must then give the label map (.nii.gz) of the corresponding fiber bundle.\n  --include_dps         Save values from data_per_streamline.\n                        Currently not offered with option --per_point.\n  --density_weighting   If set, weights statistics by the number of fibers passing through each voxel.\n  --distance_weighting DISTANCE_NII\n                        If set, weights statistics by the inverse of the distance between a streamline and the centroid.\n  --correlation_weighting CORRELATION_NII\n                        If set, weights statistics by the correlation strength between longitudinal data.\n  --out_json OUT_JSON   Path of the output file. If not given, the output is simply printed on screen.\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n\nJson options:\n  --indent INDENT       Indent for json pretty print.\n  --sort_keys           Sort keys in output json.\n",
    "synonyms": [],
    "keywords": []
},
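A minimal sketch of the density weighting described above, for the whole-bundle case; array names are illustrative:

    import numpy as np

    def density_weighted_mean_std(metric, density):
        # Voxels crossed by more streamlines weigh more, which damps the
        # influence of spurious streamlines on the statistics.
        mask = density > 0
        w = density[mask].astype(float)
        x = metric[mask].astype(float)
        mean = np.average(x, weights=w)
        std = np.sqrt(np.average((x - mean) ** 2, weights=w))
        return mean, std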
{
    "name": "scil_bundle_pairwise_comparison",
    "docstring": "Evaluate pair-wise similarity measures of bundles.\nAll tractograms must be in the same space (aligned to one reference).\n\nFor the voxel representation, the computed similarity measures are:\n bundle_adjacency_voxels, dice_voxels, w_dice_voxels, density_correlation\n volume_overlap, volume_overreach\nThe same measures are also evaluated for the endpoints.\n\nFor the streamline representation, the computed similarity measures are:\n bundle_adjacency_streamlines, dice_streamlines, streamlines_count_overlap,\n streamlines_count_overreach\n\nFormerly: scil_evaluate_bundles_pairwise_agreement_measures.py",
    "help": "usage: scil_bundle_pairwise_comparison.py [-h] [--streamline_dice]\n [--bundle_adjency_no_overlap]\n [--disable_streamline_distance]\n [--single_compare SINGLE_COMPARE]\n [--keep_tmp] [--ratio]\n [--processes NBR]\n [--reference REFERENCE]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...] out_json\n\nEvaluate pair-wise similarity measures of bundles.\nAll tractograms must be in the same space (aligned to one reference).\n\nFor the voxel representation, the computed similarity measures are:\n bundle_adjacency_voxels, dice_voxels, w_dice_voxels, density_correlation\n volume_overlap, volume_overreach\nThe same measures are also evaluated for the endpoints.\n\nFor the streamline representation, the computed similarity measures are:\n bundle_adjacency_streamlines, dice_streamlines, streamlines_count_overlap,\n streamlines_count_overreach\n\nFormerly: scil_evaluate_bundles_pairwise_agreement_measures.py\n\npositional arguments:\n  in_bundles            Path of the input bundles.\n  out_json              Path of the output json file.\n\noptions:\n  -h, --help            show this help message and exit\n  --streamline_dice     Compute streamline-wise dice coefficient.\n                        Tractograms must be identical [False].\n  --bundle_adjency_no_overlap\n                        If set, do not count zeros in the average BA.\n  --disable_streamline_distance\n                        Will not compute the streamlines distance \n                        [False].\n  --single_compare SINGLE_COMPARE\n                        Compare inputs to this single file.\n  --keep_tmp            Will not delete the tmp folder at the end.\n  --ratio               Compute overlap and overreach as a ratio over the\n                        reference tractogram in a Tractometer-style way.\n                        Can only be used if also using the `single_compare` option.\n  --processes NBR       Number of sub-processes to start. \n                        Default: [1]\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nJson options:\n  --indent INDENT       Indent for json pretty print.\n  --sort_keys           Sort keys in output json.\n",
    "synonyms": [],
    "keywords": []
},
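For instance, the dice_voxels measure listed above reduces to the usual Dice coefficient on binarized density maps; a sketch, not the script's code:

    import numpy as np

    def dice_voxels(mask_a, mask_b):
        # Dice = 2 * |A inter B| / (|A| + |B|) on binary voxel masks.
        a = np.asarray(mask_a) > 0
        b = np.asarray(mask_b) > 0
        return 2.0 * np.count_nonzero(a & b) / (
            np.count_nonzero(a) + np.count_nonzero(b))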
{
    "name": "scil_bundle_reject_outliers",
    "docstring": "Clean a bundle (inliers/outliers) using hierarchical clustering.\nhttp://archive.ismrm.org/2015/2844.html\n\nIf spurious streamlines are dense, it is possible they will not be recognized\nas outliers. Manual cleaning may be required to overcome this limitation.",
    "help": "usage: scil_bundle_reject_outliers.py [-h]\n [--remaining_bundle REMAINING_BUNDLE]\n [--alpha ALPHA] [--display_counts]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_bundle\n\nClean a bundle (inliers/outliers) using hierarchical clustering.\nhttp://archive.ismrm.org/2015/2844.html\n\nIf spurious streamlines are dense, it is possible they will not be recognized\nas outliers. Manual cleaning may be required to overcome this limitation.\n\npositional arguments:\n  in_bundle             Fiber bundle file to remove outliers from.\n  out_bundle            Fiber bundle without outliers.\n\noptions:\n  -h, --help            show this help message and exit\n  --remaining_bundle REMAINING_BUNDLE\n                        Removed outliers.\n  --alpha ALPHA         Percent of the length of the tree that clusters of individual streamlines will be pruned. [0.6]\n  --display_counts      Print streamline count before and after filtering.\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nJson options:\n  --indent INDENT       Indent for json pretty print.\n  --sort_keys           Sort keys in output json.\n",
    "synonyms": [],
    "keywords": []
},
{
    "name": "scil_bundle_score_many_bundles_one_tractogram",
    "docstring": "This script is intended to score all bundles from a single tractogram. Each\nvalid bundle is compared to its ground truth.\nEx: It was used for the ISMRM 2015 Challenge scoring.\n\nSee also scil_bundle_score_same_bundle_many_segmentations.py to score many\nversions of the same bundle, compared to ONE ground truth / gold standard.\n\nThis script is the second part of script scil_score_tractogram, which also\nsegments the whole-brain tractogram into bundles first.\n\nHere we suppose that the bundles are already segmented and saved as follows:\n main_dir/\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (optional)\n segmented_WPC/*_wpc.trk (optional)\n IS.trk OR NC.trk (if segmented_IB is present)\n\nConfig file\n-----------\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n }\n}\n\nFormerly: scil_score_bundles.py",
    "help": "usage: scil_bundle_score_many_bundles_one_tractogram.py [-h] [--json_prefix p]\n [--gt_dir DIR]\n [--indent INDENT]\n [--sort_keys]\n [--reference REFERENCE]\n [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n gt_config bundles_dir\n\nThis script is intended to score all bundles from a single tractogram. Each\nvalid bundle is compared to its ground truth.\nEx: It was used for the ISMRM 2015 Challenge scoring.\n\nSee also scil_bundle_score_same_bundle_many_segmentations.py to score many\nversions of the same bundle, compared to ONE ground truth / gold standard.\n\nThis script is the second part of script scil_score_tractogram, which also\nsegments the whole-brain tractogram into bundles first.\n\nHere we suppose that the bundles are already segmented and saved as follows:\n main_dir/\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (optional)\n segmented_WPC/*_wpc.trk (optional)\n IS.trk OR NC.trk (if segmented_IB is present)\n\nConfig file\n-----------\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n }\n}\n\nFormerly: scil_score_bundles.py\n\nTractometry\n-----------\nGlobal connectivity metrics:\n\n- Computed by default:\n - VS: valid streamlines, belonging to a bundle (i.e. respecting all the\n criteria for that bundle; endpoints, limit_mask, gt_mask.).\n - IS: invalid streamlines. All other streamlines. IS = IC + NC.\n\n- Optional:\n - WPC: wrong path connections, streamlines connecting correct ROIs but not\n respecting the other criteria for that bundle. Such streamlines always\n exist but they are only saved separately if specified in the options.\n Else, they are merged back with the IS.\n By definition, WPC are only computed if \"limits masks\" are provided.\n - IC: invalid connections, streamlines joining an incorrect combination of\n ROIs. Use carefully; quality depends on the quality of your ROIs and no\n analysis is done on the shape of the streamlines.\n - NC: no connections. Invalid streamlines minus invalid connections.\n\n- Fidelity metrics:\n - OL: Overlap. Percentage of ground truth voxels containing streamline(s)\n for a given bundle.\n - OR: Overreach. Number of voxels containing streamline(s) when they\n shouldn't, for a given bundle. We compute two versions:\n OR_pct_vs = divided by the total number of voxels covered by the bundle\n (percentage of the voxels touched by VS).\n Values range between 0 and 100%. Values are not defined when we\n recovered no streamline for a bundle, but we set the OR_pct_vs to 0\n in that case.\n OR_pct_gt = divided by the total size of the ground truth bundle mask.\n Values could be higher than 100%.\n - f1 score: which is the same as the Dice score.\n\npositional arguments:\n  gt_config             .json dict configured as specified above.\n  bundles_dir           Directory containing all bundles.\n                        (Ex: Output directory for scil_score_tractogram).\n                        It is expected to contain a file IS.trk and \n                        files segmented_VB/*_VS.trk, with, possibly, files \n                        segmented_WPC/*_wpc.trk and segmented_IC/\n\noptions:\n  -h, --help            show this help message and exit\n  --json_prefix p       Prefix of the output json file. Ex: 'study_x_'.\n                        Suffix will be results.json. File will be saved inside bundles_dir.\n  --reference REFERENCE\n                        Reference anatomy for tck/vtk/fib/dpy file\n                        support (.nii or .nii.gz).\n  --no_bbox_check       Activate to ignore validity of the bounding box during loading / saving of \n                        tractograms (ignores the presence of invalid streamlines).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n\nAdditions to gt_config:\n  --gt_dir DIR          Root path of the ground truth files listed in the gt_config.\n                        If not set, filenames in the config file are considered \n                        as absolute paths.\n\nJson options:\n  --indent INDENT       Indent for json pretty print.\n  --sort_keys           Sort keys in output json.\n",
    "synonyms": [],
    "keywords": []
},
voxel representation (if provided).\n\nFormerly: scil_evaluate_bundles_binary_classification_measures.py", - "help": "usage: scil_bundle_score_same_bundle_many_segmentations.py [-h]\n [--streamlines_measures GOLD_STANDARD_STREAMLINES TRACTOGRAM]\n [--voxels_measures GOLD_STANDARD_MASK TRACKING MASK]\n [--processes NBR]\n [--reference REFERENCE]\n [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bundles\n [in_bundles ...]\n out_json\n\nThis script is intended to score many versions of a same bundle, compared to\nONE ground truth / gold standard.\n\nSee also scil_bundle_score_many_bundles_one_tractogram.py to score all bundles\nfrom a single tractogram by comparing each valid bundle to its ground truth.\n\nAll tractograms must be in the same space (aligned to one reference).\nThe measures can be applied to a voxel-wise or streamline-wise representation.\n\nA gold standard must be provided for the desired representation.\nA gold standard would be a segmentation from an expert or a group of experts.\nIf only the streamline-wise representation is provided without a voxel-wise\ngold standard, it will be computed from the provided streamlines.\nAt least one of the two representations is required.\n\nThe gold standard tractogram is the tractogram (whole brain most likely) from\nwhich the segmentation is performed.\nThe gold standard tracking mask is the tracking mask used by the tractography\nalgorighm to generate the gold standard tractogram.\n\nThe computed binary classification measures are:\nsensitivity, specificity, precision, accuracy, dice, kappa, youden for both\nthe streamline and voxel representation (if provided).\n\nFormerly: scil_evaluate_bundles_binary_classification_measures.py\n\npositional arguments:\n in_bundles Path of the input bundles.\n out_json Path of the output json.\n\noptions:\n -h, --help show this help message and exit\n --streamlines_measures GOLD_STANDARD_STREAMLINES TRACTOGRAM\n The gold standard bundle and the original tractogram.\n --voxels_measures GOLD_STANDARD_MASK TRACKING MASK\n The gold standard mask and the original tracking mask.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "considered", - "potential", - "result", - "likely" - ], - [ - "anatomy", - "anatomy" - ], - [ - "considered", - "highly", - "long", - "work", - "most" - ], - [ - "bundles", - "bundles" - ], - [ - "create", - "generate" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "order", - "required" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "precision", - "precision" - ], - [ - "applied", - "applied" - ], - [ - "create", - "experience", - "matter", - "thinking", - "true", - "sort" - ], - [ - "considered", - "are" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "invalid", - "valid", - "valid" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "lack", - "result", - "without" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "order", - "intended" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "space", - "space" - ], - [ - "held", - "in" - ], - [ - "true", - "truth" - ], - [ - "reliability", - "accuracy" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "streamlines", - "streamlines" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "large", - "work", - "many" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "based", - "group" - ], - [ - "individual", - "each" - ], - [ - "streamline", - "streamline" - ], - [ - "average", - "compared" - ], - [ - "long", - "two" - ], - [ - "step", - "start" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "bundles", - "bundle" - ], - [ - "data", - "tracking", - "tracking" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_shape_measures", - "docstring": "Evaluate basic measurements of bundle(s).\n\nThe computed measures are:\n - volume_info: volume, volume_endpoints\n - streamlines_info: streamlines_count, avg_length (in mm or in number of\n point), average step size, min_length, max_length.\n ** You may also get this information with scil_tractogram_print_info.py.\n - shape_info: span, curl, diameter, elongation, surface area,\n irregularity, end surface area, radius, end surface irregularity,\n mean_curvature, fractal dimension.\n ** The diameter, here, is a simple estimation using volume / length.\n For a more complex calculation, see 
scil_bundle_diameter.py.\n\nWith more than one bundle, the measures are averaged over bundles. All\ntractograms must be in the same space.\n\nThe set average contains the average measures of all input bundles. The\nmeasures that are dependent on the streamline count are weighted by the number\nof streamlines of each bundle. Each of these average measures is computed by\nfirst summing the product of the measure and the streamline count of each\nbundle, then dividing the sum by the total number of streamlines. Thus, measures\nincluding length and span are essentially averages of all the streamlines.\nOther streamline-related set measures are computed with other set averages,\nwhereas bundle-related measures are computed as an average over all bundles.\nThese measures include volume and surface area.\n\nThe fractal dimension is dependent on the voxel size and the number of voxels.\nIf data comparison is performed, the bundles MUST be in the same resolution.\n\nFormerly: scil_compute_bundle_volume.py or\nscil_evaluate_bundles_individual_measures.py", - "help": "usage: scil_bundle_shape_measures.py [-h] [--out_json OUT_JSON]\n [--group_statistics] [--no_uniformize]\n [--reference REFERENCE] [--processes NBR]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...]\n\nEvaluate basic measurements of bundle(s).\n\nThe computed measures are:\n - volume_info: volume, volume_endpoints\n - streamlines_info: streamlines_count, avg_length (in mm or in number of\n points), average step size, min_length, max_length.\n ** You may also get this information with scil_tractogram_print_info.py.\n - shape_info: span, curl, diameter, elongation, surface area,\n irregularity, end surface area, radius, end surface irregularity,\n mean_curvature, fractal dimension.\n ** The diameter, here, is a simple estimation using volume / length.\n For a more complex calculation, see scil_bundle_diameter.py.\n\nWith more than one bundle, the measures are averaged over bundles. All\ntractograms must be in the same space.\n\nThe set average contains the average measures of all input bundles. The\nmeasures that are dependent on the streamline count are weighted by the number\nof streamlines of each bundle. Each of these average measures is computed by\nfirst summing the product of the measure and the streamline count of each\nbundle, then dividing the sum by the total number of streamlines. Thus, measures\nincluding length and span are essentially averages of all the streamlines.\nOther streamline-related set measures are computed with other set averages,\nwhereas bundle-related measures are computed as an average over all bundles.\nThese measures include volume and surface area.\n\nThe fractal dimension is dependent on the voxel size and the number of voxels.\nIf data comparison is performed, the bundles MUST be in the same resolution.\n\nFormerly: scil_compute_bundle_volume.py or\nscil_evaluate_bundles_individual_measures.py\n\npositional arguments:\n in_bundles Path of the input bundles.\n\noptions:\n -h, --help show this help message and exit\n --out_json OUT_JSON Path of the output file. If not given, the output is simply printed on screen.\n --group_statistics Show average measures [False].\n --no_uniformize Do NOT automatically uniformize endpoints for the endpoints-related metrics.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --processes NBR Number of sub-processes to start. 
\n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\nReferences:\n[1] Fang-Cheng Yeh. 2020.\n Shape analysis of the human association pathways. NeuroImage.\n", - "synonyms": [ - [ - "long", - "end" - ], - [ - "total", - "number" - ], - [ - "animal", - "human", - "human" - ], - [ - "anatomy", - "anatomy" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "bundles", - "bundles" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "create", - "experience", - "matter", - "thinking", - "true", - "sort" - ], - [ - "considered", - "are" - ], - [ - "long", - "work", - "more" - ], - [ - "difference", - "point" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "order", - "set" - ], - [ - "long", - "over" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "supported", - "support" - ], - [ - "analysis", - "data", - "methods", - "study", - "analysis" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "space", - "space" - ], - [ - "held", - "in" - ], - [ - "variety", - "include" - ], - [ - "pathway", - "pathways", - "pathways" - ], - [ - "long", - "with" - ], - [ - "average", - "average" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "action", - "process", - "step", - "step" - ], - [ - "contrast", - "comparison" - ], - [ - "specific", - "related" - ], - [ - "methods", - "using" - ], - [ - "variety", - "work", - "other" - ], - [ - "view", - "see" - ], - [ - "thinking", - "simply" - ], - [ - "long", - "than" - ], - [ - "streamlines", - "streamlines" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "future", - "result", - "specific", - "variety", - "work", - "these" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "result", - "may" - ], - [ - "fundamental", - "basic" - ], - [ - "individual", - "each" - ], - [ - "larger", - "size", - "size" - ], - [ - "streamline", - "streamline" - ], - [ - "weighted", - "weighted" - ], - [ - "considered", - "form", - "meaning", - "order", - "result", - "thus" - ], - [ - "complex", - "structure", - "structures", - "complex" - ], - [ - "association", - "association" - ], - [ - "step", - "start" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "false", - "false" - ], - [ - "bundles", - 
"bundle" - ], - [ - "examine", - "evaluate" - ], - [ - "area", - "areas", - "region", - "area" - ], - [ - "increase", - "total", - "total" - ], - [ - "shape", - "structure", - "shape" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_uniformize_endpoints", - "docstring": "Uniformize streamlines' endpoints according to a defined axis.\nUseful for tractometry or models creation.\n\nThe --auto option will automatically calculate the main orientation.\nIf the input bundle is poorly defined, it is possible heuristic will be wrong.\n\nThe default is to flip each streamline so their first point's coordinate in the\ndefined axis is smaller than their last point (--swap does the opposite).\n\nThe --target_roi option will use the barycenter of the target mask to define\nthe axis. The target mask can be a binary mask or an atlas. If an atlas is\nused, labels are expected in the form of --target_roi atlas.nii.gz 2 3 5:7.\n\nFormerly: scil_uniformize_streamlines_endpoints.py", - "help": "usage: scil_bundle_uniformize_endpoints.py [-h]\n (--axis {x,y,z} | --auto | --centroid tractogram | --target_roi TARGET_ROI [TARGET_ROI ...])\n [--swap] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_bundle\n\nUniformize streamlines' endpoints according to a defined axis.\nUseful for tractometry or models creation.\n\nThe --auto option will automatically calculate the main orientation.\nIf the input bundle is poorly defined, it is possible heuristic will be wrong.\n\nThe default is to flip each streamline so their first point's coordinate in the\ndefined axis is smaller than their last point (--swap does the opposite).\n\nThe --target_roi option will use the barycenter of the target mask to define\nthe axis. The target mask can be a binary mask or an atlas. If an atlas is\nused, labels are expected in the form of --target_roi atlas.nii.gz 2 3 5:7.\n\nFormerly: scil_uniformize_streamlines_endpoints.py\n\npositional arguments:\n in_bundle Input path of the tractography file.\n out_bundle Output path of the uniformized file.\n\noptions:\n -h, --help show this help message and exit\n --axis {x,y,z} Match endpoints of the streamlines along this axis.\n SUGGESTION: Commissural = x, Association = y, Projection = z\n --auto Match endpoints of the streamlines along an automatically determined axis.\n --centroid tractogram\n Match endpoints of the streamlines to align it to a reference unique streamline (centroid).\n --target_roi TARGET_ROI [TARGET_ROI ...]\n Provide a target ROI: either a binary mask or a label map and the labels to use.\n Will align heads to be closest to the mask barycenter.\n (atlas: if no labels are provided, all labels will be used.\n --swap Swap head <-> tail convention. Can be useful when the reference is not in RAS.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "commissural", - "commissural" - ], - [ - "motor", - "auto" - ], - [ - "anatomy", - "anatomy" - ], - [ - "orientation", - "orientation" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "difference", - "point" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "direction", - "opposite" - ], - [ - "long", - "a" - ], - [ - "tool", - "useful" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "form", - "forms", - "form" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "last" - ], - [ - "held", - "in" - ], - [ - "atlas", - "atlas" - ], - [ - "clear", - "long", - "matter", - "result", - "work", - "so" - ], - [ - "create", - "provide" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "clear", - "future", - "possibility", - "potential", - "question", - "result", - "specific", - "step", - "possible" - ], - [ - "area", - "main", - "along" - ], - [ - "defined", - "function", - "defined" - ], - [ - "long", - "than" - ], - [ - "increase", - "expected" - ], - [ - "streamlines", - "streamlines" - ], - [ - "thinking", - "wrong" - ], - [ - "unique", - "variety", - "unique" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "projection", - "projection" - ], - [ - "considered", - "is" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "matter", - "question", - "does" - ], - [ - "streamline", - "streamline" - ], - [ - "association", - "association" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "defined", - "define" - ], - [ - "clear", - "result", - "determined" - ], - [ - "based", - "reported", - "according" - ], - [ - "create", - "creation" - ], - [ - "key", - "main", - "main" - ], - [ - "maps", - "map" - ], - [ - "bundles", - "bundle" - ], - [ - "large", - "larger", - "smaller" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_volume_per_label", - "docstring": "Compute bundle volume per label in mm3. This script supports anisotropic voxels\nresolution. Volume is estimated by counting the number of voxel occupied by\neach label and multiplying it by the volume of a single voxel.\n\nThe labels can be obtained by scil_bundle_label_map.py.\n\nThis estimation is typically performed at resolution around 1mm3.\n\nTo get the volume and other measures directly from the (whole) bundle, use\nscil_bundle_shape_measures.py.\n\nFormerly: scil_compute_bundle_volume_per_label.py", - "help": "usage: scil_bundle_volume_per_label.py [-h] [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n voxel_label_map bundle_name\n\nCompute bundle volume per label in mm3. This script supports anisotropic voxels\nresolution. 
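For instance, assuming a label map previously produced by scil_bundle_label_map.py (the filenames and the bundle name below are hypothetical placeholders), an invocation could look like:

    # Sketch: per-label volumes for a left AF bundle, pretty-printed json
    scil_bundle_volume_per_label.py AF_L_labels.nii.gz AF_L --indent 4 --sort_keys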
Volume is estimated by counting the number of voxels occupied by\neach label and multiplying it by the volume of a single voxel.\n\nThe labels can be obtained by scil_bundle_label_map.py.\n\nThis estimation is typically performed at a resolution of around 1mm3.\n\nTo get the volume and other measures directly from the (whole) bundle, use\nscil_bundle_shape_measures.py.\n\nFormerly: scil_compute_bundle_volume_per_label.py\n\npositional arguments:\n voxel_label_map Fiber bundle file.\n bundle_name Bundle name.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "average", - "per" - ], - [ - "work", - "and" - ], - [ - "create", - "experience", - "matter", - "thinking", - "true", - "sort" - ], - [ - "fibre", - "fiber" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "meaning", - "name" - ], - [ - "methods", - "use" - ], - [ - "total", - "estimated" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "supported", - "supports" - ], - [ - "methods", - "using" - ], - [ - "variety", - "work", - "other" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "bundles", - "bundle" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_compare_populations", - "docstring": "Performs a network-based statistical comparison for populations g1 and g2. The\noutput is a matrix of the same size as the input connectivity matrices, with\np-values at each edge.\nAll input matrices must have the same shape (NxN). For paired t-test, both\ngroups must have the same number of observations.\n\nFor example, if you have streamline count weighted matrices for a MCI and a\ncontrol group and you want to investigate differences in their connectomes:\n >>> scil_connectivity_compare_populations.py pval.npy\n --in_g1 MCI/*_sc.npy --in_g2 CTL/*_sc.npy\n\n--filtering_mask will simply multiply all input\nmatrices by the binary mask before performing the statistical comparison. This reduces the number of\nstatistical tests, useful when using --fdr or --bonferroni.\n\nFormerly: scil_compare_connectivity.py", - "help": "usage: scil_connectivity_compare_populations.py [-h] --in_g1 IN_G1 [IN_G1 ...]\n --in_g2 IN_G2 [IN_G2 ...]\n [--tail {left,right,both}]\n [--paired]\n [--fdr | --bonferroni]\n [--p_threshold THRESH OUT_FILE]\n [--filtering_mask FILTERING_MASK]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n out_pval_matrix\n\nPerforms a network-based statistical comparison for populations g1 and g2. 
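As a sketch under assumed file layouts (the group folders and the 0.05 threshold below are illustrative, not defaults), a corrected comparison could be run as:

    # Sketch: FDR-corrected comparison, keeping a binarized mask of edges at p < 0.05
    scil_connectivity_compare_populations.py pval.npy \
        --in_g1 group1/*_sc.npy --in_g2 group2/*_sc.npy \
        --fdr --p_threshold 0.05 pval_mask_p05.npy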
The\noutput is a matrix of the same size as the input connectivity matrices, with\np-values at each edge.\nAll input matrices must have the same shape (NxN). For paired t-test, both\ngroups must have the same number of observations.\n\nFor example, if you have streamline count weighted matrices for a MCI and a\ncontrol group and you want to investigate differences in their connectomes:\n >>> scil_connectivity_compare_populations.py pval.npy\n --in_g1 MCI/*_sc.npy --in_g2 CTL/*_sc.npy\n\n--filtering_mask will simply multiply all input\nmatrices by the binary mask before performing the statistical comparison. This reduces the number of\nstatistical tests, useful when using --fdr or --bonferroni.\n\nFormerly: scil_compare_connectivity.py\n\npositional arguments:\n out_pval_matrix Output matrix (.npy) containing the edges p-value.\n\noptions:\n -h, --help show this help message and exit\n --in_g1 IN_G1 [IN_G1 ...]\n List of matrices for the first population (.npy).\n --in_g2 IN_G2 [IN_G2 ...]\n List of matrices for the second population (.npy).\n --tail {left,right,both}\n Enables specification of an alternative hypothesis:\n left: mean of g1 < mean of g2,\n right: mean of g2 < mean of g1,\n both: both means are not equal (default).\n --paired Use paired sample t-test instead of population t-test.\n --in_g1 and --in_g2 must be ordered the same way.\n --fdr Perform a false discovery rate (FDR) correction for the p-values.\n Uses the number of non-zero edges as number of tests (value between 0.01 and 0.1).\n --bonferroni Perform a Bonferroni correction for the p-values.\n Uses the number of non-zero edges as number of tests.\n --p_threshold THRESH OUT_FILE\n Threshold the final p-value matrix and save the binary matrix (.npy).\n --filtering_mask FILTERING_MASK\n Binary filtering mask (.npy) to apply before computing the measures.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Rubinov, Mikail, and Olaf Sporns. \"Complex network measures of brain\n connectivity: uses and interpretations.\" Neuroimage 52.3 (2010):\n 1059-1069.\n[2] Zalesky, Andrew, Alex Fornito, and Edward T. Bullmore. 
\"Network-based\n statistic: identifying differences in brain networks.\" Neuroimage 53.4\n (2010): 1197-1207.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "clear", - "long", - "step", - "thinking", - "view", - "work", - "working", - "way" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "contrast", - "difference", - "form", - "result", - "specific", - "subject", - "true", - "unique", - "work", - "example" - ], - [ - "methods", - "use" - ], - [ - "long", - "have" - ], - [ - "left", - "left" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "matrices", - "matrices" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "tool", - "useful" - ], - [ - "applied", - "apply" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "population", - "populations" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "discovery", - "discovery" - ], - [ - "higher", - "increase", - "rate" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "contrast", - "comparison" - ], - [ - "population", - "population" - ], - [ - "network", - "networks", - "networks" - ], - [ - "differences", - "differences" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "thinking", - "simply" - ], - [ - "order", - "work", - "instead" - ], - [ - "left", - "right" - ], - [ - "left", - "before" - ], - [ - "matrices", - "matrix" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "based", - "group" - ], - [ - "individual", - "each" - ], - [ - "larger", - "size", - "size" - ], - [ - "streamline", - "streamline" - ], - [ - "weighted", - "weighted" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "complex", - "structure", - "structures", - "complex" - ], - [ - "connectivity", - "connectivity" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "false", - "false" - ], - [ - "based", - "based" - ], - [ - "network", - "networks", - "network" - ], - [ - "meaning", - "order", - "result", - "step", - "true", - "means" - ], - [ - "shape", - "structure", - "shape" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_compute_matrices", - "docstring": "This script computes a variety of measures in the form of connectivity\nmatrices. This script is made to follow\nscil_tractogram_segment_bundles_for_connectivity.py and\nuses the same labels list as input.\n\nThe script expects a folder containing all relevants bundles following the\nnaming convention LABEL1_LABEL2.trk and a text file containing the list of\nlabels that should be part of the matrices. 
The ordering of labels in the\nmatrices will follow the same order as the list.\nThis script only generates matrices in the form of arrays; it does not visualize\nor reorder the labels (node).\n\nThe parameter --similarity expects a folder with density maps\n(LABEL1_LABEL2.nii.gz) following the same naming convention as the input\ndirectory.\nThe bundles should be an averaged version in the same space. This will\ncompute the weighted-dice between each node and their homologous average\nversion.\n\nThe parameter --metrics can be used more than once and expects a map (t1, fa,\netc.) in the same space and each will generate a matrix. The average value in\nthe volume occupied by the bundle will be reported in the matrices' nodes.\n\nThe parameter --maps can be used more than once and expects a folder with\npre-computed maps (LABEL1_LABEL2.nii.gz) following the same naming convention\nas the input directory. Each will generate a matrix. The average non-zero\nvalue in the map will be reported in the matrices' nodes.\n\nThe parameter --lesion_load will compute 3 lesion(s) related matrices:\nlesion_count.npy, lesion_vol.npy, lesion_sc.npy and put it inside of a\nspecified folder. They represent the number of lesions, the total volume of\nlesion(s) and the total number of streamlines going through the lesion(s) for each\nconnection. Each connection can be seen as a 'bundle' and then something\nsimilar to scil_analyse_lesion_load.py is run for each 'bundle'.\n\nFormerly: scil_compute_connectivity.py", - "help": "usage: scil_connectivity_compute_matrices.py [-h] [--volume OUT_FILE]\n [--streamline_count OUT_FILE]\n [--length OUT_FILE]\n [--similarity IN_FOLDER OUT_FILE]\n [--maps IN_FOLDER OUT_FILE]\n [--metrics IN_FILE OUT_FILE]\n [--lesion_load IN_FILE OUT_DIR]\n [--min_lesion_vol MIN_LESION_VOL]\n [--density_weighting]\n [--no_self_connection]\n [--include_dps OUT_DIR]\n [--force_labels_list FORCE_LABELS_LIST]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_hdf5 in_labels\n\nThis script computes a variety of measures in the form of connectivity\nmatrices. This script is made to follow\nscil_tractogram_segment_bundles_for_connectivity.py and\nuses the same labels list as input.\n\nThe script expects a folder containing all relevant bundles following the\nnaming convention LABEL1_LABEL2.trk and a text file containing the list of\nlabels that should be part of the matrices. The ordering of labels in the\nmatrices will follow the same order as the list.\nThis script only generates matrices in the form of arrays; it does not visualize\nor reorder the labels (node).\n\nThe parameter --similarity expects a folder with density maps\n(LABEL1_LABEL2.nii.gz) following the same naming convention as the input\ndirectory.\nThe bundles should be an averaged version in the same space. This will\ncompute the weighted-dice between each node and their homologous average\nversion.\n\nThe parameter --metrics can be used more than once and expects a map (t1, fa,\netc.) in the same space and each will generate a matrix. The average value in\nthe volume occupied by the bundle will be reported in the matrices' nodes.\n\nThe parameter --maps can be used more than once and expects a folder with\npre-computed maps (LABEL1_LABEL2.nii.gz) following the same naming convention\nas the input directory. Each will generate a matrix. 
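Since each output is a plain NumPy array, a quick shape/content check can be done from the shell (assumes NumPy is installed; 'sc.npy' echoes the hypothetical sketch above):

    python -c "import numpy as np; m = np.load('sc.npy'); print(m.shape, m.sum())"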
The average non-zero\nvalue in the map will be reported in the matrices' nodes.\n\nThe parameter --lesion_load will compute 3 lesion(s) related matrices:\nlesion_count.npy, lesion_vol.npy, lesion_sc.npy and put it inside of a\nspecified folder. They represent the number of lesions, the total volume of\nlesion(s) and the total number of streamlines going through the lesion(s) for each\nconnection. Each connection can be seen as a 'bundle' and then something\nsimilar to scil_analyse_lesion_load.py is run for each 'bundle'.\n\nFormerly: scil_compute_connectivity.py\n\npositional arguments:\n in_hdf5 Input filename for the hdf5 container (.h5).\n Obtained from scil_tractogram_segment_bundles_for_connectivity.py.\n in_labels Labels file name (nifti).\n This generates a NxN connectivity matrix.\n\noptions:\n -h, --help show this help message and exit\n --volume OUT_FILE Output file for the volume weighted matrix (.npy).\n --streamline_count OUT_FILE\n Output file for the streamline count weighted matrix (.npy).\n --length OUT_FILE Output file for the length weighted matrix (.npy).\n --similarity IN_FOLDER OUT_FILE\n Input folder containing the averaged bundle density\n maps (.nii.gz) and output file for the similarity weighted matrix (.npy).\n --maps IN_FOLDER OUT_FILE\n Input folder containing pre-computed maps (.nii.gz)\n and output file for the weighted matrix (.npy).\n --metrics IN_FILE OUT_FILE\n Input (.nii.gz) and output file (.npy) for a metric weighted matrix.\n --lesion_load IN_FILE OUT_DIR\n Input binary mask (.nii.gz) and output directory for all lesion-related matrices.\n --min_lesion_vol MIN_LESION_VOL\n Minimum lesion volume in mm3 [7].\n --density_weighting Use density-weighting for the metric weighted matrix.\n --no_self_connection Eliminate the diagonal from the matrices.\n --include_dps OUT_DIR\n Save matrices from data_per_streamline in the output directory.\n COMMIT-related values will be summed instead of averaged.\n Will always overwrite files.\n --force_labels_list FORCE_LABELS_LIST\n Path to a labels list (.txt) in case of missing labels in the atlas.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "true", - "always" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "bundles", - "bundles" - ], - [ - "create", - "generate" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "result", - "following" - ], - [ - "reported", - "reported" - ], - [ - "order", - "order" - ], - [ - "long", - "work", - "more" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "clear", - "long", - "work", - "they" - ], - [ - "parameter", - "parameters", - "parameter" - ], - [ - "meaning", - "name" - ], - [ - "methods", - "use" - ], - [ - "naming", - "naming" - ], - [ - "long", - "a" - ], - [ - "matrices", - "matrices" - ], - [ - "held", - "on" - ], - [ - "considered", - "seen" - ], - [ - "represent", - "represent" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "form", - "forms", - "form" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "space", - "space" - ], - [ - "held", - "in" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "parameters", - "specified" - ], - [ - "average", - "average" - ], - [ - "atlas", - "atlas" - ], - [ - "specific", - "related" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "clear", - "left", - "work", - "put" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "contrast", - "form", - "forms", - "larger", - "result", - "specific", - "variety", - "similar" - ], - [ - "order", - "work", - "instead" - ], - [ - "long", - "than" - ], - [ - "streamlines", - "streamlines" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "matrices", - "matrix" - ], - [ - "step", - "follow" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "left", - "long", - "work", - "once" - ], - [ - "matter", - "question", - "thinking", - "true", - "something" - ], - [ - "unique", - "variety", - "variety" - ], - [ - "individual", - "each" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "matter", - "question", - "does" - ], - [ - "streamline", - "streamline" - ], - [ - "weighted", - "weighted" - ], - [ - "similarity", - "similarity" - ], - [ - "clear", - "work", - "made" - ], - [ - "connectivity", - "connectivity" - ], - [ - "step", - "start" - ], - [ - "work", - "all" - ], - [ - "area", - "main", - "work", - "part" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "connection", - "connections", - "connection" - ], - [ - "maps", - "maps" - ], - [ - "matter", - "question", - "case" - ], - [ - "maps", - "map" - ], - [ - "step", - "thinking", - "going" - ], - [ - "bundles", - "bundle" - ], - [ - 
"increase", - "total", - "total" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_compute_pca", - "docstring": "Script to compute PCA analysis on diffusion metrics. Output returned is all\nsignificant principal components (e.g. presenting eigenvalues > 1) in a\nconnectivity matrix format. This script can take into account all edges from\nevery subject in a population or only non-zero edges across all subjects.\n\nThe script can take directly as input a connectoflow output folder. Simply use\nthe --input_connectoflow flag. For other type of folder input, the script\nexpects a single folder containing all matrices for all subjects.\nExample:\n [in_folder]\n |--- sub-01_ad.npy\n |--- sub-01_md.npy\n |--- sub-02_ad.npy\n |--- sub-02_md.npy\n |--- ...\n\nThe plots, tables and principal components matrices will be outputted in the\ndesignated folder from the argument. If you want to move back your\nprincipal components matrices in your connectoflow output, you can use a\nsimilar bash command for all principal components:\nfor sub in `cat list_id.txt`;\ndo\n cp out_folder/${sub}_PC1.npy connectoflow_output/$sub/Compute_Connectivity/\ndone\n\nInterpretation of resulting principal components can be done by evaluating the\nloadings values for each metrics. A value near 0 means that this metric doesn't\ncontribute to this specific component whereas high positive or negative values\nmean a larger contribution. Components can then be labeled based on which\nmetric contributes the highest. For example, a principal component showing a\nhigh loading for afd_fixel and near 0 loading for all other metrics can be\ninterpreted as axonal density (see Gagnon et al. 2022 for this specific example\nor ref [3] for an introduction to PCA).\n\nEXAMPLE USAGE:\nscil_connectivity_compute_pca.py input_folder/ output_folder/\n --metrics ad fa md rd [...] --list_ids list_ids.txt", - "help": "usage: scil_connectivity_compute_pca.py [-h] --metrics METRICS [METRICS ...]\n --list_ids FILE [--not_only_common]\n [--input_connectoflow]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_folder out_folder\n\nScript to compute PCA analysis on diffusion metrics. Output returned is all\nsignificant principal components (e.g. presenting eigenvalues > 1) in a\nconnectivity matrix format. This script can take into account all edges from\nevery subject in a population or only non-zero edges across all subjects.\n\nThe script can take directly as input a connectoflow output folder. Simply use\nthe --input_connectoflow flag. For other type of folder input, the script\nexpects a single folder containing all matrices for all subjects.\nExample:\n [in_folder]\n |--- sub-01_ad.npy\n |--- sub-01_md.npy\n |--- sub-02_ad.npy\n |--- sub-02_md.npy\n |--- ...\n\nThe plots, tables and principal components matrices will be outputted in the\ndesignated folder from the argument. If you want to move back your\nprincipal components matrices in your connectoflow output, you can use a\nsimilar bash command for all principal components:\nfor sub in `cat list_id.txt`;\ndo\n cp out_folder/${sub}_PC1.npy connectoflow_output/$sub/Compute_Connectivity/\ndone\n\nInterpretation of resulting principal components can be done by evaluating the\nloadings values for each metrics. A value near 0 means that this metric doesn't\ncontribute to this specific component whereas high positive or negative values\nmean a larger contribution. Components can then be labeled based on which\nmetric contributes the highest. 
For example, a principal component showing a\nhigh loading for afd_fixel and near 0 loading for all other metrics can be\ninterpreted as axonal density (see Gagnon et al. 2022 for this specific example\nor ref [3] for an introduction to PCA).\n\nEXAMPLE USAGE:\nscil_connectivity_compute_pca.py input_folder/ output_folder/\n --metrics ad fa md rd [...] --list_ids list_ids.txt\n\npositional arguments:\n in_folder Path to the input folder. See explanation above for its expected organization.\n out_folder Path to the output folder to export graphs, tables and principal \n components matrices.\n\noptions:\n -h, --help show this help message and exit\n --metrics METRICS [METRICS ...]\n Suffixes of all metrics to include in PCA analysis (ex: ad md fa rd). \n They must be immediately followed by the .npy extension.\n --list_ids FILE Path to a .txt file containing a list of all ids.\n --not_only_common If true, will include all edges from all subjects and not only \n common edges (Not recommended)\n --input_connectoflow If true, script will assume the input folder is a Connectoflow output.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Chamberland M, Raven EP, Genc S, Duffy K, Descoteaux M, Parker GD, Tax CMW,\n Jones DK. Dimensionality reduction of diffusion MRI measures for improved\n tractometry of the human brain. Neuroimage. 2019 Oct 15;200:89-100.\n doi: 10.1016/j.neuroimage.2019.06.020. Epub 2019 Jun 20. PMID: 31228638;\n PMCID: PMC6711466.\n[2] Gagnon A., Grenier G., Bocti C., Gillet V., Lepage J.-F., Baccarelli A. A.,\n Posner J., Descoteaux M., Takser L. (2022). White matter microstructural\n variability linked to differential attentional skills and impulsive behavior\n in a pediatric population. 
Cerebral Cortex.\n https://doi.org/10.1093/cercor/bhac180\n[3] https://towardsdatascience.com/what-are-pca-loadings-and-biplots-9a7897f2e559\n \n", - "synonyms": [ - [ - "animal", - "human", - "human" - ], - [ - "positive", - "negative" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "left", - "into" - ], - [ - "variability", - "variability" - ], - [ - "axonal", - "axonal" - ], - [ - "subject", - "subjects", - "subjects" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "cortex", - "thalamus", - "cortex" - ], - [ - "left", - "from" - ], - [ - "matter", - "question", - "subject", - "matter" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "contrast", - "difference", - "form", - "result", - "specific", - "subject", - "true", - "unique", - "work", - "example" - ], - [ - "clear", - "long", - "work", - "they" - ], - [ - "methods", - "use" - ], - [ - "positive", - "positive" - ], - [ - "area", - "near" - ], - [ - "question", - "argument" - ], - [ - "thinking", - "you" - ], - [ - "specific", - "specific" - ], - [ - "examining", - "evaluating" - ], - [ - "highest", - "level", - "highest" - ], - [ - "long", - "a" - ], - [ - "clear", - "immediately" - ], - [ - "action", - "clear", - "step", - "move" - ], - [ - "matrices", - "matrices" - ], - [ - "held", - "on" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "analysis", - "data", - "methods", - "study", - "analysis" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "diffusion", - "diffusion" - ], - [ - "imaging", - "mri" - ], - [ - "future", - "will" - ], - [ - "large", - "larger", - "size", - "larger" - ], - [ - "left", - "back" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "result", - "followed" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "forms", - "specific", - "common" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "experience", - "knowledge", - "learning", - "skills" - ], - [ - "clear", - "order", - "step", - "work", - "take" - ], - [ - "variety", - "include" - ], - [ - "indicating", - "showing" - ], - [ - "clear", - "matter", - "question", - "thinking", - "true", - "view", - "work", - "what" - ], - [ - "population", - "population" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "methods", - "using" - ], - [ - "contrast", - "form", - "forms", - "larger", - "result", - "specific", - "variety", - "similar" - ], - [ - "thinking", - "simply" - ], - [ - "variety", - "work", - "other" - ], - [ - "view", - "see" - ], - [ - "involved", - "linked" - ], - [ - "increase", - "expected" - ], - [ - "left", - "subsequently", - "returned" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "meaning", - "true", - "true" - ], - [ - "areas", - "across" - ], - [ - "matrices", - "matrix" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "result", - "resulting" - ], - [ - "considered", - "is" - ], - [ - "blue", - "green", - "red", - "white", - "white" - ], - [ - "individual", - "each" - ], - [ - "increase", - "reduction" - ], - [ - "level", - "above" - ], - [ - "difference", - "meaning", - 
"result", - "mean" - ], - [ - "work", - "working", - "done" - ], - [ - "connectivity", - "connectivity" - ], - [ - "presented", - "presenting" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "total", - "100" - ], - [ - "systems", - "components" - ], - [ - "based", - "based" - ], - [ - "principal", - "principal" - ], - [ - "association", - "organization" - ], - [ - "matter", - "question", - "subject", - "subjects", - "subject" - ], - [ - "meaning", - "order", - "result", - "step", - "true", - "means" - ], - [ - "assigned", - "command" - ], - [ - "considered", - "greater", - "larger", - "potential", - "result", - "significant" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_filter", - "docstring": "Script to facilitate filtering of connectivity matrices.\nThe same could be achieved through a complex sequence of\nscil_connectivity_math.py.\n\nCan be used with any connectivity matrix from\nscil_connectivity_compute_matrices.py.\n\nFor example, a simple filtering (Jasmeen style) would be:\nscil_connectivity_filter.py out_mask.npy\n --greater_than */sc.npy 1 0.90\n --lower_than */sim.npy 2 0.90\n --greater_than */len.npy 40 0.90 -v;\n\nThis will result in a binary mask where each node with a value of 1 represents\na node with at least 90% of the population having at least 1 streamline,\n90% of the population is similar to the average (2mm) and 90% of the\npopulation having at least 40mm of average streamlines length.\n\nAll operation are stricly > or <, there is no >= or <=.\n\n--greater_than or --lower_than expect the same convention:\n MATRICES_LIST VALUE_THR POPULATION_PERC\nIt is strongly recommended (but not enforced) that the same number of\nconnectivity matrices is used for each condition.\n\nThis script performs an intersection of all conditions, meaning that all\nconditions must be met in order not to be filtered.\nIf the user wants to manually handle the requirements, --keep_condition_count\ncan be used and manually binarized using scil_connectivity_math.py\n\nFormerly: scil_filter_connectivity.py", - "help": "usage: scil_connectivity_filter.py [-h] [--lower_than [LOWER_THAN ...]]\n [--greater_than [GREATER_THAN ...]]\n [--keep_condition_count] [--inverse_mask]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_matrix_mask\n\nScript to facilitate filtering of connectivity matrices.\nThe same could be achieved through a complex sequence of\nscil_connectivity_math.py.\n\nCan be used with any connectivity matrix from\nscil_connectivity_compute_matrices.py.\n\nFor example, a simple filtering (Jasmeen style) would be:\nscil_connectivity_filter.py out_mask.npy\n --greater_than */sc.npy 1 0.90\n --lower_than */sim.npy 2 0.90\n --greater_than */len.npy 40 0.90 -v;\n\nThis will result in a binary mask where each node with a value of 1 represents\na node with at least 90% of the population having at least 1 streamline,\n90% of the population is similar to the average (2mm) and 90% of the\npopulation having at least 40mm of average streamlines length.\n\nAll operation are stricly > or <, there is no >= or <=.\n\n--greater_than or --lower_than expect the same convention:\n MATRICES_LIST VALUE_THR POPULATION_PERC\nIt is strongly recommended (but not enforced) that the same number of\nconnectivity matrices is used for each condition.\n\nThis script performs an intersection of all conditions, meaning that all\nconditions must be 
met in order not to be filtered.\nIf the user wants to manually handle the requirements, --keep_condition_count\ncan be used and the result manually binarized using scil_connectivity_math.py\n\nFormerly: scil_filter_connectivity.py", - "help": "usage: scil_connectivity_filter.py [-h] [--lower_than [LOWER_THAN ...]]\n [--greater_than [GREATER_THAN ...]]\n [--keep_condition_count] [--inverse_mask]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_matrix_mask\n\nScript to facilitate filtering of connectivity matrices.\nThe same could be achieved through a complex sequence of\nscil_connectivity_math.py.\n\nCan be used with any connectivity matrix from\nscil_connectivity_compute_matrices.py.\n\nFor example, a simple filtering (Jasmeen style) would be:\nscil_connectivity_filter.py out_mask.npy\n --greater_than */sc.npy 1 0.90\n --lower_than */sim.npy 2 0.90\n --greater_than */len.npy 40 0.90 -v;\n\nThis will result in a binary mask where each node with a value of 1 represents\na node with at least 90% of the population having at least 1 streamline,\n90% of the population being similar to the average (2mm), and 90% of the\npopulation having an average streamline length of at least 40mm.\n\nAll operations are strictly > or <; there is no >= or <=.\n\n--greater_than or --lower_than expect the same convention:\n MATRICES_LIST VALUE_THR POPULATION_PERC\nIt is strongly recommended (but not enforced) that the same number of\nconnectivity matrices is used for each condition.\n\nThis script performs an intersection of all conditions, meaning that all\nconditions must be met in order not to be filtered.\nIf the user wants to manually handle the requirements, --keep_condition_count\ncan be used and the result manually binarized using scil_connectivity_math.py\n\nFormerly: scil_filter_connectivity.py\n\npositional arguments:\n out_matrix_mask Output mask (matrix) resulting from the provided conditions (.npy).\n\noptions:\n -h, --help show this help message and exit\n --lower_than [LOWER_THAN ...]\n Lower than condition using the VALUE_THR in at least POPULATION_PERC (from MATRICES_LIST).\n See description for more details.\n --greater_than [GREATER_THAN ...]\n Greater than condition using the VALUE_THR in at least POPULATION_PERC (from MATRICES_LIST).\n See description for more details.\n --keep_condition_count\n Report the number of condition(s) that pass/fail rather than a binary mask.\n --inverse_mask Inverse the final mask. 0 where all conditions are respected and 1 where at least one fails.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "application", - "database", - "user" - ], - [ - "total", - "number" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "total", - "90" - ], - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "order", - "order" - ], - [ - "considered", - "involved", - "result", - "having" - ], - [ - "long", - "work", - "more" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "conditions", - "conditions" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "contrast", - "difference", - "form", - "result", - "specific", - "subject", - "true", - "unique", - "work", - "example" - ], - [ - "lack", - "loss", - "result", - "result" - ], - [ - "clear", - "considered", - "create", - "form", - "manner", - "matter", - "result", - "subject", - "thinking", - "true", - "view", - "work", - "rather" - ], - [ - "long", - "a" - ], - [ - "total", - "40" - ], - [ - "matrices", - "matrices" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "action", - "clear", - "considered", - "future", - "matter", - "possibility", - "potential", - "question", - "result", - "specific", - "any" - ], - [ - "long", - "with" - ], - [ - "average", - "average" - ], - [ - "population", - "population" - ], - [ - "binary", - "binary" - ], - [ - "represent", - "represents" - ], - [ - "meaning", - "true", - "meaning" - ], - [ - "methods", - "using" - ], - [ - "contrast", - "form", - "forms", - "larger", - "result", - "specific", - "variety", - "similar" - ], - [ - "view", - "see" - ], - [ - "reported", - "report" - ], - [ - "long", - "than" - ], - [ - "streamlines", - "streamlines" - ], - [ - "matrices", - "matrix" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - 
"result", - "resulting" - ], - [ - "considered", - "is" - ], - [ - "pass", - "pass" - ], - [ - "area", - "work", - "where" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "higher", - "lower" - ], - [ - "streamline", - "streamline" - ], - [ - "clear", - "long", - "question", - "result", - "work", - "there" - ], - [ - "complex", - "structure", - "structures", - "complex" - ], - [ - "connectivity", - "connectivity" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "supported", - "strongly" - ], - [ - "clear", - "result", - "work", - "could" - ], - [ - "greater", - "greater" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_graph_measures", - "docstring": "Evaluate graph theory measures from connectivity matrices.\nA length weighted and a streamline count weighted matrix are required since\nsome measures require one or the other.\n\nThis script evaluates the measures one subject at the time. To generate a\npopulation dictionary (similarly to other scil_connectivity_*.py scripts), use\nthe --append_json option as well as using the same output filename.\n>>> for i in hcp/*/; do scil_connectivity_graph_measures.py ${i}/sc_prob.npy\n ${i}/len_prob.npy hcp_prob.json --append_json --avg_node_wise; done\n\nSome measures output one value per node, the default behavior is to list\nthem all into a list. To obtain only the average use the\n--avg_node_wise option.\n\nThe computed connectivity measures are:\ncentrality, modularity, assortativity, participation, clustering,\nnodal_strength, local_efficiency, global_efficiency, density, rich_club,\npath_length, edge_count, omega, sigma\n\nFor more details about the measures, please refer to\n- https://sites.google.com/site/bctnet/measures\n- https://github.com/aestrivex/bctpy/wiki\n\nThis script is under the GNU GPLv3 license, for more detail please refer to\nhttps://www.gnu.org/licenses/gpl-3.0.en.html\n\nFormerly: scil_evaluate_connectivity_graph_measures.py", - "help": "usage: scil_connectivity_graph_measures.py [-h]\n [--filtering_mask FILTERING_MASK]\n [--avg_node_wise] [--append_json]\n [--small_world] [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_conn_matrix in_length_matrix\n out_json\n\nEvaluate graph theory measures from connectivity matrices.\nA length weighted and a streamline count weighted matrix are required since\nsome measures require one or the other.\n\nThis script evaluates the measures one subject at the time. To generate a\npopulation dictionary (similarly to other scil_connectivity_*.py scripts), use\nthe --append_json option as well as using the same output filename.\n>>> for i in hcp/*/; do scil_connectivity_graph_measures.py ${i}/sc_prob.npy\n ${i}/len_prob.npy hcp_prob.json --append_json --avg_node_wise; done\n\nSome measures output one value per node, the default behavior is to list\nthem all into a list. 
To obtain only the average use the\n--avg_node_wise option.\n\nThe computed connectivity measures are:\ncentrality, modularity, assortativity, participation, clustering,\nnodal_strength, local_efficiency, global_efficiency, density, rich_club,\npath_length, edge_count, omega, sigma\n\nFor more details about the measures, please refer to\n- https://sites.google.com/site/bctnet/measures\n- https://github.com/aestrivex/bctpy/wiki\n\nThis script is under the GNU GPLv3 license, for more detail please refer to\nhttps://www.gnu.org/licenses/gpl-3.0.en.html\n\nFormerly: scil_evaluate_connectivity_graph_measures.py\n\npositional arguments:\n in_conn_matrix Input connectivity matrix (.npy).\n Typically a streamline count weighted matrix.\n in_length_matrix Input length weighted matrix (.npy).\n out_json Path of the output json.\n\noptions:\n -h, --help show this help message and exit\n --filtering_mask FILTERING_MASK\n Binary filtering mask to apply before computing the measures.\n --avg_node_wise Return a single value for node-wise measures.\n --append_json If the file already exists, will append to the dictionary.\n --small_world Compute measure related to small worldness (omega and sigma).\n This option is much slower.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\n[1] Rubinov, Mikail, and Olaf Sporns. \"Complex network measures of brain\n connectivity: uses and interpretations.\" Neuroimage 52.3 (2010):\n 1059-1069.\n", - "synonyms": [ - [ - "contrast", - "highly", - "similarly" - ], - [ - "create", - "generate" - ], - [ - "order", - "required" - ], - [ - "average", - "per" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "large", - "larger", - "small" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "create", - "experience", - "matter", - "thinking", - "true", - "sort" - ], - [ - "left", - "into" - ], - [ - "long", - "work", - "more" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "attention", - "experience", - "long", - "result", - "work", - "much" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "meaning", - "refer" - ], - [ - "long", - "a" - ], - [ - "matrices", - "matrices" - ], - [ - "applied", - "apply" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "thinking", - "i" - ], - [ - "future", - "will" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "long", - "result", - "work", - "working", - "time" - ], - [ - "average", - "average" - ], - [ - "population", - "population" - ], - [ - "specific", - "related" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "variety", - "work", - "other" - ], - [ - "exist", - "exists" - ], - [ - "left", - "before" - ], - [ - "matrices", - "matrix" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", 
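A minimal single-subject sketch, assuming hypothetical inputs sc_prob.npy and len_prob.npy (e.g. from scil_connectivity_compute_matrices.py) and an optional node mask produced by scil_connectivity_filter.py:

    scil_connectivity_graph_measures.py sc_prob.npy len_prob.npy subj.json \
        --filtering_mask node_mask.npy --avg_node_wise --small_world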
- "is" - ], - [ - "streamline", - "streamline" - ], - [ - "weighted", - "weighted" - ], - [ - "work", - "working", - "done" - ], - [ - "complex", - "structure", - "structures", - "complex" - ], - [ - "connectivity", - "connectivity" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "network", - "networks", - "network" - ], - [ - "examine", - "evaluate" - ], - [ - "matter", - "question", - "subject", - "subjects", - "subject" - ], - [ - "considered", - "experience", - "large", - "long", - "result", - "variety", - "work", - "working", - "well" - ], - [ - "result", - "since" - ], - [ - "large", - "work", - "some" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_hdf5_average_density_map", - "docstring": "Compute a density map for each connection from a hdf5 file.\nTypically use after scil_tractogram_segment_bundles_for_connectivity.py in\norder to obtain the average density map of each connection to allow the use\nof --similarity in scil_connectivity_compute_matrices.py.\n\nThis script is parallelized, but will run much slower on non-SSD if too many\nprocesses are used. The output is a directory containing the thousands of\nconnections:\nout_dir/\n |-- LABEL1_LABEL1.nii.gz\n |-- LABEL1_LABEL2.nii.gz\n |-- [...]\n |-- LABEL90_LABEL90.nii.gz\n\nFormerly: scil_compute_hdf5_average_density_map.py", - "help": "usage: scil_connectivity_hdf5_average_density_map.py [-h] [--binary]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_hdf5 [in_hdf5 ...]\n out_dir\n\nCompute a density map for each connection from a hdf5 file.\nTypically use after scil_tractogram_segment_bundles_for_connectivity.py in\norder to obtain the average density map of each connection to allow the use\nof --similarity in scil_connectivity_compute_matrices.py.\n\nThis script is parallelized, but will run much slower on non-SSD if too many\nprocesses are used. The output is a directory containing the thousands of\nconnections:\nout_dir/\n |-- LABEL1_LABEL1.nii.gz\n |-- LABEL1_LABEL2.nii.gz\n |-- [...]\n |-- LABEL90_LABEL90.nii.gz\n\nFormerly: scil_compute_hdf5_average_density_map.py\n\npositional arguments:\n in_hdf5 List of HDF5 filenames (.h5) from scil_tractogram_segment_bundles_for_connectivity.py.\n out_dir Path of the output directory.\n\noptions:\n -h, --help show this help message and exit\n --binary Binarize density maps before the population average.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "order", - "order" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "attention", - "experience", - "long", - "result", - "work", - "much" - ], - [ - "left", - "result", - "when" - ], - [ - "clear", - "long", - "too" - ], - [ - "methods", - "use" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "average", - "average" - ], - [ - "population", - "population" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "left", - "after" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "left", - "before" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "large", - "work", - "many" - ], - [ - "individual", - "each" - ], - [ - "connect", - "connected", - "connection", - "connections", - "connections" - ], - [ - "order", - "allow" - ], - [ - "similarity", - "similarity" - ], - [ - "step", - "start" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "connection", - "connections", - "connection" - ], - [ - "maps", - "maps" - ], - [ - "maps", - "map" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_math", - "docstring": "Performs an operation on a list of matrices. The supported operations are\nlisted below.\n\nSome operations such as multiplication or addition accept float value as\nparameters instead of matrices.\n> scil_connectivity_math.py multiplication mat.npy 10 mult_10.npy", - "help": "usage: scil_connectivity_math.py [-h] [--data_type DATA_TYPE]\n [--exclude_background]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference}\n in_matrices [in_matrices ...] out_matrix\n\nPerforms an operation on a list of matrices. 
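A minimal sketch, assuming hypothetical per-subject decompose.h5 files produced by scil_tractogram_segment_bundles_for_connectivity.py:

    scil_connectivity_hdf5_average_density_map.py subj_*/decompose.h5 avg_density_maps/ --processes 4
    # avg_density_maps/ can then be passed to --similarity in scil_connectivity_compute_matrices.py.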
    {
        "name": "scil_connectivity_math",
        "docstring": "Performs an operation on a list of matrices. The supported operations are\nlisted below.\n\nSome operations, such as multiplication or addition, accept a float value as\na parameter instead of a matrix:\n> scil_connectivity_math.py multiplication mat.npy 10 mult_10.npy",
        "help": "usage: scil_connectivity_math.py [-h] [--data_type DATA_TYPE] [--exclude_background] [-v [{DEBUG,INFO,WARNING}]] [-f] {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference} in_matrices [in_matrices ...] out_matrix\n\n(Docstring as above.)\n\n  lower_threshold: MAT THRESHOLD. All values below the threshold are set to zero; all values above the threshold are set to one.\n  upper_threshold: MAT THRESHOLD. All values below the threshold are set to one; all values above the threshold are set to zero. Equivalent to lower_threshold followed by an inversion.\n  lower_threshold_eq: MAT THRESHOLD. All values below the threshold are set to zero; all values above or equal to the threshold are set to one.\n  upper_threshold_eq: MAT THRESHOLD. All values below or equal to the threshold are set to one; all values above the threshold are set to zero. Equivalent to lower_threshold followed by an inversion.\n  lower_threshold_otsu: MAT. All values below or equal to the Otsu threshold are set to zero; all values above it are set to one. (Otsu's method is an algorithm for automatic matrix thresholding of the background.)\n  upper_threshold_otsu: MAT. All values below the Otsu threshold are set to one; all values above or equal to it are set to zero. Equivalent to lower_threshold_otsu followed by an inversion.\n  lower_clip: MAT THRESHOLD. All values below the threshold are set to the threshold.\n  upper_clip: MAT THRESHOLD. All values above the threshold are set to the threshold.\n  absolute_value: MAT. All negative values become positive.\n  round: MAT. Round all decimal values to the closest integer.\n  ceil: MAT. Ceil all decimal values to the next integer.\n  floor: MAT. Floor all decimal values to the previous integer.\n  normalize_sum: MAT. Normalize the matrix so the sum of all values is one.\n  normalize_max: MAT. Normalize the matrix so the maximum value is one.\n  log_10: MAT. Apply a log (base 10) to all non-zero values of a matrix.\n  log_e: MAT. Apply a natural log to all non-zero values of a matrix.\n  convert: MAT. Perform no operation, but simply change the data type.\n  invert: MAT. Operation on a binary matrix to interchange 0s and 1s in a binary mask.\n  addition: MATs. Add multiple matrices together.\n  subtraction: MAT_1 MAT_2. Subtract the first matrix by the second (MAT_1 - MAT_2).\n  multiplication: MATs. Multiply multiple matrices together (danger of underflow and overflow).\n  division: MAT_1 MAT_2. Divide the first matrix by the second (danger of underflow and overflow). Zero values are ignored and excluded from the operation.\n  mean: MATs. Compute the mean of matrices. If a single 4D matrix is provided, average along the last dimension.\n  std: MATs. Compute the standard deviation average of multiple matrices. If a single 4D matrix is provided, compute the STD along the last dimension.\n  correlation: MATs. Computes the correlation of the 3x3x3 neighborhood of each voxel, for all pairs of input matrices. The final matrix is the average correlation (through all pairs). For a given pair of matrices: background is considered as 0, which may lead to very high correlations close to the border of the background regions, or very poor ones if the background in both matrices differs; images are zero-padded, so for the same reason as above, correlations may be very high if you have data close to the border of the matrix; NaN values (if a voxel's neighborhood is entirely uniform; std 0) are replaced by 0 if at least one neighborhood was entirely background, 1 if the voxel's neighborhoods are uniform in both matrices, and 0 if the voxel's neighborhood is uniform in one matrix but not the other. UPDATE AS OF VERSION 2.0: random noise was previously added in the process to help avoid NaN values; it is now replaced by either 0 or 1 as explained above.\n  union: MATs. Operation on binary matrices to keep voxels that are non-zero in at least one file.\n  intersection: MATs. Operation on binary matrices to keep the voxels that are non-zero and present in all files.\n  difference: MAT_1 MAT_2. Operation on binary matrices to keep the voxels from the first file that are not in the second file (non-zeros).\n\npositional arguments:\n  operation  The type of operation to be performed on the matrices (see the list above).\n  in_matrices  The list of matrices files or parameters.\n  out_matrix  Output matrix path.\n\noptions:\n  -h, --help  Show this help message and exit.\n  --data_type DATA_TYPE  Data type of the output image. Use the format: uint8, float16, int32.\n  --exclude_background  Does not affect the background of the original matrices.\n  -v [{DEBUG,INFO,WARNING}]  Produces verbose output depending on the provided level. Default level is warning; default when using -v is info.\n  -f  Force overwriting of the output files.",
        "synonyms": [],
        "keywords": []
    },
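A short sketch combining two of the documented operations (the filenames are hypothetical): binarize a streamline-count matrix, then intersect it with another mask:

    scil_connectivity_math.py lower_threshold_eq sc.npy 1 sc_bin.npy
    scil_connectivity_math.py intersection sc_bin.npy len_mask.npy common_mask.npy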
"result", - "work", - "that" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "result", - "followed" - ], - [ - "random", - "random" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "last" - ], - [ - "held", - "in" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "average", - "average" - ], - [ - "clear", - "long", - "matter", - "result", - "work", - "so" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "difference", - "difference" - ], - [ - "algorithm", - "algorithm" - ], - [ - "binary", - "binary" - ], - [ - "thinking", - "simply" - ], - [ - "variety", - "work", - "other" - ], - [ - "methods", - "using" - ], - [ - "order", - "work", - "instead" - ], - [ - "process", - "processes", - "step", - "process" - ], - [ - "area", - "main", - "along" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "supported", - "supported" - ], - [ - "matrices", - "matrix" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "result", - "may" - ], - [ - "considered", - "subsequently", - "was" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "individual", - "each" - ], - [ - "matter", - "question", - "does" - ], - [ - "level", - "above" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "large", - "larger", - "variety", - "work", - "addition" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "future", - "held", - "step", - "next" - ], - [ - "area", - "neighborhood" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "large", - "work", - "some" - ], - [ - "considered", - "become" - ], - [ - "considered", - "specific", - "variety", - "such" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_normalize", - "docstring": "Normalize a connectivity matrix coming from\nscil_tractogram_segment_bundles_for_connectivity.py.\n3 categories of normalization are available:\n-- Edge attributes\n - length: Multiply each edge by the average bundle length.\n Compensate for far away connections when using interface seeding.\n Cannot be used with inverse_length.\n\n - inverse_length: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n Cannot be used with length.\n\n - bundle_volume: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n\n-- Node attributes (Mutually exclusive)\n - parcel_volume: Divide each edge by the sum of node volume.\n Compensate for the likelihood of ending in the node.\n Compensate seeding bias when using interface seeding.\n\n - parcel_surface: Divide each edge by the sum of the node surface.\n Compensate for the likelihood of ending in the node.\n Compensate for seeding bias when using interface seeding.\n\n-- Matrix scaling (Mutually exclusive)\n - max_at_one: Maximum value of the matrix will be set to one.\n - 
sum_to_one: Ensure the sum of all edges weight is one\n - log_10: Apply a base 10 logarithm to all edges weight\n\nThe volume and length matrix should come from the\nscil_tractogram_segment_bundles_for_connectivity.py script.\n\nA review of the type of normalization is available in:\nColon-Perez, Luis M., et al. \"Dimensionless, scale-invariant, edge weight\nmetric for the study of complex structural networks.\" PLOS one 10.7 (2015).\n\nHowever, the proposed weighting of edge presented in this publication is not\nimplemented.\n\nFormerly: scil_normalize_connectivity.py", - "help": "usage: scil_connectivity_normalize.py [-h]\n [--length LENGTH_MATRIX | --inverse_length LENGTH_MATRIX]\n [--bundle_volume VOLUME_MATRIX]\n [--parcel_volume ATLAS LABELS_LIST | --parcel_surface ATLAS LABELS_LIST]\n [--max_at_one | --sum_to_one | --log_10]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_matrix out_matrix\n\nNormalize a connectivity matrix coming from\nscil_tractogram_segment_bundles_for_connectivity.py.\n3 categories of normalization are available:\n-- Edge attributes\n - length: Multiply each edge by the average bundle length.\n Compensate for far away connections when using interface seeding.\n Cannot be used with inverse_length.\n\n - inverse_length: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n Cannot be used with length.\n\n - bundle_volume: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n\n-- Node attributes (Mutually exclusive)\n - parcel_volume: Divide each edge by the sum of node volume.\n Compensate for the likelihood of ending in the node.\n Compensate seeding bias when using interface seeding.\n\n - parcel_surface: Divide each edge by the sum of the node surface.\n Compensate for the likelihood of ending in the node.\n Compensate for seeding bias when using interface seeding.\n\n-- Matrix scaling (Mutually exclusive)\n - max_at_one: Maximum value of the matrix will be set to one.\n - sum_to_one: Ensure the sum of all edges weight is one\n - log_10: Apply a base 10 logarithm to all edges weight\n\nThe volume and length matrix should come from the\nscil_tractogram_segment_bundles_for_connectivity.py script.\n\nA review of the type of normalization is available in:\nColon-Perez, Luis M., et al. \"Dimensionless, scale-invariant, edge weight\nmetric for the study of complex structural networks.\" PLOS one 10.7 (2015).\n\nHowever, the proposed weighting of edge presented in this publication is not\nimplemented.\n\nFormerly: scil_normalize_connectivity.py\n\npositional arguments:\n in_matrix Input connectivity matrix. This is typically a streamline_count matrix (.npy).\n out_matrix Output normalized matrix (.npy).\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nEdge-wise options:\n --length LENGTH_MATRIX\n Length matrix used for edge-wise multiplication.\n --inverse_length LENGTH_MATRIX\n Length matrix used for edge-wise division.\n --bundle_volume VOLUME_MATRIX\n Volume matrix used for edge-wise division.\n --parcel_volume ATLAS LABELS_LIST\n Atlas and labels list for edge-wise division.\n --parcel_surface ATLAS LABELS_LIST\n Atlas and labels list for edge-wise division.\n\nScaling options:\n --max_at_one Scale matrix with maximum value at one.\n --sum_to_one Scale matrix with sum of all elements at one.\n --log_10 Apply a base 10 logarithm to the matrix.\n", - "synonyms": [ - [ - "volume", - "volumes", - "volume" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "matter", - "question", - "subject", - "matter" - ], - [ - "left", - "result", - "when" - ], - [ - "seeding", - "seeding" - ], - [ - "order", - "set" - ], - [ - "clear", - "considered", - "long", - "result", - "far" - ], - [ - "analysis", - "clinical", - "scientific", - "studies", - "study", - "study" - ], - [ - "long", - "a" - ], - [ - "applied", - "apply" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "step", - "work", - "come" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "presented", - "presented" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "proposed", - "proposed" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "atlas", - "atlas" - ], - [ - "average", - "average" - ], - [ - "network", - "networks", - "networks" - ], - [ - "structural", - "structural" - ], - [ - "methods", - "using" - ], - [ - "matrices", - "matrix" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "blue", - "green", - "red", - "white", - "white" - ], - [ - "large", - "big" - ], - [ - "individual", - "each" - ], - [ - "connect", - "connected", - "connection", - "connections", - "connections" - ], - [ - "left", - "away" - ], - [ - "complex", - "structure", - "structures", - "complex" - ], - [ - "connectivity", - "connectivity" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "possibility", - "likelihood" - ], - [ - "bundles", - "bundle" - ], - [ - "clear", - "considered", - "result", - "however" - ], - [ - "exist", - "cannot" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_pairwise_agreement", - "docstring": "Evaluate pair-wise similarity measures of connectivity matrix.\n\nThe computed similarity measures are:\nsum of square difference and pearson correlation coefficent\n\nFormerly: scil_evaluate_connectivity_pairwaise_agreement_measures.py", - "help": "usage: scil_connectivity_pairwise_agreement.py [-h] [--single_compare matrix]\n [--normalize] [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_matrices [in_matrices ...]\n out_json\n\nEvaluate pair-wise similarity measures of connectivity 
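A minimal sketch, assuming a hypothetical streamline-count matrix sc.npy and the atlas/labels pair used during decomposition:

    scil_connectivity_normalize.py sc.npy sc_norm.npy \
        --parcel_volume atlas.nii.gz labels_list.txt --max_at_one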
    {
        "name": "scil_connectivity_pairwise_agreement",
        "docstring": "Evaluate pair-wise similarity measures of connectivity matrices.\n\nThe computed similarity measures are:\nsum of squared differences and Pearson correlation coefficient\n\nFormerly: scil_evaluate_connectivity_pairwaise_agreement_measures.py",
        "help": "usage: scil_connectivity_pairwise_agreement.py [-h] [--single_compare matrix] [--normalize] [--indent INDENT] [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f] in_matrices [in_matrices ...] out_json\n\n(Docstring as above.)\n\npositional arguments:\n  in_matrices  Path of the input matrices.\n  out_json  Path of the output json file.\n\noptions:\n  -h, --help  Show this help message and exit.\n  --single_compare matrix  Compare inputs to this single file. (Else, compute all pairs in in_matrices.)\n  --normalize  If set, will normalize all matrices from zero to one.\n  -v [{DEBUG,INFO,WARNING}]  Produces verbose output depending on the provided level. Default level is warning; default when using -v is info.\n  -f  Force overwriting of the output files.\n\nJson options:\n  --indent INDENT  Indent for json pretty print.\n  --sort_keys  Sort keys in output json.",
        "synonyms": [],
        "keywords": []
    },
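A minimal sketch, assuming hypothetical per-subject matrices under subj_*/:

    scil_connectivity_pairwise_agreement.py subj_*/sc.npy agreement.json --normalize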
    {
        "name": "scil_connectivity_print_filenames",
        "docstring": "Output the list of filenames using the coordinates from a binary connectivity\nmatrix. Typically used to move around files that are considered valid after\nthe scil_connectivity_filter.py script.\n\nExample:\n# Keep connections with more than 1000 streamlines for 100% of a population\nscil_connectivity_filter.py filtering_mask.npy\n    --greater_than */streamlines_count.npy 1000 1.0\nscil_connectivity_print_filenames.py filtering_mask.npy\n    labels_list.txt pass.txt\nfor file in $(cat pass.txt);\n    do mv ${SOMEWHERE}/${FILE} ${SOMEWHERE_ELSE}/;\ndone\n\nFormerly: scil_print_connectivity_filenames.py",
        "help": "usage: scil_connectivity_print_filenames.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f] in_matrix labels_list out_txt\n\n(Docstring as above.)\n\npositional arguments:\n  in_matrix  Binary matrix in numpy (.npy) format. Typically from scil_connectivity_filter.py.\n  labels_list  List saved by the decomposition script.\n  out_txt  Output text file containing all filenames.\n\noptions:\n  -h, --help  Show this help message and exit.\n  -v [{DEBUG,INFO,WARNING}]  Produces verbose output depending on the provided level. Default level is warning; default when using -v is info.\n  -f  Force overwriting of the output files.",
        "synonyms": [],
        "keywords": []
    },
    {
        "name": "scil_connectivity_reorder_rois",
        "docstring": "Re-order one or many connectivity matrices using a text file format.\nThe first row holds the (x) coordinates and the second row the (y); values\nmust be space-separated. The resulting matrix does not have to be square\n(unequal numbers of x and y are supported).\n\nThe values refer to the coordinates (starting at 0) in the matrix, but if the\n--labels_list parameter is used, the values will refer to labels, which will\nbe converted to the appropriate coordinates. This file must be the same as the\none provided to scil_tractogram_segment_bundles_for_connectivity.py.\n\nTo subsequently use scil_visualize_connectivity.py with a lookup table, you\nmust use a label-based reordering json and use --labels_list.\n\nYou can also use the Optimal Leaf Ordering (OLO) algorithm to transform a\nsparse matrix into an ordering that reduces the matrix bandwidth. The output\nfile can then be re-used with --in_ordering. Only one input can be used with\nthis option; we recommend an average streamline count or volume matrix.\n\nFormerly: scil_reorder_connectivity.py",
        "help": "usage: scil_connectivity_reorder_rois.py [-h] (--in_ordering IN_ORDERING | --optimal_leaf_ordering OUT_FILE) [--out_suffix OUT_SUFFIX] [--out_dir OUT_DIR] [--labels_list LABELS_LIST] [-v [{DEBUG,INFO,WARNING}]] [-f] in_matrices [in_matrices ...]\n\n(Docstring as above.)\n\npositional arguments:\n  in_matrices  Connectivity matrices in .npy or .txt format.\n\noptions:\n  -h, --help  Show this help message and exit.\n  --in_ordering IN_ORDERING  Txt file with the first row as x and the second as y.\n  --optimal_leaf_ordering OUT_FILE  Output a text file with an ordering that aligns structures along the diagonal.\n  --out_suffix OUT_SUFFIX  Suffix for the output matrix filename.\n  --out_dir OUT_DIR  Output directory for the re-ordered matrices.\n  --labels_list LABELS_LIST  List saved by the decomposition script; --in_ordering must then contain labels rather than coordinates (.txt).\n  -v [{DEBUG,INFO,WARNING}]  Produces verbose output depending on the provided level. Default level is warning; default when using -v is info.\n  -f  Force overwriting of the output files.\n\n[1] Rubinov, Mikail, and Olaf Sporns. \"Complex network measures of brain connectivity: uses and interpretations.\" NeuroImage 52.3 (2010): 1059-1069.",
        "synonyms": [],
        "keywords": []
    },
    {
        "name": "scil_denoising_nlmeans",
        "docstring": "Script to denoise a dataset with the Non-Local Means algorithm.\n\nFormerly: scil_run_nlmeans.py",
        "help": "usage: scil_denoising_nlmeans.py [-h] [--mask] [--sigma float] [--log LOGFILE] [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f] in_image out_image number_coils\n\n(Docstring as above.)\n\npositional arguments:\n  in_image  Path of the image file to denoise.\n  out_image  Path to save the denoised image file.\n  number_coils  Number of receiver coils of the scanner. Use number_coils=1 in the case of a SENSE (GE, Philips) reconstruction and number_coils >= 1 for GRAPPA reconstruction (Siemens). number_coils=4 works well for the 1.5T in Sherbrooke. Use number_coils=0 if the noise is considered Gaussian distributed.\n\noptions:\n  -h, --help  Show this help message and exit.\n  --mask  Path to a binary mask. Only the data inside the mask will be used for computations.\n  --sigma float  The standard deviation of the noise to use instead of computing it automatically.\n  --log LOGFILE  If supplied, name of the text file to store the logs.\n  --processes NBR  Number of sub-processes to start. Default: [1]\n  -v [{DEBUG,INFO,WARNING}]  Produces verbose output depending on the provided level. Default level is warning; default when using -v is info.\n  -f  Force overwriting of the output files.",
        "synonyms": [],
        "keywords": []
    },
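A minimal sketch, assuming a hypothetical SENSE-reconstructed dataset (hence number_coils=1) and a brain mask:

    scil_denoising_nlmeans.py dwi.nii.gz dwi_denoised.nii.gz 1 --mask brain_mask.nii.gz --processes 4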
    {
        "name": "scil_dki_metrics",
        "docstring": "Script to compute the Diffusion Kurtosis Imaging (DKI) and Mean Signal DKI\n(MSDKI) metrics. DKI is a multi-shell diffusion model. The input DWI needs\nto be multi-shell, i.e. multi-bvalued.\n\nSince the diffusion kurtosis model involves the estimation of a large number\nof parameters, and since the non-Gaussian components of the diffusion signal\nare more sensitive to artefacts, you should really denoise your DWI volume\nbefore using this DKI script (e.g. with scil_denoising_nlmeans.py). Moreover,\nto remove biases due to fiber dispersion, fiber crossings and other mesoscopic\nproperties of the underlying tissue, MSDKI does a powder-average of the DWI\nover all directions, thus removing the orientational dependencies and creating\nan alternative mean kurtosis map.\n\nDKI is also known to be vulnerable to artefacted voxels induced by the low\nradial diffusivities of aligned white matter (CC, CST voxels). Since it is\nvery hard to capture non-Gaussian information due to the low decays in the\nradial direction, its kurtosis estimates have very low robustness. Noisy\nkurtosis estimates tend to be negative, and their absolute values can be\norders of magnitude higher than typical kurtosis values. Consequently, these\nnegative kurtosis values heavily propagate to the mean and radial kurtosis\nmetrics. This is well reported in [Rafael Henriques MSc thesis 2012,\nchapter 3]. Two ways to overcome this issue: i) compute the kurtosis values\nfrom powder-averaged MSDKI, or ii) perform 3D Gaussian smoothing. On\npowder-averaged signal decays, you do not have this low-diffusivity issue, and\nyour kurtosis estimates have much higher precision (additionally, they are\nindependent of the fODF).\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags listed in the \"Metrics files\nflags\" section. If --not_all is set, only the metrics specified explicitly by\nthe flags will be output.\n\nThis script directly comes from the DIPY example gallery and the references\ntherein:\n[1] examples_built/reconst_dki/#example-reconst-dki\n[2] examples_built/reconst_msdki/#example-reconst-msdki\n\nFormerly: scil_compute_kurtosis_metrics.py",
        "help": "usage: scil_dki_metrics.py [-h] [--mask MASK] [--tolerance tol] [--skip_b0_check] [--min_k MIN_K] [--max_k MAX_K] [--smooth SMOOTH] [--not_all] [--ak file] [--mk file] [--rk file] [--msk file] [--dki_fa file] [--dki_md file] [--dki_ad file] [--dki_rd file] [--dki_residual file] [--msd file] [-v [{DEBUG,INFO,WARNING}]] [-f] in_dwi in_bval in_bvec\n\n(Docstring as above.)\n\npositional arguments:\n  in_dwi  Path of the input multi-shell DWI dataset.\n  in_bval  Path of the b-value file, in FSL format.\n  in_bvec  Path of the b-vector file, in FSL format.\n\noptions:\n  -h, --help  Show this help message and exit.\n  --mask MASK  Path to a binary mask. Only data inside the mask will be used for computations and reconstruction. [Default: None]\n  --tolerance tol  The tolerated gap between the b-values to extract and the current b-value. [Default: 20] * Note. We would expect to find at least one b-value in the range [0, tolerance]. To skip this check, use --skip_b0_check.\n  --skip_b0_check  By default, we supervise that at least one b0 exists in your data (i.e. b-values below the default --tolerance). Use this option to allow continuing even if the minimum b-value is suspiciously high. Use with care, and only if you understand your data.\n  --min_k MIN_K  Minimum kurtosis value in the output maps (ak, mk, rk). In theory, -3/7 is the min kurtosis limit for regions that consist of water confined to spherical pores (see the DIPY example and documentation). [Default: 0.0]\n  --max_k MAX_K  Maximum kurtosis value in the output maps (ak, mk, rk). In theory, 10 is the max kurtosis limit for regions that consist of water confined to spherical pores (see the DIPY example and documentation). [Default: 3.0]\n  --smooth SMOOTH  Smooth input DWI with a 3D Gaussian filter with full-width-half-max (fwhm). Kurtosis fitting is sensitive and outliers occur easily. According to tests on HCP, CB_Brain and Penthera3T, this smoothing is thus turned ON by default with fwhm=2.5. [Default: 2.5]\n  --not_all  If set, will only save the metrics explicitly specified using the other metrics flags. [Default: not set]\n  -v [{DEBUG,INFO,WARNING}]  Produces verbose output depending on the provided level. Default level is warning; default when using -v is info.\n  -f  Force overwriting of the output files.\n\nMetrics files flags:\n  --ak file  Output filename for the axial kurtosis.\n  --mk file  Output filename for the mean kurtosis.\n  --rk file  Output filename for the radial kurtosis.\n  --msk file  Output filename for the mean signal kurtosis.\n  --dki_fa file  Output filename for the fractional anisotropy from DKI.\n  --dki_md file  Output filename for the mean diffusivity from DKI.\n  --dki_ad file  Output filename for the axial diffusivity from DKI.\n  --dki_rd file  Output filename for the radial diffusivity from DKI.\n\nQuality control files flags:\n  --dki_residual file  Output filename for the map of the residual of the tensor fit. Note: in previous versions, the resulting map was normalized; it is not anymore.\n  --msd file  Output filename for the mean signal diffusion (powder-average).",
        "synonyms": [],
        "keywords": []
    },
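A minimal sketch, assuming a hypothetical multi-shell DWI that has already been denoised (e.g. with scil_denoising_nlmeans.py), keeping only the powder-averaged outputs suggested in the docstring above:

    scil_dki_metrics.py dwi.nii.gz dwi.bval dwi.bvec --mask brain_mask.nii.gz \
        --not_all --msk msk.nii.gz --msd msd.nii.gz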
    {
        "name": "scil_dti_convert_tensors",
        "docstring": "Conversion of tensors (the 6 values from the triangular matrix) between\nvarious software standards. We cannot discover the input format type; the user\nmust know how the tensors were created.",
        "help": "usage: scil_dti_convert_tensors.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f] in_file out_file in_format out_format\n\n(Docstring as above.)\n\n  Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]. Shape: [i, j, k, 6]. Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639\n  MRTRIX's order is [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]. Shape: [i, j, k, 6]. Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html\n  ANTS's order ('nifti format') is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]. Shape: [i, j, k, 1, 6] (careful, the file is 5D). Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software\n  FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]. Shape: [i, j, k, 6]. Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide (also used by the Fibernavigator)\n\npositional arguments:\n  in_file  Input tensors filename.\n  out_file  Output tensors filename.\n  in_format  Input format. Choices: ['fsl', 'nifti', 'mrtrix', 'dipy']\n  out_format  Output format. Choices: ['fsl', 'nifti', 'mrtrix', 'dipy']\n\noptions:\n  -h, --help  Show this help message and exit.\n  -v [{DEBUG,INFO,WARNING}]  Produces verbose output depending on the provided level. Default level is warning; default when using -v is info.\n  -f  Force overwriting of the output files.",
        "synonyms": [],
        "keywords": []
    },
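A minimal sketch converting hypothetical FSL-ordered tensors to the MRtrix ordering:

    scil_dti_convert_tensors.py tensors_fsl.nii.gz tensors_mrtrix.nii.gz fsl mrtrix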
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "application", - "database", - "user" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "order", - "order" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "matter", - "question", - "thinking", - "true", - "know" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "application", - "systems", - "software" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "specific", - "variety", - "various" - ], - [ - "thinking", - "i" - ], - [ - "diffusion", - "diffusion" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "matter", - "question", - "thinking", - "true", - "work", - "working", - "how" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "methods", - "using" - ], - [ - "variety", - "work", - "other" - ], - [ - "matrices", - "matrix" - ], - [ - "considered", - "is" - ], - [ - "create", - "created" - ], - [ - "shape", - "structure", - "shape" - ], - [ - "exist", - "cannot" - ] - ], - "keywords": [] - }, - { - "name": "scil_dti_metrics", - "docstring": "Script to compute all of the Diffusion Tensor Imaging (DTI) metrics.\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section.\n\nIf --not_all is set, only the metrics specified explicitly by the flags\nwill be output. The available metrics are:\n\nfractional anisotropy (FA), geodesic anisotropy (GA), axial diffusivity (AD),\nradial diffusivity (RD), mean diffusivity (MD), mode, red-green-blue colored\nFA (rgb), principal tensor e-vector and tensor coefficients (dxx, dxy, dxz,\ndyy, dyz, dzz).\n\nFor all the quality control metrics such as residual, physically implausible\nsignals, pulsation and misalignment artifacts, see\n[J-D Tournier, S. Mori, A. Leemans. Diffusion Tensor Imaging and Beyond.\nMRM 2011].\n\nFormerly: scil_compute_dti_metrics.py", - "help": "usage: scil_dti_metrics.py [-h] [-f] [--mask MASK] [--method method_name]\n [--not_all] [--ad file] [--evecs file]\n [--evals file] [--fa file] [--ga file] [--md file]\n [--mode file] [--norm file] [--rgb file]\n [--rd file] [--tensor file]\n [--tensor_format {fsl,nifti,mrtrix,dipy}]\n [--non-physical file] [--pulsation string]\n [--residual file] [--b0_threshold thr]\n [--skip_b0_check] [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nScript to compute all of the Diffusion Tensor Imaging (DTI) metrics.\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section.\n\nIf --not_all is set, only the metrics specified explicitly by the flags\nwill be output. 
The available metrics are:\n\nfractional anisotropy (FA), geodesic anisotropy (GA), axial diffusivity (AD),\nradial diffusivity (RD), mean diffusivity (MD), mode, red-green-blue colored\nFA (rgb), principal tensor e-vector and tensor coefficients (dxx, dxy, dxz,\ndyy, dyz, dzz).\n\nFor all the quality control metrics such as residual, physically implausible\nsignals, pulsation and misalignment artifacts, see\n[J-D Tournier, S. Mori, A. Leemans. Diffusion Tensor Imaging and Beyond.\nMRM 2011].\n\nFormerly: scil_compute_dti_metrics.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computations and reconstruction. (Default: None)\n --method method_name Tensor fit method.\n WLS for weighted least squares\n LS for ordinary least squares\n NLLS for non-linear least-squares\n restore for RESTORE robust tensor fitting. (Default: WLS)\n --not_all If set, will only save the metrics explicitly specified using the other metrics flags. (Default: not set).\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
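For reference, the scalar maps this script outputs are simple functions of the tensor eigenvalues. A rough sketch of the standard formulas (assuming numpy and eigenvalues sorted in descending order; not the script's actual code, which relies on Dipy):

    import numpy as np

    def dti_scalar_maps(evals):
        # evals: (..., 3) tensor eigenvalues, evals[..., 0] being the largest.
        md = evals.mean(axis=-1)               # mean diffusivity
        ad = evals[..., 0]                     # axial diffusivity (lambda1)
        rd = evals[..., 1:].mean(axis=-1)      # radial diffusivity, (l2 + l3) / 2
        num = ((evals - md[..., None]) ** 2).sum(axis=-1)
        den = np.maximum((evals ** 2).sum(axis=-1), 1e-12)
        fa = np.sqrt(1.5 * num / den)          # fractional anisotropy
        return fa, md, ad, rd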
\n Default level is warning, default when using -v is info.\n\nMetrics files flags:\n --ad file Output filename for the axial diffusivity.\n --evecs file Output filename for the eigenvectors of the tensor.\n --evals file Output filename for the eigenvalues of the tensor.\n --fa file Output filename for the fractional anisotropy.\n --ga file Output filename for the geodesic anisotropy.\n --md file Output filename for the mean diffusivity.\n --mode file Output filename for the mode.\n --norm file Output filename for the tensor norm.\n --rgb file Output filename for the colored fractional anisotropy.\n --rd file Output filename for the radial diffusivity.\n --tensor file Output filename for the tensor coefficients.\n --tensor_format {fsl,nifti,mrtrix,dipy}\n Format used for the tensors saved in --tensor file.(default: fsl)\n \n Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639\n \n MRTRIX's order is : [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]\n Shape: [i, j , k, 6].\n Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html\n \n ANTS's order ('nifti format') is : [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].\n Shape: [i, j , k, 1, 6] (Careful, file is 5D).\n Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software\n \n FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide\n (Also used for the Fibernavigator)\n \n\nQuality control files flags:\n --non-physical file Output filename for the voxels with physically implausible signals \n where the mean of b=0 images is below one or more diffusion-weighted images.\n --pulsation string Standard deviation map across all diffusion-weighted images and across b=0 images if more than one is available.\n Shows pulsation and misalignment artifacts.\n --residual file Output filename for the map of the residual of the tensor fit.\n", - "synonyms": [ - [ - "parameter", - "vector" - ], - [ - "step", - "continue" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "methods", - "method" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "matter", - "question", - "thinking", - "understand" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "considered", - "are" - ], - [ - "order", - "order" - ], - [ - "long", - "work", - "more" - ], - [ - "blue", - "dark", - "green", - "red", - "white", - "blue" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "image", - "images" - ], - [ - "methods", - "use" - ], - [ - "work", - "find" - ], - [ - "order", - "set" - ], - [ - "specific", - "specific" - ], - [ - "thinking", - "you" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "application", - "systems", - "software" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "work", - "even" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "diffusion", - "diffusion" - ], - [ - "thinking", - "i" - ], - [ 
- "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "blue", - "green", - "red", - "white", - "red" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "blue", - "colored" - ], - [ - "blue", - "green", - "red", - "white", - "green" - ], - [ - "long", - "with" - ], - [ - "parameters", - "specified" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "axial", - "axial" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "variety", - "work", - "other" - ], - [ - "view", - "see" - ], - [ - "exist", - "exists" - ], - [ - "long", - "than" - ], - [ - "signal", - "signals" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "imaging", - "imaging" - ], - [ - "areas", - "across" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "area", - "work", - "where" - ], - [ - "lack", - "minimal" - ], - [ - "weighted", - "weighted" - ], - [ - "order", - "allow" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "lack", - "quality" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "maps", - "map" - ], - [ - "principal", - "principal" - ], - [ - "shape", - "structure", - "shape" - ], - [ - "considered", - "specific", - "variety", - "such" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_apply_bias_field", - "docstring": "Apply bias field correction to DWI. This script doesn't compute the bias\nfield itself. It ONLY applies an existing bias field. Please use the ANTs\nN4BiasFieldCorrection executable to compute the bias field.\n\nFormerly: scil_apply_bias_field_on_dwi.py", - "help": "usage: scil_dwi_apply_bias_field.py [-h] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bias_field out_name\n\nApply bias field correction to DWI. This script doesn't compute the bias\nfield itself. It ONLY applies an existing bias field. Please use the ANTs\nN4BiasFieldCorrection executable to compute the bias field.\n\nFormerly: scil_apply_bias_field_on_dwi.py\n\npositional arguments:\n in_dwi DWI Nifti image.\n in_bias_field Bias field Nifti image.\n out_name Corrected DWI Nifti image.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Apply bias field correction only in the region defined by the mask.\n If this is not given, the bias field is still only applied only in non-background data \n (i.e. where the dwi is not 0).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "applied", - "applied" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "applied", - "apply" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "thinking", - "i" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "methods", - "using" - ], - [ - "defined", - "function", - "defined" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "area", - "work", - "where" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "clear", - "long", - "work", - "still" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "area", - "areas", - "region", - "regions", - "region" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_compute_snr", - "docstring": "Script to compute signal to noise ratio (SNR) in a region of interest (ROI)\nof a DWI volume.\n\nIt will compute the SNR for all DWI volumes of the input image separately.\nThe output will contain the SNR which is the ratio of\nmean(signal) / std(noise).\nThe mean of the signal is computed inside the mask.\nThe standard deviation of the noise is estimated inside the noise_mask\nor inside the same mask if a noise_map is provided.\nIf it's not supplied, it will be estimated using the data outside the brain,\ncomputed with Dipy's median_otsu.\n\nIf verbose is True, the SNR for every DWI volume will be output.\n\nThis works best in a well-defined ROI such as the corpus callosum.\nIt is heavily dependent on the ROI and its quality.\n\nWe highly recommend using a noise_map if you can acquire one.\nSee refs [1, 2] that describe the noise map acquisition.\n[1] St-Jean, et al (2016). Non Local Spatial and Angular Matching...\n https://doi.org/10.1016/j.media.2016.02.010\n[2] Reymbaut, et al (2021). 
Magic DIAMOND...\n https://doi.org/10.1016/j.media.2021.101988\n\nFormerly: scil_snr_in_roi.py", - "help": "usage: scil_dwi_compute_snr.py [-h]\n [--noise_mask NOISE_MASK | --noise_map NOISE_MAP]\n [--b0_thr B0_THR] [--out_basename OUT_BASENAME]\n [--split_shells] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_mask\n\nScript to compute signal to noise ratio (SNR) in a region of interest (ROI)\nof a DWI volume.\n\nIt will compute the SNR for all DWI volumes of the input image separately.\nThe output will contain the SNR which is the ratio of\nmean(signal) / std(noise).\nThe mean of the signal is computed inside the mask.\nThe standard deviation of the noise is estimated inside the noise_mask\nor inside the same mask if a noise_map is provided.\nIf it's not supplied, it will be estimated using the data outside the brain,\ncomputed with Dipy's median_otsu.\n\nIf verbose is True, the SNR for every DWI volume will be output.\n\nThis works best in a well-defined ROI such as the corpus callosum.\nIt is heavily dependent on the ROI and its quality.\n\nWe highly recommend using a noise_map if you can acquire one.\nSee refs [1, 2] that describe the noise map acquisition.\n[1] St-Jean, et al (2016). Non Local Spatial and Angular Matching...\n https://doi.org/10.1016/j.media.2016.02.010\n[2] Reymbaut, et al (2021). Magic DIAMOND...\n https://doi.org/10.1016/j.media.2021.101988\n\nFormerly: scil_snr_in_roi.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n in_mask Binary mask of the region used to estimate SNR.\n\noptions:\n -h, --help show this help message and exit\n --b0_thr B0_THR All b-values with values less than or equal to b0_thr are considered as b0s i.e. without diffusion weighting. [0.0]\n --out_basename OUT_BASENAME\n Path and prefix for the various saved files.\n --split_shells SNR will be split into shells.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
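The ratio defined above, mean(signal) / std(noise), reduces to a few lines once the volume and masks are loaded. A sketch with hypothetical array names (not the script's code), returning one SNR value per DWI volume:

    import numpy as np

    def compute_snr(dwi, roi_mask, noise_mask):
        # dwi: (X, Y, Z, N) array; masks: boolean (X, Y, Z) arrays.
        signal = dwi[roi_mask]                  # (n_roi_voxels, N)
        noise = dwi[noise_mask]                 # (n_noise_voxels, N)
        return signal.mean(axis=0) / noise.std(axis=0)   # one SNR per volume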
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMasks options:\n --noise_mask NOISE_MASK\n Binary mask used to estimate the noise from the DWI.\n --noise_map NOISE_MAP\n Noise map.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "volume", - "volumes", - "volume" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "work", - "and" - ], - [ - "create", - "experience", - "matter", - "thinking", - "true", - "sort" - ], - [ - "considered", - "are" - ], - [ - "left", - "into" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "total", - "estimated" - ], - [ - "thinking", - "you" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "higher", - "interest" - ], - [ - "considered", - "highly", - "highly" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "lack", - "result", - "without" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "diffusion", - "diffusion" - ], - [ - "thinking", - "i" - ], - [ - "specific", - "variety", - "various" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "defined", - "function", - "defined" - ], - [ - "long", - "than" - ], - [ - "highly", - "less" - ], - [ - "meaning", - "true", - "true" - ], - [ - "work", - "works" - ], - [ - "signal", - "signal" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "callosum", - "callosum" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "volume", - "volumes", - "volumes" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "corpus", - "corpus" - ], - [ - "lack", - "quality" - ], - [ - "area", - "areas", - "region", - "regions", - "region" - ], - [ - "maps", - "map" - ], - [ - "spatial", - "temporal", - "spatial" - ], - [ - "considered", - "experience", - "large", - "long", - "result", - "variety", - "work", - "working", - "well" - ], - [ - "considered", - "specific", - "variety", - "such" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_concatenate", - "docstring": "Concatenate DWI, bval and bvecs together. File must be specified in matching\norder. 
Default data type will be the same as the first input DWI.\n\nFormerly: scil_concatenate_dwi.py", - "help": "usage: scil_dwi_concatenate.py [-h] [--in_dwis IN_DWIS [IN_DWIS ...]]\n [--in_bvals IN_BVALS [IN_BVALS ...]]\n [--in_bvecs IN_BVECS [IN_BVECS ...]]\n [--data_type DATA_TYPE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dwi out_bval out_bvec\n\nConcatenate DWI, bval and bvecs together. File must be specified in matching\norder. Default data type will be the same as the first input DWI.\n\nFormerly: scil_concatenate_dwi.py\n\npositional arguments:\n out_dwi The name of the output DWI file.\n out_bval The name of the output b-values file (.bval).\n out_bvec The name of the output b-vectors file (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n The DWI file (.nii) to concatenate.\n --in_bvals IN_BVALS [IN_BVALS ...]\n The b-values files in FSL format (.bval).\n --in_bvecs IN_BVECS [IN_BVECS ...]\n The b-vectors files in FSL format (.bvec).\n --data_type DATA_TYPE\n Data type of the output image. Use the format: uint8, int16, int/float32, int/float64.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "work", - "and" - ], - [ - "order", - "order" - ], - [ - "working", - "together" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "meaning", - "name" - ], - [ - "methods", - "use" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "parameters", - "specified" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "methods", - "using" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_convert_FDF", - "docstring": "Converts a Varian FDF file or directory to a nifti file.\nIf the procpar contains diffusion information, it will be saved as bval and\nbvec in the same folder as the output file.\n\nex: scil_dwi_convert_FDF.py semsdw/b0_folder/ semsdw/dwi_folder/ dwi.nii.gz --bval dwi.bval --bvec dwi.bvec -f\n\nFormerly: scil_convert_fdf.py", - "help": "usage: scil_dwi_convert_FDF.py [-h] [--bval BVAL] [--bvec BVEC]\n [--flip dimension [dimension ...]]\n [--swap dimension [dimension ...]]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_b0_path in_dwi_path out_path\n\nConverts a Varian FDF file or directory to a nifti file.\nIf the procpar contains diffusion information, it will be saved as bval and\nbvec in the same folder as the output file.\n\nex: scil_dwi_convert_FDF.py semsdw/b0_folder/ semsdw/dwi_folder/ dwi.nii.gz --bval dwi.bval --bvec dwi.bvec -f\n\nFormerly: scil_convert_fdf.py\n\npositional arguments:\n in_b0_path Path to the b0 FDF file or folder to convert.\n in_dwi_path Path to the DWI FDF file or folder to convert.\n out_path Path to the nifti file to write on 
disk.\n\noptions:\n -h, --help show this help message and exit\n --bval BVAL Path to the bval file to write on disk.\n --bvec BVEC Path to the bvec file to write on disk.\n --flip dimension [dimension ...]\n The axes you want to flip. eg: to flip the x and y axes use: x y. [None]\n --swap dimension [dimension ...]\n The axes you want to swap. eg: to swap the x and y axes use: x y. [None]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "work", - "and" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "diffusion", - "diffusion" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "methods", - "using" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_detect_volume_outliers", - "docstring": "This script simply finds the 3 closest angular neighbors of each direction\n(per shell) and computes the voxel-wise correlation.\nIf the angles or correlations to neighbors are below the shell average (by\nargs.std_scale x STD) it will flag the volume as a potential outlier.\n\nThis script supports multi-shells, but each shell is independent and detected\nusing the --b0_threshold parameter.\n\nThis script can be run before any processing to identify potential problems\nbefore launching pre-processing.", - "help": "usage: scil_dwi_detect_volume_outliers.py [-h] [--std_scale STD_SCALE]\n [--b0_threshold thr]\n [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nThis script simply finds the 3 closest angular neighbors of each direction\n(per shell) and computes the voxel-wise correlation.\nIf the angles or correlations to neighbors are below the shell average (by\nargs.std_scale x STD) it will flag the volume as a potential outlier.\n\nThis script supports multi-shells, but each shell is independent and detected\nusing the --b0_threshold parameter.\n\nThis script can be run before any processing to identify potential problems\nbefore launching pre-processing.\n\npositional arguments:\n in_dwi The DWI file (.nii) to concatenate.\n in_bval The b-values files in FSL format (.bval).\n in_bvec The b-vectors files in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --std_scale STD_SCALE\n How many deviations from the mean are required to be considered an outlier. [2.0]\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. 
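A sketch of the neighbor-correlation idea described above, for a single shell (not the script's exact implementation): each volume is correlated with its three closest directions, and volumes whose mean correlation falls more than std_scale standard deviations below the shell average are flagged. Assumes unit b-vectors and antipodal symmetry:

    import numpy as np

    def flag_outliers(dwi, bvecs, std_scale=2.0):
        # dwi: (X, Y, Z, N) single-shell data; bvecs: (N, 3) unit vectors.
        n = bvecs.shape[0]
        flat = dwi.reshape(-1, n)
        cos = np.abs(bvecs @ bvecs.T)       # |dot|: antipodal directions are close
        np.fill_diagonal(cos, -1.0)         # exclude self-matches
        scores = np.empty(n)
        for i in range(n):
            neighbors = np.argsort(cos[i])[-3:]     # 3 closest angular neighbors
            scores[i] = np.mean([np.corrcoef(flat[:, i], flat[:, j])[0, 1]
                                 for j in neighbors])
        return np.flatnonzero(scores < scores.mean() - std_scale * scores.std())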
To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "step", - "continue" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "order", - "required" - ], - [ - "average", - "per" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "matter", - "question", - "thinking", - "understand" - ], - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "parameter", - "parameters", - "parameter" - ], - [ - "methods", - "use" - ], - [ - "direction", - "direction" - ], - [ - "work", - "find" - ], - [ - "thinking", - "you" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "processing", - "processing" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "work", - "even" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "thinking", - "i" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "action", - "clear", - "considered", - "future", - "matter", - "possibility", - "potential", - "question", - "result", - "specific", - "any" - ], - [ - "matter", - "question", - "thinking", - "true", - "work", - "working", - "how" - ], - [ - "long", - "with" - ], - [ - "average", - "average" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "supported", - "supports" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "methods", - "using" - ], - [ - "thinking", - "simply" - ], - [ - "exist", - "exists" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "question", - "problem" - ], - [ - "left", - "before" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "large", - "work", - "many" - ], - [ - "future", - "possibility", - "potential", - "potential" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "lack", - "minimal" - ], - [ - "order", - "allow" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ] - ], - "keywords": [] - }, - { - "name": 
"scil_dwi_extract_b0", - "docstring": "Extract B0s from DWI, based on the bval and bvec information.\n\nThe default behavior is to save the first b0 of the series.\n\nFormerly: scil_extract_b0.py", - "help": "usage: scil_dwi_extract_b0.py [-h]\n [--all | --mean | --cluster-mean | --cluster-first]\n [--block-size INT] [--single-image]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_b0\n\nExtract B0s from DWI, based on the bval and bvec information.\n\nThe default behavior is to save the first b0 of the series.\n\nFormerly: scil_extract_b0.py\n\npositional arguments:\n in_dwi DWI Nifti image.\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-values filename, in FSL format (.bvec).\n out_b0 Output b0 file(s).\n\noptions:\n -h, --help show this help message and exit\n --block-size INT, -s INT\n Load the data using this block size. Useful\n when the data is too large to be loaded in memory.\n --single-image If output b0 volume has multiple time points, only outputs a single \n image instead of a numbered series of images.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nOptions in the case of multiple b0s.:\n --all Extract all b0s. 
Index number will be appended to the output file.\n --mean Extract mean b0.\n --cluster-mean Extract mean of each continuous cluster of b0s.\n --cluster-first Extract first b0 of each continuous cluster of b0s.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "step", - "continue" - ], - [ - "memory", - "memory" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "matter", - "question", - "thinking", - "understand" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "clear", - "long", - "too" - ], - [ - "image", - "images" - ], - [ - "methods", - "use" - ], - [ - "work", - "find" - ], - [ - "thinking", - "you" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "tool", - "useful" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "work", - "even" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "thinking", - "i" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "large", - "larger", - "large" - ], - [ - "long", - "with" - ], - [ - "long", - "result", - "work", - "working", - "time" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "methods", - "using" - ], - [ - "order", - "work", - "instead" - ], - [ - "exist", - "exists" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "individual", - "each" - ], - [ - "larger", - "size", - "size" - ], - [ - "lack", - "minimal" - ], - [ - "order", - "allow" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "matter", - "question", - "case" - ], - [ - "based", - "based" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_extract_shell", - "docstring": "Extracts the DWI volumes that are on specific b-value shells. Many shells\ncan be extracted at once by specifying multiple b-values. The extracted\nvolumes are in the same order as in the original file.\n\nIf the b-values of a shell are not all identical, use the --tolerance\nargument to adjust the accepted interval. For example, a b-value of 2000\nand a tolerance of 20 will extract all volumes with a b-values from 1980 to\n2020.\n\nFiles that are too large to be loaded in memory can still be processed by\nsetting the --block-size argument. 
A block size of X means that X DWI volumes\nare loaded at a time for processing.\n\nFormerly: scil_extract_dwi_shell.py", - "help": "usage: scil_dwi_extract_shell.py [-h] [--out_indices OUT_INDICES]\n [--block-size INT] [--tolerance INT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_bvals_to_extract\n [in_bvals_to_extract ...] out_dwi out_bval\n out_bvec\n\nExtracts the DWI volumes that are on specific b-value shells. Many shells\ncan be extracted at once by specifying multiple b-values. The extracted\nvolumes are in the same order as in the original file.\n\nIf the b-values of a shell are not all identical, use the --tolerance\nargument to adjust the accepted interval. For example, a b-value of 2000\nand a tolerance of 20 will extract all volumes with a b-values from 1980 to\n2020.\n\nFiles that are too large to be loaded in memory can still be processed by\nsetting the --block-size argument. A block size of X means that X DWI volumes\nare loaded at a time for processing.\n\nFormerly: scil_extract_dwi_shell.py\n\npositional arguments:\n in_dwi The DW image file to split.\n in_bval The b-values file in FSL format (.bval).\n in_bvec The b-vectors file in FSL format (.bvec).\n in_bvals_to_extract The list of b-values to extract. For example 0 2000.\n out_dwi The name of the output DWI file.\n out_bval The name of the output b-value file (.bval).\n out_bvec The name of the output b-vector file (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --out_indices OUT_INDICES\n Optional filename for valid indices in input dwi volume\n --block-size INT, -s INT\n Loads the data using this block size. Useful\n when the data is too large to be loaded in memory.\n --tolerance INT, -t INT\n The tolerated gap between the b-values to extract\n and the actual b-values. [20]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
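The selection itself is a tolerance test on the b-values; a minimal sketch (not the script's code) followed by the example quoted above:

    import numpy as np

    def shell_indices(bvals, shells, tol=20):
        # Indices of volumes whose b-value lies within tol of any requested shell.
        bvals = np.asarray(bvals)
        keep = np.zeros(len(bvals), dtype=bool)
        for shell in shells:
            keep |= np.abs(bvals - shell) <= tol
        return np.flatnonzero(keep)

    # b=2000 with the default tolerance keeps b-values in [1980, 2020]:
    shell_indices([0, 5, 1995, 2005, 3000], shells=[0, 2000])   # -> [0, 1, 2, 3]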
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "parameter", - "vector" - ], - [ - "memory", - "memory" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "order", - "order" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "contrast", - "difference", - "form", - "result", - "specific", - "subject", - "true", - "unique", - "work", - "example" - ], - [ - "clear", - "long", - "too" - ], - [ - "meaning", - "name" - ], - [ - "methods", - "use" - ], - [ - "question", - "argument" - ], - [ - "invalid", - "valid", - "valid" - ], - [ - "specific", - "specific" - ], - [ - "processing", - "processing" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "tool", - "useful" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "large", - "larger", - "large" - ], - [ - "long", - "with" - ], - [ - "long", - "result", - "work", - "working", - "time" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "methods", - "using" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "left", - "long", - "work", - "once" - ], - [ - "considered", - "is" - ], - [ - "large", - "work", - "many" - ], - [ - "specific", - "actual" - ], - [ - "larger", - "size", - "size" - ], - [ - "volume", - "volumes", - "volumes" - ], - [ - "clear", - "long", - "work", - "still" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "meaning", - "order", - "result", - "step", - "true", - "means" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_powder_average", - "docstring": "Script to compute powder average (mean diffusion weighted image) from set of\ndiffusion images.\n\nBy default will output an average image calculated from all images with\nnon-zero bvalue.\n\nSpecify --bvalue to output an image for a single shell\n\nScript currently does not take into account the diffusion gradient directions\nbeing averaged.\n\nFormerly: scil_compute_powder_average.py", - "help": "usage: scil_dwi_powder_average.py [-h] [-f] [--mask file] [--b0_thr B0_THR]\n [--shells SHELLS [SHELLS ...]]\n [--shell_thr SHELL_THR]\n [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval out_avg\n\nScript to compute powder average (mean diffusion weighted image) from set of\ndiffusion images.\n\nBy default will output an average image calculated from all images with\nnon-zero bvalue.\n\nSpecify --bvalue to output an image for a single shell\n\nScript currently does not take into account the diffusion gradient directions\nbeing averaged.\n\nFormerly: scil_compute_powder_average.py\n\npositional arguments:\n in_dwi Path of the input diffusion 
volume.\n in_bval Path of the bvals file, in FSL format.\n out_avg Path of the output file.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --mask file Path to a binary mask.\n Only data inside the mask will be used for powder avg. (Default: None)\n --b0_thr B0_THR Exclude b0 volumes from powder average with bvalue less than specified threshold.\n (Default: remove volumes with bvalue < 50)\n --shells SHELLS [SHELLS ...]\n bvalue (shells) to include in powder average passed as a list \n (e.g. --shells 1000 2000). If not specified will include all volumes with a non-zero bvalue.\n --shell_thr SHELL_THR\n Include volumes with bvalue +- the specified threshold.\n (Default: [50])\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "volume", - "volumes", - "volume" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "considered", - "involved", - "being" - ], - [ - "left", - "into" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "image", - "images" - ], - [ - "order", - "set" - ], - [ - "direction", - "directions" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "diffusion", - "diffusion" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "clear", - "order", - "step", - "work", - "take" - ], - [ - "variety", - "include" - ], - [ - "long", - "with" - ], - [ - "average", - "average" - ], - [ - "parameters", - "specified" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "total", - "50" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "long", - "than" - ], - [ - "highly", - "less" - ], - [ - "pass", - "passed" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "matter", - "question", - "does" - ], - [ - "weighted", - "weighted" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "volume", - "volumes", - "volumes" - ], - [ - "work", - "all" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_prepare_eddy_command", - "docstring": "Prepare a typical command for eddy and create the necessary files. When using\nmultiple acquisitions and/or opposite phase directions, images, b-values and\nb-vectors should be merged together using scil_dwi_concatenate.py. 
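The powder average described in the previous entry is just the voxel-wise mean over the retained volumes. A sketch consistent with the defaults quoted in that help (hypothetical names, numpy only; gradient directions are deliberately ignored, as the script itself notes):

    import numpy as np

    def powder_average(dwi, bvals, mask=None, b0_thr=50):
        # Voxel-wise mean over all volumes with bvalue >= b0_thr (b0s excluded).
        keep = np.asarray(bvals) >= b0_thr
        avg = dwi[..., keep].mean(axis=-1)
        if mask is not None:
            avg = avg * mask                # zero outside the binary mask
        return avg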
If using\ntopup prior to calling this script, images should be concatenated in the same\norder as the b0s used with prepare_topup.\n\nFormerly: scil_prepare_eddy_command.py", - "help": "usage: scil_dwi_prepare_eddy_command.py [-h] [--n_reverse N_REVERSE]\n [--topup TOPUP]\n [--topup_params TOPUP_PARAMS]\n [--eddy_cmd {eddy_openmp,eddy_cuda,eddy_cuda8.0,eddy_cuda9.1,eddy_cuda10.2,eddy,eddy_cpu}]\n [--b0_thr B0_THR]\n [--encoding_direction {x,y,z}]\n [--readout READOUT]\n [--slice_drop_correction]\n [--lsr_resampling]\n [--out_directory OUT_DIRECTORY]\n [--out_prefix OUT_PREFIX]\n [--out_script] [--fix_seed]\n [--eddy_options EDDY_OPTIONS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bvals in_bvecs in_mask\n\nPrepare a typical command for eddy and create the necessary files. When using\nmultiple acquisitions and/or opposite phase directions, images, b-values and\nb-vectors should be merged together using scil_dwi_concatenate.py. If using\ntopup prior to calling this script, images should be concatenated in the same\norder as the b0s used with prepare_topup.\n\nFormerly: scil_prepare_eddy_command.py\n\npositional arguments:\n in_dwi Input DWI Nifti image. If using multiple acquisition and/or opposite phase directions, please merge in the same order as for prepare_topup using scil_dwi_concatenate.py.\n in_bvals Input b-values file in FSL format.\n in_bvecs Input b-vectors file in FSL format.\n in_mask Binary brain mask.\n\noptions:\n -h, --help show this help message and exit\n --n_reverse N_REVERSE\n Number of reverse phase volumes included in the DWI image [0].\n --topup TOPUP Topup output name. If given, apply topup during eddy.\n Should be the same as --out_prefix from scil_dwi_prepare_topup_command.py.\n --topup_params TOPUP_PARAMS\n Parameters file (typically named acqparams) used to run topup.\n --eddy_cmd {eddy_openmp,eddy_cuda,eddy_cuda8.0,eddy_cuda9.1,eddy_cuda10.2,eddy,eddy_cpu}\n Eddy command [eddy_openmp].\n --b0_thr B0_THR All b-values with values less than or equal to b0_thr are considered\n as b0s i.e. without diffusion weighting [20].\n --encoding_direction {x,y,z}\n Acquisition direction, default is AP-PA [y].\n --readout READOUT Total readout time from the DICOM metadata [0.062].\n --slice_drop_correction\n If set, will activate eddy's outlier correction,\n which includes slice drop correction.\n --lsr_resampling Perform least-square resampling, allowing eddy to combine forward and reverse phase acquisitions for better reconstruction. Only works if directions and b-values are identical in both phase direction.\n --out_directory OUT_DIRECTORY\n Output directory for eddy files [.].\n --out_prefix OUT_PREFIX\n Prefix of the eddy-corrected DWI [dwi_eddy_corrected].\n --out_script If set, will output a .sh script (eddy.sh).\n else, will output the lines to the terminal [False].\n --fix_seed If set, will use the fixed seed strategy for eddy.\n Enhances reproducibility.\n --eddy_options EDDY_OPTIONS\n Additional options you want to use to run eddy.\n Add these options using quotes (i.e. \"--ol_nstd=6 --mb=4\").\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "order", - "order" - ], - [ - "working", - "together" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "image", - "images" - ], - [ - "meaning", - "name" - ], - [ - "methods", - "use" - ], - [ - "direction", - "direction" - ], - [ - "order", - "set" - ], - [ - "thinking", - "you" - ], - [ - "direction", - "opposite" - ], - [ - "higher", - "increase", - "drop" - ], - [ - "direction", - "directions" - ], - [ - "long", - "a" - ], - [ - "experience", - "thinking", - "work", - "working", - "better" - ], - [ - "applied", - "apply" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "lack", - "result", - "without" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "diffusion", - "diffusion" - ], - [ - "thinking", - "i" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "create" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "contrast", - "typical" - ], - [ - "held", - "in" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "long", - "result", - "work", - "working", - "time" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "long", - "than" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "highly", - "less" - ], - [ - "work", - "works" - ], - [ - "future", - "result", - "specific", - "variety", - "work", - "these" - ], - [ - "matter", - "thinking", - "else" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "step", - "forward" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "volume", - "volumes", - "volumes" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "false", - "false" - ], - [ - "increase", - "total", - "total" - ], - [ - "assigned", - "command" - ], - [ - "order", - "necessary" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_prepare_topup_command", - "docstring": "Prepare a typical command for topup and create the necessary files.\nThe reversed b0 must be in a different file.\n\nFormerly: scil_prepare_topup_command.py", - "help": "usage: scil_dwi_prepare_topup_command.py [-h] [--config CONFIG] [--synb0]\n [--encoding_direction {x,y,z}]\n [--readout READOUT]\n [--out_b0s OUT_B0S]\n [--out_directory OUT_DIRECTORY]\n [--out_prefix OUT_PREFIX]\n [--out_params OUT_PARAMS]\n [--out_script]\n [--topup_options TOPUP_OPTIONS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_forward_b0 in_reverse_b0\n\nPrepare a typical command for topup and create the 
necessary files.\nThe reversed b0 must be in a different file.\n\nFormerly: scil_prepare_topup_command.py\n\npositional arguments:\n in_forward_b0 Input b0 Nifti image with forward phase encoding.\n in_reverse_b0 Input b0 Nifti image with reversed phase encoding.\n\noptions:\n -h, --help show this help message and exit\n --config CONFIG Topup config file [b02b0.cnf].\n --synb0 If set, will use SyNb0 custom acqparams file.\n --encoding_direction {x,y,z}\n Acquisition direction of the forward b0 image, default is AP [y].\n --readout READOUT Total readout time from the DICOM metadata [0.062].\n --out_b0s OUT_B0S Output fused b0 file [fused_b0s.nii.gz].\n --out_directory OUT_DIRECTORY\n Output directory for topup files [.].\n --out_prefix OUT_PREFIX\n Prefix of the topup results [topup_results].\n --out_params OUT_PARAMS\n Filename for the acquisition parameters file [acqparams.txt].\n --out_script If set, will output a .sh script (topup.sh).\n else, will output the lines to the terminal [False].\n --topup_options TOPUP_OPTIONS\n Additional options you want to use to run topup.\n Add these options using quotes (i.e. \"--fwhm=6 --miter=4\").\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "individual", - "specific", - "unique", - "variety", - "different" - ], - [ - "work", - "and" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "direction", - "direction" - ], - [ - "order", - "set" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "thinking", - "i" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "create" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "contrast", - "typical" - ], - [ - "held", - "in" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "long", - "with" - ], - [ - "long", - "result", - "work", - "working", - "time" - ], - [ - "methods", - "using" - ], - [ - "future", - "result", - "specific", - "variety", - "work", - "these" - ], - [ - "matter", - "thinking", - "else" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "step", - "forward" - ], - [ - "result", - "results" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "false", - "false" - ], - [ - "increase", - "total", - "total" - ], - [ - "assigned", - "command" - ], - [ - "order", - "necessary" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_reorder_philips", - "docstring": "Re-order gradient according to original table (Philips)\nThis script is not needed for version 5.6 and higher\n\nFormerly: scil_reorder_dwi_philips.py", - "help": "usage: scil_dwi_reorder_philips.py [-h] [--json JSON]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_table\n out_basename\n\nRe-order gradient according to original table (Philips)\nThis script is not needed for version 5.6 and higher\n\nFormerly: scil_reorder_dwi_philips.py\n\npositional 
arguments:\n in_dwi Input dwi file.\n in_bval Input bval FSL format.\n in_bvec Input bvec FSL format.\n in_table Original philips table - first line is skipped.\n out_basename Basename output file.\n\noptions:\n -h, --help show this help message and exit\n --json JSON If you give a json file, it will check if you need to reorder your Philips dwi.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "work", - "and" - ], - [ - "order", - "order" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "thinking", - "you" - ], - [ - "high", - "higher", - "increase", - "level", - "higher" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "methods", - "using" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "clear", - "give" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "create", - "lack", - "step", - "work", - "working", - "need" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "based", - "reported", - "according" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_split_by_indices", - "docstring": "Splits the DWI image at certain indices along the last dimension (b-values).\nMany indices can be given at once by specifying multiple values. The split\nvolumes are in the same order as in the original file. Also outputs the\ncorresponding .bval and .bvec files.\n\nThis script can be useful for splitting images at places where a b-value\nextraction does not work. For instance, if one wants to split the x first\nb-1500s from the rest of the b-1500s in an image, simply put x as an index.\n\nFormerly: scil_split_image.py", - "help": "usage: scil_dwi_split_by_indices.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_basename\n split_indices [split_indices ...]\n\nSplits the DWI image at certain indices along the last dimension (b-values).\nMany indices can be given at once by specifying multiple values. The split\nvolumes are in the same order as in the original file. Also outputs the\ncorresponding .bval and .bvec files.\n\nThis script can be useful for splitting images at places where a b-value\nextraction does not work. For instance, if one wants to split the x first\nb-1500s from the rest of the b-1500s in an image, simply put x as an index.\n\nFormerly: scil_split_image.py\n\npositional arguments:\n in_dwi The DW image file to split.\n in_bval The b-values file in FSL format (.bval).\n in_bvec The b-vectors file in FSL format (.bvec).\n out_basename The basename of the output files. Indices number will be appended to out_basename. For example, if split_indices were 3 10, the files would be saved as out_basename_0_2, out_basename_3_10, out_basename_11_20, where the size of the last dimension is 21 in this example.\n split_indices The list of indices where to split the image. For example 3 10. 
- "synonyms": [ - [ - "total", - "number" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "considered", - "are" - ], - [ - "order", - "order" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "work", - "working", - "work" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "contrast", - "difference", - "form", - "result", - "specific", - "subject", - "true", - "unique", - "work", - "example" - ], - [ - "image", - "images" - ], - [ - "long", - "a" - ], - [ - "tool", - "useful" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "last" - ], - [ - "held", - "in" - ], - [ - "areas", - "places" - ], - [ - "clear", - "left", - "work", - "put" - ], - [ - "methods", - "using" - ], - [ - "thinking", - "simply" - ], - [ - "contrast", - "specific", - "subject", - "instance" - ], - [ - "area", - "main", - "along" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "left", - "rest" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "left", - "long", - "work", - "once" - ], - [ - "clear", - "considered", - "individual", - "lack", - "matter", - "result", - "specific", - "subject", - "certain" - ], - [ - "large", - "work", - "many" - ], - [ - "area", - "work", - "where" - ], - [ - "larger", - "size", - "size" - ], - [ - "matter", - "question", - "does" - ], - [ - "volume", - "volumes", - "volumes" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "areas", - "parts" - ], - [ - "greater", - "higher", - "increase", - "lack", - "increasing" - ], - [ - "considered", - "specific", - "variety", - "such" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_to_sh", - "docstring": "Script to compute the SH coefficients directly on the raw DWI signal.\n\nFormerly: scil_compute_sh_from_signal.py", - "help": "usage: scil_dwi_to_sh.py [-h] [--sh_order SH_ORDER]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--smooth SMOOTH] [--use_attenuation] [--mask MASK]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_sh\n\nScript to compute the SH coefficients directly on the raw DWI signal.\n\nFormerly: scil_compute_sh_from_signal.py\n\npositional arguments:\n in_dwi Path of the dwi 
volume.\n in_bval Path of the b-value file, in FSL format.\n in_bvec Path of the b-vector file, in FSL format.\n out_sh Name of the output SH file to save.\n\noptions:\n -h, --help show this help message and exit\n --sh_order SH_ORDER SH order to fit (int). [4]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --smooth SMOOTH Lambda-regularization coefficient in the SH fit (float). [0.006]\n --use_attenuation If set, will use signal attenuation before fitting the SH (i.e. divide by the b0).\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computations and reconstruction. \n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as the new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n",
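A rough illustration of what --b0_threshold and --use_attenuation imply (a minimal sketch under assumed inputs, not scilpy's implementation; the arrays are hypothetical):

    import numpy as np

    bvals = np.array([0, 0, 1000, 1000])   # hypothetical b-values
    dwi = np.random.rand(2, 2, 2, 4)       # hypothetical 4D DWI
    b0_mask = bvals <= 20                  # --b0_threshold [20]
    b0_mean = dwi[..., b0_mask].mean(axis=-1, keepdims=True)
    # --use_attenuation: divide the DWI signal by the mean b0 before the SH fit
    attenuation = dwi[..., ~b0_mask] / np.maximum(b0_mean, 1e-8)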
- "synonyms": [ - [ - "parameter", - "vector" - ], - [ - "step", - "continue" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "matter", - "question", - "thinking", - "understand" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "order", - "order" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "papers", - "paper" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "meaning", - "name" - ], - [ - "methods", - "use" - ], - [ - "work", - "find" - ], - [ - "order", - "set" - ], - [ - "thinking", - "you" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "work", - "even" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "thinking", - "i" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "exist", - "exists" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "signal", - "signal" - ], - [ - "left", - "before" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "lack", - "minimal" - ], - [ - "order", - "allow" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_fodf_max_in_ventricles", - "docstring": "Script to compute the maximum fODF in the ventricles. The ventricles are\nestimated from an MD and FA threshold.\n\nThis allows clipping the noise of fODF using an absolute threshold.\n\nFormerly: scil_compute_fodf_max_in_ventricles.py", - "help": "usage: scil_fodf_max_in_ventricles.py [-h] [--fa_threshold FA_THRESHOLD]\n [--md_threshold MD_THRESHOLD]\n [--max_value_output file]\n [--mask_output file] [--small_dims]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n fODFs FA MD\n\nScript to compute the maximum fODF in the ventricles. 
The ventricles are\nestimated from an MD and FA threshold.\n\nThis allows clipping the noise of fODF using an absolute threshold.\n\nFormerly: scil_compute_fodf_max_in_ventricles.py\n\npositional arguments:\n fODFs Path of the fODF volume in spherical harmonics (SH).\n FA Path to the FA volume.\n MD Path to the mean diffusivity (MD) volume.\n\noptions:\n -h, --help show this help message and exit\n --fa_threshold FA_THRESHOLD\n Maximal threshold of FA (voxels under that threshold are considered \n for evaluation. [0.1]).\n --md_threshold MD_THRESHOLD\n Minimal threshold of MD in mm2/s (voxels above that threshold are \n considered for evaluation. [0.003]).\n --max_value_output file\n Output path for the text file containing the value. If not set, the \n file will not be saved.\n --mask_output file Output path for the ventricle mask. If not set, the mask \n will not be saved.\n --small_dims If set, takes the full range of data to search the max fODF amplitude \n in ventricles. Useful when the data has small dimensions.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Dell'Acqua, Flavio, et al. \"Can spherical deconvolution provide more\n information than fiber orientations? Hindrance modulated orientational\n anisotropy, a true-tract specific index to characterize white matter\n diffusion.\" Human brain mapping 34.10 (2013): 2464-2483.\n",
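The ventricle estimate described above boils down to a joint FA/MD threshold; a minimal sketch using the default thresholds (hypothetical FA, MD and per-voxel maximum fODF amplitude arrays, not the script's implementation):

    import numpy as np

    FA = np.random.rand(10, 10, 10)
    MD = np.random.rand(10, 10, 10) * 0.004
    max_amp = np.random.rand(10, 10, 10)      # hypothetical max fODF amplitude per voxel
    ventricles = (FA < 0.1) & (MD > 0.003)    # --fa_threshold / --md_threshold defaults
    if ventricles.any():
        print(max_amp[ventricles].max())      # candidate basis for an absolute --at cut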
- "synonyms": [ - [ - "animal", - "human", - "human" - ], - [ - "tract", - "tracts", - "tract" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "large", - "larger", - "small" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "long", - "work", - "more" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "fibre", - "fiber" - ], - [ - "papers", - "paper" - ], - [ - "left", - "from" - ], - [ - "matter", - "question", - "subject", - "matter" - ], - [ - "left", - "result", - "when" - ], - [ - "order", - "set" - ], - [ - "total", - "estimated" - ], - [ - "specific", - "specific" - ], - [ - "maps", - "mapping" - ], - [ - "long", - "a" - ], - [ - "tool", - "useful" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "diffusion", - "diffusion" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "application", - "allows" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "create", - "provide" - ], - [ - "methods", - "using" - ], - [ - "long", - "than" - ], - [ - "meaning", - "true", - "true" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "blue", - "green", - "red", - "white", - "white" - ], - [ - "lack", - "minimal" - ], - [ - "level", - "above" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "long", - "full" - ] - ], - "keywords": [] - }, - { - "name": "scil_fodf_memsmt", - "docstring": "Script to compute multi-encoding multi-shell multi-tissue (memsmt)\nConstrained Spherical Deconvolution ODFs.\n\nIn order to operate, the script only needs the data from one type of b-tensor\nencoding. However, giving only a spherical one will not produce good fODFs, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest always using at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombination with other encodings. 
Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable.\n\nAll of `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\n>>> scil_fodf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_fodf.py", - "help": "usage: scil_fodf_memsmt.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--sh_order int] [--mask MASK]\n [--tolerance tol] [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--not_all] [--wm_out_fODF file]\n [--gm_out_fODF file] [--csf_out_fODF file]\n [--vf file] [--vf_rgb file]\n in_wm_frf in_gm_frf in_csf_frf\n\nScript to compute multi-encoding multi-shell multi-tissue (memsmt)\nConstrained Spherical Deconvolution ODFs.\n\nIn order to operate, the script only needs the data from one type of b-tensor\nencoding. However, giving only a spherical one will not produce good fODFs, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest always using at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombination with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable.\n\nAll of `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\n>>> scil_fodf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. 
Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_fodf.py\n\npositional arguments:\n in_wm_frf Text file of WM response function.\n in_gm_frf Text file of GM response function.\n in_csf_frf Text file of CSF response function.\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as dwi, bval and bvec inputs.\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags. 
(Default: False)\n\nFile flags:\n --wm_out_fODF file Output filename for the WM fODF coefficients.\n --gm_out_fODF file Output filename for the GM fODF coefficients.\n --csf_out_fODF file Output filename for the CSF fODF coefficients.\n --vf file Output filename for the volume fractions map.\n --vf_rgb file Output filename for the volume fractions map in rgb.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "true", - "always" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "matter", - "question", - "thinking", - "understand" - ], - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "work", - "and" - ], - [ - "brain", - "tissue" - ], - [ - "considered", - "are" - ], - [ - "clear", - "considered", - "highly", - "long", - "matter", - "true", - "work", - "very" - ], - [ - "order", - "order" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "papers", - "paper" - ], - [ - "work", - "working", - "work" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "work", - "find" - ], - [ - "order", - "set" - ], - [ - "clear", - "matter", - "true", - "seems" - ], - [ - "long", - "have" - ], - [ - "specific", - "specific" - ], - [ - "thinking", - "you" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "work", - "even" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "analysis", - "data", - "methods", - "study", - "analysis" - ], - [ - "diffusion", - "diffusion" - ], - [ - "imaging", - "mri" - ], - [ - "thinking", - "i" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "action", - "response" - ], - [ - "defined", - "function", - "functional", - "functions", - "function" - ], - [ - "create", - "produce" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "parameters", - "specified" - ], - [ - "clear", - "long", - "matter", - "result", - "work", - "so" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "clear", - "experience", - "thinking", - "true", - "good" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "variety", - "work", - "other" - ], - [ - "future", - "current" - ], - [ - "clear", - "future", - "possibility", - "potential", - "question", - "result", - "specific", - "step", - "possible" - ], - [ - "exist", - "exists" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "future", - "result", - "specific", - "variety", - "work", - "these" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - 
"supported", - "supported" - ], - [ - "variance", - "variance" - ], - [ - "shape", - "shapes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "considered", - "form", - "meaning", - "order", - "result", - "thus" - ], - [ - "order", - "allow" - ], - [ - "step", - "start" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "step", - "true", - "work", - "yet" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "false", - "false" - ], - [ - "based", - "based" - ], - [ - "maps", - "map" - ], - [ - "clear", - "matter", - "question", - "thinking", - "sure" - ], - [ - "clear", - "considered", - "result", - "however" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "shape", - "structure", - "shape" - ], - [ - "indicating", - "suggest" - ], - [ - "considered", - "key", - "work", - "important" - ] - ], - "keywords": [] - }, - { - "name": "scil_fodf_metrics", - "docstring": "Script to compute the maximum Apparent Fiber Density (AFD), the fiber ODFs\norientations, values and indices (peaks, peak_values, peak_indices), the Number\nof Fiber Orientations (NuFO) maps from fiber ODFs and the RGB map.\n\nAFD_max map is the maximal fODF amplitude for each voxel.\n\nNuFO is the the number of maxima of the fODF with an ABSOLUTE amplitude above\nthe threshold set using --at, AND an amplitude above the RELATIVE threshold\nset using --rt.\n\nThe --at argument should be set to a value which is 1.5 times the maximal\nvalue of the fODF in the ventricules. This can be obtained with the\nscil_fodf_max_in_ventricles.py script.\n\nIf the --abs_peaks_and_values argument is set, the peaks are all normalized\nand the peak_values are equal to the actual fODF amplitude of the peaks. By\ndefault, the script max-normalizes the peak_values for each voxel and\nmultiplies the peaks by peak_values.\n\nBy default, will output all possible files, using default names. Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Raffelt et al. 
NeuroImage 2012] and [Dell'Acqua et al HBM 2013] for the\ndefinitions.\n\nFormerly: scil_compute_fodf_metrics.py", - "help": "usage: scil_fodf_metrics.py [-h] [--sphere string] [--mask] [--at A_THRESHOLD]\n [--rt R_THRESHOLD] [--abs_peaks_and_values]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR] [-f]\n [--not_all] [--afd_max file] [--afd_total file]\n [--afd_sum file] [--nufo file] [--rgb file]\n [--peaks file] [--peak_values file]\n [--peak_indices file]\n in_fODF\n\nScript to compute the maximum Apparent Fiber Density (AFD), the fiber ODFs\norientations, values and indices (peaks, peak_values, peak_indices), the Number\nof Fiber Orientations (NuFO) maps from fiber ODFs and the RGB map.\n\nAFD_max map is the maximal fODF amplitude for each voxel.\n\nNuFO is the number of maxima of the fODF with an ABSOLUTE amplitude above\nthe threshold set using --at, AND an amplitude above the RELATIVE threshold\nset using --rt.\n\nThe --at argument should be set to a value which is 1.5 times the maximal\nvalue of the fODF in the ventricles. This can be obtained with the\nscil_fodf_max_in_ventricles.py script.\n\nIf the --abs_peaks_and_values argument is set, the peaks are all normalized\nand the peak_values are equal to the actual fODF amplitude of the peaks. By\ndefault, the script max-normalizes the peak_values for each voxel and\nmultiplies the peaks by peak_values.\n\nBy default, will output all possible files, using default names. Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Raffelt et al. NeuroImage 2012] and [Dell'Acqua et al HBM 2013] for the\ndefinitions.\n\nFormerly: scil_compute_fodf_metrics.py\n\npositional arguments:\n in_fODF Path of the fODF volume in spherical harmonics (SH).\n\noptions:\n -h, --help show this help message and exit\n --sphere string Discrete sphere to use in the processing [repulsion724].\n --mask Path to a binary mask. Only the data inside the mask\n will be used for computations and reconstruction [None].\n --at A_THRESHOLD Absolute threshold on fODF amplitude. This value should be set to\n approximately 1.5 to 2 times the maximum fODF amplitude in isotropic voxels\n (i.e. ventricles).\n Use scil_fodf_max_in_ventricles.py to find the maximal value.\n See [Dell'Acqua et al HBM 2013] [0.0].\n --rt R_THRESHOLD Relative threshold on fODF amplitude in percentage [0.1].\n --abs_peaks_and_values\n If set, the peak_values are not max-normalized for each voxel, \n but rather they keep the actual fODF amplitude of the peaks. \n Also, the peaks are given as unit directions instead of being proportional to peak_values. [False]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags [False].\n\nFile flags:\n --afd_max file Output filename for the AFD_max map.\n --afd_total file Output filename for the AFD_total map (SH coeff = 0).\n --afd_sum file Output filename for the sum of all peak contributions\n (sum of fODF lobes on the sphere).\n --nufo file Output filename for the NuFO map.\n --rgb file Output filename for the RGB map.\n --peaks file Output filename for the extracted peaks.\n --peak_values file Output filename for the extracted peaks values.\n --peak_indices file Output filename for the generated peaks indices on the sphere.\n",
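The NuFO definition above combines an absolute and a relative cut on lobe amplitudes; a minimal per-voxel sketch (the amplitudes and the --at value are hypothetical, not the script's implementation):

    import numpy as np

    amps = np.array([0.31, 0.12, 0.02])   # hypothetical fODF maxima in one voxel
    at, rt = 0.15, 0.1                    # --at (absolute), --rt (relative) thresholds
    keep = (amps > at) & (amps > rt * amps.max())
    nufo = int(keep.sum())                # NuFO for this voxel (here 1)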
- "synonyms": [ - [ - "total", - "number" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "lobe", - "lobes", - "lobes" - ], - [ - "considered", - "are" - ], - [ - "considered", - "involved", - "being" - ], - [ - "fibre", - "fiber" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "papers", - "paper" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "clear", - "long", - "work", - "they" - ], - [ - "clear", - "considered", - "create", - "form", - "manner", - "matter", - "result", - "subject", - "thinking", - "true", - "view", - "work", - "rather" - ], - [ - "methods", - "use" - ], - [ - "work", - "find" - ], - [ - "question", - "argument" - ], - [ - "order", - "set" - ], - [ - "specific", - "specific" - ], - [ - "processing", - "processing" - ], - [ - "direction", - "directions" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "average", - "percentage" - ], - [ - "held", - "on" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "parameters", - "specified" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "order", - "work", - "instead" - ], - [ - "clear", - "future", - "possibility", - "potential", - "question", - "result", - "specific", - "step", - "possible" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "specific", - "actual" - ], - [ - "individual", - "each" - ], - [ - "level", - "above" - ], - [ - "step", - "start" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - 
"long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "false", - "false" - ], - [ - "maps", - "maps" - ], - [ - "maps", - "map" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_fodf_msmt", - "docstring": "Script to compute Multishell Multi-tissue Constrained Spherical Deconvolution\nODFs.\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical\ndeconvolution for improved analysis of multi-shell diffusion\nMRI data. Neuroimage (2014)\n\nFormerly: scil_compute_msmt_fodf.py", - "help": "usage: scil_fodf_msmt.py [-h] [--sh_order int] [--mask] [--tolerance tol]\n [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [--not_all] [--wm_out_fODF file]\n [--gm_out_fODF file] [--csf_out_fODF file]\n [--vf file] [--vf_rgb file]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_wm_frf in_gm_frf in_csf_frf\n\nScript to compute Multishell Multi-tissue Constrained Spherical Deconvolution\nODFs.\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical\ndeconvolution for improved analysis of multi-shell diffusion\nMRI data. Neuroimage (2014)\n\nFormerly: scil_compute_msmt_fodf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bval file, in FSL format.\n in_bvec Path of the bvec file, in FSL format.\n in_wm_frf Text file of WM response function.\n in_gm_frf Text file of GM response function.\n in_csf_frf Text file of CSF response function.\n\noptions:\n -h, --help show this help message and exit\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. 
\n Default: [1]\n --not_all If set, only saves the files specified using the file flags. (Default: False)\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nFile flags:\n --wm_out_fODF file Output filename for the WM fODF coefficients.\n --gm_out_fODF file Output filename for the GM fODF coefficients.\n --csf_out_fODF file Output filename for the CSF fODF coefficients.\n --vf file Output filename for the volume fractions map.\n --vf_rgb file Output filename for the volume fractions map in rgb.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "matter", - "question", - "thinking", - "understand" - ], - [ - "work", - "and" - ], - [ - "brain", - "tissue" - ], - [ - "order", - "order" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "papers", - "paper" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "work", - "find" - ], - [ - "order", - "set" - ], - [ - "specific", - "specific" - ], - [ - "thinking", - "you" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "work", - "even" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "analysis", - "data", - "methods", - "study", - "analysis" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "diffusion", - "diffusion" - ], - [ - "imaging", - "mri" - ], - [ - "thinking", - "i" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "action", - "response" - ], - [ - "defined", - "function", - "functional", - "functions", - "function" - ], - [ - "long", - "with" - ], - [ - "parameters", - "specified" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "future", - "current" - ], - [ - "clear", - "future", - "possibility", - "potential", - "question", - "result", - "specific", - "step", - "possible" - ], - [ - "exist", - "exists" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "order", - "allow" - ], - [ - "step", - "start" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "false", - "false" - ], - [ - "based", - "based" - ], - [ - "maps", - "map" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_fodf_ssst", - "docstring": "Script to compute Constrained Spherical Deconvolution (CSD) fiber ODFs.\n\nSee [Tournier et al. 
NeuroImage 2007]\n\nFormerly: scil_compute_ssst_fodf.py", - "help": "usage: scil_fodf_ssst.py [-h] [--sh_order int] [--mask] [--b0_threshold thr]\n [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec frf_file out_fODF\n\nScript to compute Constrained Spherical Deconvolution (CSD) fiber ODFs.\n\nSee [Tournier et al. NeuroImage 2007]\n\nFormerly: scil_compute_ssst_fodf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n frf_file Path of the FRF file.\n out_fODF Output path for the fiber ODF coefficients.\n\noptions:\n -h, --help show this help message and exit\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask Path to a binary mask. Only the data inside the mask will be used \n for computations and reconstruction.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as the new --b0_threshold.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "step", - "continue" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "matter", - "question", - "thinking", - "understand" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "order", - "order" - ], - [ - "fibre", - "fiber" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "papers", - "paper" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "work", - "find" - ], - [ - "thinking", - "you" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "work", - "even" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "diffusion", - "diffusion" - ], - [ - "thinking", - "i" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "exist", - "exists" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "lack", - "minimal" - ], - [ - "order", - "allow" - ], - [ - "step", - "start" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_fodf_to_bingham", - "docstring": "Script for fitting a Bingham distribution to each fODF lobe, as described\nin [1].\n\nThe Bingham fit is saved, with each Bingham distribution described by 7\ncoefficients (for example, for a maximum number of lobes of 5, the number\nof coefficients is 7 x 5 = 35 -- less than the number of coefficients for\nSH of maximum order 8).\n\nUsing 12 threads, the execution takes approximately 30 minutes for a brain with\n1mm isotropic resolution.\n\nFormerly: scil_fit_bingham_to_fodf.py", - "help": "usage: scil_fodf_to_bingham.py [-h] [--max_lobes MAX_LOBES] [--at AT]\n [--rt RT] [--min_sep_angle MIN_SEP_ANGLE]\n [--max_fit_angle MAX_FIT_ANGLE] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR]\n [-f]\n in_sh out_bingham\n\nScript for fitting a Bingham distribution to each fODF lobe, as described\nin [1].\n\nThe Bingham fit is saved, with each Bingham distribution described by 7\ncoefficients 
(for example, for a maximum number of lobes of 5, the number\nof coefficients is 7 x 5 = 35 -- less than the number of coefficients for\nSH of maximum order 8).\n\nUsing 12 threads, the execution takes approximately 30 minutes for a brain with\n1mm isotropic resolution.\n\nFormerly: scil_fit_bingham_to_fodf.py\n\npositional arguments:\n in_sh Input SH image.\n out_bingham Output Bingham functions image.\n\noptions:\n -h, --help show this help message and exit\n --max_lobes MAX_LOBES\n Maximum number of lobes per voxel to extract. [5]\n --at AT Absolute threshold for peaks extraction. [0.0]\n --rt RT Relative threshold for peaks extraction. [0.1]\n --min_sep_angle MIN_SEP_ANGLE\n Minimum separation angle between two peaks. [25.0]\n --max_fit_angle MAX_FIT_ANGLE\n Maximum distance in degrees around a peak direction for fitting the Bingham function. [15.0]\n --mask MASK Optional mask file. Only SH inside the mask are fitted.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -f Force overwriting of the output files.\n\n[1] T. W. Riffert, J. Schreiber, A. Anwander, and T. R. Kn\u00f6sche, \u201cBeyond\n fractional anisotropy: Extraction of bundle-specific structural metrics\n from crossing fiber models,\u201d NeuroImage, vol. 100, pp. 176-191, Oct. 2014,\n doi: 10.1016/j.neuroimage.2014.06.015.\n\n[2] J. Schreiber, T. Riffert, A. Anwander, and T. R. Kn\u00f6sche, \u201cPlausibility\n Tracking: A method to evaluate anatomical connectivity and microstructural\n properties along fiber pathways,\u201d NeuroImage, vol. 90, pp. 163-178, Apr.\n 2014, doi: 10.1016/j.neuroimage.2014.01.002.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "methods", - "method" - ], - [ - "average", - "per" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "total", - "90" - ], - [ - "work", - "and" - ], - [ - "lobe", - "lobes", - "lobes" - ], - [ - "considered", - "are" - ], - [ - "order", - "order" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "fibre", - "fiber" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "contrast", - "difference", - "form", - "result", - "specific", - "subject", - "true", - "unique", - "work", - "example" - ], - [ - "direction", - "direction" - ], - [ - "specific", - "specific" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "anatomical", - "anatomical" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "defined", - "function", - "functional", - "functions", - "function" - ], - [ - "pathway", - "pathways", - "pathways" - ], - [ - "long", - "with" - ], - [ - "structural", - "structural" - ], - [ - "methods", - "using" - ], - [ - "area", - "main", - "along" - ], - [ - "long", - "than" - ], - [ - "highly", - "less" - ], - [ - "degree", - "degrees" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", 
- "is" - ], - [ - "individual", - "each" - ], - [ - "long", - "two" - ], - [ - "step", - "start" - ], - [ - "connectivity", - "connectivity" - ], - [ - "total", - "100" - ], - [ - "lobe", - "lobes", - "occipital", - "parietal", - "lobe" - ], - [ - "function", - "functions", - "functions" - ], - [ - "bundles", - "bundle" - ], - [ - "data", - "tracking", - "tracking" - ], - [ - "examine", - "evaluate" - ] - ], - "keywords": [] - }, - { - "name": "scil_freewater_maps", - "docstring": "Compute Free Water maps [1] using AMICO.\nThis script supports both single and multi-shell data.\n\nFormerly: scil_compute_freewater.py", - "help": "usage: scil_freewater_maps.py [-h] [--mask MASK] [--out_dir OUT_DIR]\n [--b_thr B_THR] [--para_diff PARA_DIFF]\n [--iso_diff ISO_DIFF]\n [--perp_diff_min PERP_DIFF_MIN]\n [--perp_diff_max PERP_DIFF_MAX]\n [--lambda1 LAMBDA1] [--lambda2 LAMBDA2]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--mouse] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec\n\nCompute Free Water maps [1] using AMICO.\nThis script supports both single and multi-shell data.\n\nFormerly: scil_compute_freewater.py\n\npositional arguments:\n in_dwi DWI file.\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-vectors filename, in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Brain mask filename.\n --out_dir OUT_DIR Output directory for the Free Water results. [results]\n --b_thr B_THR Limit value to consider that a b-value is on an\n existing shell. Above this limit, the b-value is\n placed on a new shell. This includes b0s values.\n --mouse If set, use mouse fitting profile.\n --processes NBR Number of sub-processes to start. Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n\nModel options:\n --para_diff PARA_DIFF\n Axial diffusivity (AD) in the CC. [0.0015]\n --iso_diff ISO_DIFF Mean diffusivity (MD) in ventricles. [0.003]\n --perp_diff_min PERP_DIFF_MIN\n Radial diffusivity (RD) minimum. [0.0001]\n --perp_diff_max PERP_DIFF_MAX\n Radial diffusivity (RD) maximum. [0.0007]\n --lambda1 LAMBDA1 First regularization parameter. [0.0]\n --lambda2 LAMBDA2 Second regularization parameter. [0.25]\n\nKernels options:\n --save_kernels DIRECTORY\n Output directory for the COMMIT kernels.\n --load_kernels DIRECTORY\n Input directory where the COMMIT kernels are located.\n --compute_only Compute kernels only, --save_kernels must be used.\n\nReference:\n [1] Pasternak 0, Sochen N, Gur Y, Intrator N, Assaf Y.\n Free water elimination and mapping from diffusion mri.\n Magn Reson Med. 
- "synonyms": [ - [ - "total", - "number" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "parameter", - "parameters", - "parameter" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "limiting", - "limit" - ], - [ - "maps", - "mapping" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "area", - "located" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "diffusion", - "diffusion" - ], - [ - "imaging", - "mri" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "supported", - "supports" - ], - [ - "axial", - "axial" - ], - [ - "methods", - "using" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "area", - "work", - "where" - ], - [ - "result", - "results" - ], - [ - "level", - "above" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "step", - "start" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "maps", - "maps" - ], - [ - "considered", - "possibility", - "question", - "step", - "consider" - ] - ], - "keywords": [] - }, - { - "name": "scil_freewater_priors", - "docstring": "Synonym for scil_NODDI_priors.py", - "help": "usage: scil_freewater_priors.py [-h]\n [--fa_min_single_fiber FA_MIN_SINGLE_FIBER]\n [--fa_max_ventricles FA_MAX_VENTRICLES]\n [--md_min_ventricles MD_MIN_VENTRICLES]\n [--roi_radius ROI_RADIUS]\n [--roi_center pos pos pos]\n [--out_txt_1fiber_para FILE]\n [--out_txt_1fiber_perp FILE]\n [--out_mask_1fiber FILE]\n [--out_txt_ventricles FILE]\n [--out_mask_ventricles FILE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_FA in_AD in_RD in_MD\n\nCompute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py\n\npositional arguments:\n in_FA Path to the FA volume.\n in_AD Path to the axial diffusivity (AD) volume.\n in_RD Path to the radial diffusivity (RD) volume.\n in_MD Path to the mean diffusivity (MD) volume.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics options:\n --fa_min_single_fiber FA_MIN_SINGLE_FIBER\n Minimal threshold of FA (voxels above that threshold are considered in \n the single fiber mask). [0.7]\n --fa_max_ventricles FA_MAX_VENTRICLES\n Maximal threshold of FA (voxels under that threshold are considered in \n the ventricles). [0.1]\n --md_min_ventricles MD_MIN_VENTRICLES\n Minimal threshold of MD in mm2/s (voxels above that threshold are considered \n in the ventricles). [0.003]\n\nRegions options:\n --roi_radius ROI_RADIUS\n Radius of the region used to estimate the priors. The ROI will be a cube spanning \n from ROI_CENTER in each direction. [20]\n --roi_center pos pos pos\n Center of the ROI of size roi_radius used to estimate the priors; a 3-value coordinate. \n If not set, uses the center of the 3D volume.\n\nOutputs:\n --out_txt_1fiber_para FILE\n Output path for the text file containing the single fiber average value of AD.\n If not set, the file will not be saved.\n --out_txt_1fiber_perp FILE\n Output path for the text file containing the single fiber average value of RD.\n If not set, the file will not be saved.\n --out_mask_1fiber FILE\n Output path for single fiber mask. If not set, the mask will not be saved.\n --out_txt_ventricles FILE\n Output path for the text file containing the ventricles average value of MD.\n If not set, the file will not be saved.\n --out_mask_ventricles FILE\n Output path for the ventricle mask.\n If not set, the mask will not be saved.\n\nReference:\n [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n NODDI: practical in vivo neurite orientation dispersion and density\n imaging of the human brain. NeuroImage. 2012 Jul 16;61:1000-16.\n",
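The priors computation above reduces to masking within a central cube and thresholding FA/MD; a minimal sketch using the documented defaults (the FA, MD and AD volumes are hypothetical, not the script's implementation):

    import numpy as np

    FA = np.random.rand(64, 64, 64)
    MD = np.random.rand(64, 64, 64) * 0.004
    AD = np.random.rand(64, 64, 64) * 0.002
    center, r = np.array(FA.shape) // 2, 20       # --roi_center default, --roi_radius [20]
    roi = np.zeros(FA.shape, dtype=bool)
    roi[center[0]-r:center[0]+r, center[1]-r:center[1]+r, center[2]-r:center[2]+r] = True
    single_fiber = roi & (FA > 0.7)               # --fa_min_single_fiber [0.7]
    ventricles = roi & (FA < 0.1) & (MD > 0.003)  # --fa_max_ventricles / --md_min_ventricles
    para_diff = AD[single_fiber].mean()           # single-fiber average of AD
    iso_diff = MD[ventricles].mean()              # ventricle average of MD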
- "synonyms": [ - [ - "animal", - "human", - "human" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "orientation", - "orientation" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "fibre", - "fiber" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "areas", - "region", - "regions", - "regions" - ], - [ - "direction", - "direction" - ], - [ - "order", - "set" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "average", - "average" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "axial", - "axial" - ], - [ - "methods", - "using" - ], - [ - "vivo", - "vivo" - ], - [ - "imaging", - "imaging" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "individual", - "each" - ], - [ - "larger", - "size", - "size" - ], - [ - "lack", - "minimal" - ], - [ - "level", - "above" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "area", - "areas", - "region", - "regions", - "region" - ] - ], - "keywords": [] - }, - { - "name": "scil_frf_mean", - "docstring": "Compute the mean Fiber Response Function from a set of individually\ncomputed Response Functions.\n\nThe FRF files are obtained from scil_frf_ssst.py, scil_frf_msmt.py in the\ncase of multi-shell data or scil_frf_memsmt.py in the case of multi-encoding\nmulti-shell data.\n\nFormerly: scil_compute_mean_frf.py", - "help": "usage: scil_frf_mean.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n list [list ...] file\n\nCompute the mean Fiber Response Function from a set of individually\ncomputed Response Functions.\n\nThe FRF files are obtained from scil_frf_ssst.py, scil_frf_msmt.py in the\ncase of multi-shell data or scil_frf_memsmt.py in the case of multi-encoding\nmulti-shell data.\n\nFormerly: scil_compute_mean_frf.py\n\npositional arguments:\n list List of FRF filepaths.\n file Path of the output mean FRF file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n",
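Averaging a set of FRF files is essentially an element-wise mean; a minimal sketch (the file names are hypothetical, not the script's implementation):

    import numpy as np

    frf_paths = ["sub-01_frf.txt", "sub-02_frf.txt"]   # hypothetical inputs
    frfs = np.stack([np.loadtxt(p) for p in frf_paths])
    np.savetxt("mean_frf.txt", frfs.mean(axis=0))      # element-wise mean FRF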
- { - "name": "scil_frf_memsmt", - "docstring": "Script to estimate response functions for multi-encoding multi-shell\nmulti-tissue (memsmt) constrained spherical deconvolution. In order to operate,\nthe script only needs the data from one type of b-tensor encoding. However,\ngiving only a spherical one will not produce good fiber response functions, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest always using at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombination with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable. Moreover, all of `--in_dwis`, `--in_bvals`, `--in_bvecs` and\n`--in_bdeltas` must have the same number of arguments. Be sure to keep the\nsame order of encodings throughout all these inputs and to set `--in_bdeltas`\naccordingly (IMPORTANT).\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n\nIn the wm, we compute the response function in each voxel where\nthe FA is superior to threshold_fa_wm.\n\nIn the gm (or csf), we compute the response function in each voxel where\nthe FA is below threshold_fa_gm (or threshold_fa_csf) and where\nthe MD is below threshold_md_gm (or threshold_md_csf).\n\n>>> scil_frf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_frf.py",
- "help": "usage: scil_frf_memsmt.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--mask MASK]\n [--mask_wm MASK_WM] [--mask_gm MASK_GM]\n [--mask_csf MASK_CSF] [--fa_thr_wm FA_THR_WM]\n [--fa_thr_gm FA_THR_GM] [--fa_thr_csf FA_THR_CSF]\n [--md_thr_gm MD_THR_GM] [--md_thr_csf MD_THR_CSF]\n [--min_nvox MIN_NVOX] [--tolerance tol]\n [--skip_b0_check] [--dti_bval_limit DTI_BVAL_LIMIT]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--wm_frf_mask file] [--gm_frf_mask file]\n [--csf_frf_mask file] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n out_wm_frf out_gm_frf out_csf_frf\n\nScript to estimate response functions for multi-encoding multi-shell\nmulti-tissue (memsmt) constrained spherical deconvolution. In order to operate,\nthe script only needs the data from one type of b-tensor encoding. However,\ngiving only a spherical one will not produce good fiber response functions, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest always using at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombination with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable. Moreover, all of `--in_dwis`, `--in_bvals`, `--in_bvecs` and\n`--in_bdeltas` must have the same number of arguments. Be sure to keep the\nsame order of encodings throughout all these inputs and to set `--in_bdeltas`\naccordingly (IMPORTANT).\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n\nIn the wm, we compute the response function in each voxel where\nthe FA is superior to threshold_fa_wm.\n\nIn the gm (or csf), we compute the response function in each voxel where\nthe FA is below threshold_fa_gm (or threshold_fa_csf) and where\nthe MD is below threshold_md_gm (or threshold_md_csf).\n\n>>> scil_frf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_frf.py\n\n
positional arguments:\n out_wm_frf Path to the output WM frf file, in .txt format.\n out_gm_frf Path to the output GM frf file, in .txt format.\n out_csf_frf Path to the output CSF frf file, in .txt format.\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as \n dwi, bval and bvec inputs.\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for\n computations and reconstruction. Useful if no tissue masks are available.\n --mask_wm MASK_WM Path to the input WM mask file, used to improve the final WM frf mask.\n --mask_gm MASK_GM Path to the input GM mask file, used to improve the final GM frf mask.\n --mask_csf MASK_CSF Path to the input CSF mask file, used to improve the final CSF frf mask.\n --fa_thr_wm FA_THR_WM\n If supplied, use this threshold to select single WM fiber voxels from \n the FA inside the WM mask defined by mask_wm. \n Each voxel above this threshold will be selected. [0.7]\n --fa_thr_gm FA_THR_GM\n If supplied, use this threshold to select GM voxels from the FA inside \n the GM mask defined by mask_gm. \n Each voxel below this threshold will be selected. [0.2]\n --fa_thr_csf FA_THR_CSF\n If supplied, use this threshold to select CSF voxels from the FA inside \n the CSF mask defined by mask_csf. \n Each voxel below this threshold will be selected. [0.1]\n --md_thr_gm MD_THR_GM\n If supplied, use this threshold to select GM voxels from the MD inside \n the GM mask defined by mask_gm. \n Each voxel below this threshold will be selected. [0.0007]\n --md_thr_csf MD_THR_CSF\n If supplied, use this threshold to select CSF voxels from the MD inside \n the CSF mask defined by mask_csf. \n Each voxel below this threshold will be selected. [0.003]\n --min_nvox MIN_NVOX Minimal number of voxels needed for each tissue mask in order to \n proceed to frf estimation. [100]\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we verify that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --dti_bval_limit DTI_BVAL_LIMIT\n The highest b-value taken for the DTI model. [1200]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to estimate the \n response functions. The roi will be a cuboid spanning from the middle of \n the volume in each direction with the different radii. The type is either \n an int (e.g. --roi_radii 10) or an array-like (3,) (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the cuboid roi using roi_radii. \n [center of the 3D volume] (e.g. --roi_center 66 79 79)\n
 --wm_frf_mask file Path to the output WM frf mask file, the voxels used to compute the WM frf.\n --gm_frf_mask file Path to the output GM frf mask file, the voxels used to compute the GM frf.\n --csf_frf_mask file Path to the output CSF frf mask file, the voxels used to compute the CSF frf.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], - "keywords": [] - },
[ - "methods", - "using" - ], - [ - "variety", - "work", - "other" - ], - [ - "clear", - "held", - "work", - "taken" - ], - [ - "future", - "current" - ], - [ - "exist", - "exists" - ], - [ - "defined", - "function", - "defined" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "future", - "result", - "specific", - "variety", - "work", - "these" - ], - [ - "supported", - "supported" - ], - [ - "variance", - "variance" - ], - [ - "shape", - "shapes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "blue", - "green", - "red", - "white", - "white" - ], - [ - "middle", - "middle" - ], - [ - "area", - "work", - "where" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "blue", - "dark", - "green", - "grey", - "white", - "gray" - ], - [ - "lack", - "minimal" - ], - [ - "level", - "above" - ], - [ - "considered", - "form", - "meaning", - "order", - "result", - "thus" - ], - [ - "order", - "allow" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "step", - "true", - "work", - "yet" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "total", - "100" - ], - [ - "function", - "functions", - "functions" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "based", - "based" - ], - [ - "clear", - "matter", - "question", - "thinking", - "sure" - ], - [ - "clear", - "considered", - "result", - "however" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "shape", - "structure", - "shape" - ], - [ - "indicating", - "suggest" - ], - [ - "considered", - "key", - "work", - "important" - ] - ], - "keywords": [] - }, - { - "name": "scil_frf_msmt", - "docstring": "Compute response functions for multi-shell multi-tissue (MSMT) constrained\nspherical deconvolution from DWI data.\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n - In the wm, we compute the response function in each voxel where the FA is\n superior at threshold_fa_wm.\n - In the gm (or csf), we compute the response function in each voxel where\n the FA is below at threshold_fa_gm (or threshold_fa_csf) and where the MD\n is below threshold_md_gm (or threshold_md_csf).\n\nWe output one response function file for each tissue, containing the response\nfunction for each b-value (arranged by lines). These are saved as the diagonal\nof the axis-symmetric diffusion tensor (3 e-values) and a mean b0 value.\nFor example, a typical wm_frf is [15e-4, 4e-4, 4e-4, 700], where the tensor\ne-values are (15,4,4)x10^-4 mm^2/s and the mean b0 is 700.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical deconvolution\nfor improved analysis of multi-shell diffusion MRI data. 
- "help": "usage: scil_frf_msmt.py [-h] [--mask MASK] [--mask_wm MASK_WM]\n [--mask_gm MASK_GM] [--mask_csf MASK_CSF]\n [--fa_thr_wm FA_THR_WM] [--fa_thr_gm FA_THR_GM]\n [--fa_thr_csf FA_THR_CSF] [--md_thr_gm MD_THR_GM]\n [--md_thr_csf MD_THR_CSF] [--min_nvox MIN_NVOX]\n [--tolerance TOLERANCE] [--skip_b0_check]\n [--dti_bval_limit DTI_BVAL_LIMIT]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--wm_frf_mask file] [--gm_frf_mask file]\n [--csf_frf_mask file] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_wm_frf out_gm_frf\n out_csf_frf\n\nCompute response functions for multi-shell multi-tissue (MSMT) constrained\nspherical deconvolution from DWI data.\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n - In the wm, we compute the response function in each voxel where the FA is\n superior to threshold_fa_wm.\n - In the gm (or csf), we compute the response function in each voxel where\n the FA is below threshold_fa_gm (or threshold_fa_csf) and where the MD\n is below threshold_md_gm (or threshold_md_csf).\n\nWe output one response function file for each tissue, containing the response\nfunction for each b-value (arranged by lines). These are saved as the diagonal\nof the axis-symmetric diffusion tensor (3 e-values) and a mean b0 value.\nFor example, a typical wm_frf is [15e-4, 4e-4, 4e-4, 700], where the tensor\ne-values are (15,4,4)x10^-4 mm^2/s and the mean b0 is 700.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical deconvolution\nfor improved analysis of multi-shell diffusion MRI data. Neuroimage (2014)\n\nFormerly: scil_compute_msmt_frf.py\n\npositional arguments:\n in_dwi Path to the input diffusion volume.\n in_bval Path to the bval file, in FSL format.\n in_bvec Path to the bvec file, in FSL format.\n out_wm_frf Path to the output WM frf file, in .txt format.\n out_gm_frf Path to the output GM frf file, in .txt format.\n out_csf_frf Path to the output CSF frf file, in .txt format.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to a binary mask. Only the data inside the mask\n will be used for computations and reconstruction.\n Useful if no tissue masks are available.\n --mask_wm MASK_WM Path to the input WM mask file, used to improve the\n final WM frf mask.\n --mask_gm MASK_GM Path to the input GM mask file, used to improve the\n final GM frf mask.\n --mask_csf MASK_CSF Path to the input CSF mask file, used to improve the\n final CSF frf mask.\n --fa_thr_wm FA_THR_WM\n If supplied, use this threshold to select single WM\n fiber voxels from the FA inside the WM mask defined by\n mask_wm. Each voxel above this threshold will be\n selected. [0.7]\n
 --fa_thr_gm FA_THR_GM\n If supplied, use this threshold to select GM voxels\n from the FA inside the GM mask defined by mask_gm.\n Each voxel below this threshold will be selected.\n [0.2]\n --fa_thr_csf FA_THR_CSF\n If supplied, use this threshold to select CSF voxels\n from the FA inside the CSF mask defined by mask_csf.\n Each voxel below this threshold will be selected.\n [0.1]\n --md_thr_gm MD_THR_GM\n If supplied, use this threshold to select GM voxels\n from the MD inside the GM mask defined by mask_gm.\n Each voxel below this threshold will be selected.\n [0.0007]\n --md_thr_csf MD_THR_CSF\n If supplied, use this threshold to select CSF voxels\n from the MD inside the CSF mask defined by mask_csf.\n Each voxel below this threshold will be selected.\n [0.003]\n --min_nvox MIN_NVOX Minimal number of voxels needed for each tissue mask\n in order to proceed to frf estimation. [100]\n --tolerance TOLERANCE\n The tolerated gap between the b-values to extract and\n the current b-value. [20]\n --skip_b0_check By default, we verify that at least one b0 exists\n in your data (i.e. b-values below the default\n --tolerance). Use this option to allow continuing even\n if the minimum b-value is suspiciously high. Use with\n care, and only if you understand your data.\n --dti_bval_limit DTI_BVAL_LIMIT\n The highest b-value taken for the DTI model. [1200]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to\n estimate the response functions. The roi will be a\n cuboid spanning from the middle of the volume in each\n direction with the different radii. The type is either\n an int (e.g. --roi_radii 10) or an array-like (3,)\n (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the cuboid roi\n using roi_radii. [center of the 3D volume] (e.g.\n --roi_center 66 79 79)\n --wm_frf_mask file Path to the output WM frf mask file, the voxels used\n to compute the WM frf.\n --gm_frf_mask file Path to the output GM frf mask file, the voxels used\n to compute the GM frf.\n --csf_frf_mask file Path to the output CSF frf mask file, the voxels used\n to compute the CSF frf.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. 
Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], - "keywords": [] - },
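As a concrete sketch (hypothetical file names; only arguments documented above are used):
>>> scil_frf_msmt.py dwi.nii.gz dwi.bval dwi.bvec wm_frf.txt gm_frf.txt csf_frf.txt --mask brain_mask.nii.gz  # hypothetical inputs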
- "name": "scil_frf_set_diffusivities", - "docstring": "Replace the fiber response function in the FRF file.\nUse this script when you want to use a fixed response function\nand keep the mean b0.\n\nThe FRF file is obtained from scil_frf_ssst.py or scil_frf_msmt.py in the case\nof multi-shell data.\n\nFormerly: scil_set_response_function.py", - "help": "usage: scil_frf_set_diffusivities.py [-h] [--no_factor]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n input new_frf output\n\nReplace the fiber response function in the FRF file.\nUse this script when you want to use a fixed response function\nand keep the mean b0.\n\nThe FRF file is obtained from scil_frf_ssst.py or scil_frf_msmt.py in the case\nof multi-shell data.\n\nFormerly: scil_set_response_function.py\n\npositional arguments:\n input Path of the FRF file.\n new_frf New response function given as a tuple. We will replace the \n response function in frf_file with this fiber response \n function x 10**-4 (e.g. 15,4,4). \n If multi-shell, write the first shell,then the second shell, \n and the third, etc. (e.g. 15,4,4,13,5,5,12,5,5).\n output Path of the new FRF file.\n\noptions:\n -h, --help show this help message and exit\n --no_factor If supplied, the fiber response function is\n evaluated without the x 10**-4 factor. [False].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "work", - "and" - ], - [ - "fibre", - "fiber" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "thinking", - "you" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "lack", - "result", - "without" - ], - [ - "future", - "will" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "action", - "response" - ], - [ - "held", - "in" - ], - [ - "defined", - "function", - "functional", - "functions", - "function" - ], - [ - "long", - "with" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "methods", - "using" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "examined", - "evaluated" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "false", - "false" - ], - [ - "matter", - "question", - "case" - ] - ], - "keywords": [] - }, - { - "name": "scil_frf_ssst", - "docstring": "Compute a single Fiber Response Function from a DWI.\n\nA DTI fit is made, and voxels containing a single fiber population are\nfound using a threshold on the FA.\n\nFormerly: scil_compute_ssst_frf.py", - "help": "usage: scil_frf_ssst.py [-h] [--mask MASK] [--mask_wm MASK_WM]\n [--fa_thresh FA_THRESH]\n [--min_fa_thresh MIN_FA_THRESH] [--min_nvox MIN_NVOX]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec frf_file\n\nCompute a 
- { - "name": "scil_frf_ssst", - "docstring": "Compute a single Fiber Response Function from a DWI.\n\nA DTI fit is made, and voxels containing a single fiber population are\nfound using a threshold on the FA.\n\nFormerly: scil_compute_ssst_frf.py", - "help": "usage: scil_frf_ssst.py [-h] [--mask MASK] [--mask_wm MASK_WM]\n [--fa_thresh FA_THRESH]\n [--min_fa_thresh MIN_FA_THRESH] [--min_nvox MIN_NVOX]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec frf_file\n\nCompute a single Fiber Response Function from a DWI.\n\nA DTI fit is made, and voxels containing a single fiber population are\nfound using a threshold on the FA.\n\nFormerly: scil_compute_ssst_frf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n frf_file Path to the output FRF file, in .txt format, saved by Numpy.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to a binary mask. Only the data inside the mask will be used \n for computations and reconstruction. Useful if no white matter mask \n is available.\n --mask_wm MASK_WM Path to a binary white matter mask. Only the data inside this mask \n and above the threshold defined by --fa_thresh will be used to estimate the \n fiber response function.\n --fa_thresh FA_THRESH\n If supplied, use this threshold as the initial threshold to select \n single fiber voxels. [0.7]\n --min_fa_thresh MIN_FA_THRESH\n If supplied, this is the minimal value that will be tried when looking \n for single fiber voxels. [0.5]\n --min_nvox MIN_NVOX Minimal number of voxels that must be identified as single fiber voxels \n in the automatic estimation. [300]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to estimate the \n response functions. The roi will be a cuboid spanning from the middle of \n the volume in each direction with the different radii. The type is either \n an int (e.g. --roi_radii 10) or an array-like (3,) (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the roi of size roi_radius. [center of the 3D volume]\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we verify that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as the new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n
NeuroImage 2007\n", - "synonyms": [ - [ - "individual", - "specific", - "unique", - "variety", - "different" - ], - [ - "total", - "number" - ], - [ - "step", - "continue" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "individuals", - "those" - ], - [ - "matter", - "question", - "thinking", - "understand" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "fibre", - "fiber" - ], - [ - "unknown", - "identified" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "matter", - "question", - "subject", - "matter" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "direction", - "direction" - ], - [ - "work", - "find" - ], - [ - "thinking", - "you" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "tool", - "useful" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "work", - "even" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "diffusion", - "diffusion" - ], - [ - "thinking", - "i" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "action", - "response" - ], - [ - "held", - "in" - ], - [ - "defined", - "function", - "functional", - "functions", - "function" - ], - [ - "long", - "with" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "population", - "population" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "exist", - "exists" - ], - [ - "defined", - "function", - "defined" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "thinking", - "working", - "looking" - ], - [ - "considered", - "is" - ], - [ - "blue", - "green", - "red", - "white", - "white" - ], - [ - "middle", - "middle" - ], - [ - "individual", - "each" - ], - [ - "larger", - "size", - "size" - ], - [ - "lack", - "minimal" - ], - [ - "level", - "above" - ], - [ - "order", - "allow" - ], - [ - "clear", - "work", - "made" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "function", - "functions", - "functions" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_get_version", - "docstring": "Give you information about your current scilpy installation.\nThis is useful for non-developers to give you the information\nneeded to reproduce your results, or to help debugging.\n\nIf you are experiencing a bug, please run this script and\nsend the output to the scilpy developers.", - "help": "usage: scil_get_version.py [-h] [--show_dependencies]\n [-v [{DEBUG,INFO,WARNING}]]\n\nGive you information about your current scilpy installation.\nThis is 
- { - "name": "scil_get_version", - "docstring": "Gives you information about your current scilpy installation.\nThis is useful for non-developers to give you the information\nneeded to reproduce your results, or to help with debugging.\n\nIf you are experiencing a bug, please run this script and\nsend the output to the scilpy developers.", - "help": "usage: scil_get_version.py [-h] [--show_dependencies]\n [-v [{DEBUG,INFO,WARNING}]]\n\nGives you information about your current scilpy installation.\nThis is useful for non-developers to give you the information\nneeded to reproduce your results, or to help with debugging.\n\nIf you are experiencing a bug, please run this script and\nsend the output to the scilpy developers.\n\noptions:\n -h, --help show this help message and exit\n --show_dependencies Show the dependencies of scilpy.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [], - "keywords": [] - }, - { - "name": "scil_gradients_apply_transform", - "docstring": "Transform bvecs using an affine/rigid transformation.\n\nFormerly: scil_apply_transform_to_bvecs.py.", - "help": "usage: scil_gradients_apply_transform.py [-h] [--inverse]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bvecs in_transfo out_bvecs\n\nTransform bvecs using an affine/rigid transformation.\n\nFormerly: scil_apply_transform_to_bvecs.py.\n\npositional arguments:\n in_bvecs Path of the bvec file, in FSL format\n in_transfo Path of the file containing the 4x4 \n transformation matrix (.txt, .npy or .mat).\n out_bvecs Output filename of the transformed bvecs.\n\noptions:\n -h, --help show this help message and exit\n --inverse Apply the inverse transformation.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], - "keywords": [] - },
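For instance, rotating b-vectors with the same transform used to register the DWI (hypothetical paths):
>>> scil_gradients_apply_transform.py dwi.bvec transfo.txt dwi_reg.bvec  # hypothetical paths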
- { - "name": "scil_gradients_convert", - "docstring": "Script to convert gradient tables between FSL and MRtrix formats.\n\nFormerly: scil_convert_gradients_mrtrix_to_fsl.py or\nscil_convert_gradients_fsl_to_mrtrix.py", - "help": "usage: scil_gradients_convert.py [-h] (--input_fsl | --input_mrtrix) [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n GRADIENT_FILE(S) [GRADIENT_FILE(S) ...]\n output\n\nScript to convert gradient tables between FSL and MRtrix formats.\n\nFormerly: scil_convert_gradients_mrtrix_to_fsl.py or\nscil_convert_gradients_fsl_to_mrtrix.py\n\npositional arguments:\n GRADIENT_FILE(S) Path(s) to the gradient file(s). Either FSL (.bval, .bvec) or MRtrix (.b).\n output Basename of output without extension. Extension(s) will be added automatically (.b for MRtrix, .bval/.bvec for FSL).\n\noptions:\n -h, --help show this help message and exit\n --input_fsl FSL format.\n --input_mrtrix MRtrix format.\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [], - "keywords": [] - },
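For example, converting an FSL bval/bvec pair to MRtrix (hypothetical paths; the last argument is a basename, so dwi.b is created):
>>> scil_gradients_convert.py --input_fsl dwi.bval dwi.bvec dwi  # hypothetical paths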
- { - "name": "scil_gradients_generate_sampling", - "docstring": "Generate multi-shell gradient sampling with various processing options. Helps\naccelerate gradients, optimize duty cycle and avoid artefacts.\n\nMulti-shell gradient sampling is generated as in [1]. The bvecs are then\nflipped to maximize spread for eddy current correction, b0s are interleaved at\nequal spacing and the non-b0 samples are finally shuffled to minimize the total\ndiffusion gradient amplitude over a few TR.\n\nFormerly: scil_generate_gradient_sampling.py", - "help": "usage: scil_gradients_generate_sampling.py [-h] [--eddy] [--duty]\n [--no_b0_start NO_B0_START | --b0_every B0_EVERY]\n [--b0_end] [--b0_value B0_VALUE]\n [--b0_philips]\n (--bvals bvals [bvals ...] | --b_lin_max B_LIN_MAX | --q_lin_max Q_LIN_MAX)\n (--fsl | --mrtrix)\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n nb_samples_per_shell\n [nb_samples_per_shell ...]\n out_basename\n\nGenerate multi-shell gradient sampling with various processing options. Helps\naccelerate gradients, optimize duty cycle and avoid artefacts.\n\nMulti-shell gradient sampling is generated as in [1]. The bvecs are then\nflipped to maximize spread for eddy current correction, b0s are interleaved at\nequal spacing and the non-b0 samples are finally shuffled to minimize the total\ndiffusion gradient amplitude over a few TR.\n\nFormerly: scil_generate_gradient_sampling.py\n\npositional arguments:\n nb_samples_per_shell Number of samples on each non b0 shell. \n If multishell, provide a number per shell.\n out_basename Gradient sampling output basename (don't include extension).\n Please add options --fsl and/or --mrtrix below.\n\noptions:\n -h, --help show this help message and exit\n --eddy If set, we apply eddy optimization.\n B-vectors are flipped to be well spread without symmetry.\n --duty If set, we apply duty cycle optimization. \n B-vectors are shuffled to reduce consecutive colinearity in the samples. [False]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nb0 acquisitions:\n If you add no option, the default is to have a b0 at the start.\n\n --no_b0_start NO_B0_START\n If set, do not add a b0 at the beginning. \n --b0_every B0_EVERY Interleave a b0 every n=b0_every values. Starts after the first b0 \n (cannot be used with --no_b0_start). Must be an integer >= 1.\n --b0_end If set, adds a b0 as last sample.\n --b0_value B0_VALUE b-value of the b0s. [0.0]\n --b0_philips If set, replaces values of b0s bvecs by existing bvecs for Philips handling.\n\nNon-b0 acquisitions:\n --bvals bvals [bvals ...]\n bval of each non-b0 shell.\n --b_lin_max B_LIN_MAX\n b-max for linear bval distribution in *b*.\n --q_lin_max Q_LIN_MAX\n b-max for linear bval distribution in *q*; \n the square root of b-values will be linearly distributed.\n\nSave as:\n --fsl Save in FSL format (.bvec/.bval).\n --mrtrix Save in MRtrix format (.b).\n\nReferences: [1] Emmanuel Caruyer, Christophe Lenglet, Guillermo Sapiro,\nRachid Deriche. Design of multishell gradient sampling with uniform coverage\nin diffusion MRI. Magnetic Resonance in Medicine, Wiley, 2013, 69 (6),\npp. 1534-1540. \n \n", - "synonyms": [], - "keywords": [] - },
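As an illustration, a two-shell scheme with 32 and 64 directions, eddy and duty-cycle optimizations, saved in FSL format (hypothetical basename):
>>> scil_gradients_generate_sampling.py 32 64 sampling --bvals 1000 2000 --eddy --duty --fsl  # hypothetical basename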
- { - "name": "scil_gradients_modify_axes", - "docstring": "Flip (e.g., x --> -x) or swap (e.g., x <-> y) chosen axes of the gradient sampling\nmatrix. Result will be saved in the same format as input gradient sampling\nfile.\n\nFormerly: scil_flip_gradients.py or scil_swap_gradient_axis.py", - "help": "usage: scil_gradients_modify_axes.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_gradient_sampling_file\n out_gradient_sampling_file\n {1,2,3,-1,-2,-3} {1,2,3,-1,-2,-3}\n {1,2,3,-1,-2,-3}\n\nFlip (e.g., x --> -x) or swap (e.g., x <-> y) chosen axes of the gradient sampling\nmatrix. Result will be saved in the same format as input gradient sampling\nfile.\n\nFormerly: scil_flip_gradients.py or scil_swap_gradient_axis.py\n\npositional arguments:\n in_gradient_sampling_file\n Path to gradient sampling file. (.bvec or .b)\n out_gradient_sampling_file\n Where to save the flipped gradient sampling file. Extension (.bvec or .b) must be the same as in_gradient_sampling_file.\n {1,2,3,-1,-2,-3} The final order of the axes, compared to original order: x=1 y=2 z=3.\n Ex: to only flip y: 1 -2 3.\n Ex: to only swap x and y: 2 1 3.\n Ex: to first flip x, then permute all three axes: 3 -1 2.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], - "keywords": [] - },
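For instance, swapping the x and y axes of a bvec file (hypothetical paths; the axis codes are the documented ones):
>>> scil_gradients_modify_axes.py dwi.bvec dwi_swapped.bvec 2 1 3  # hypothetical paths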
- { - "name": "scil_gradients_round_bvals", - "docstring": "Select b-values on specific b-value shells.\n\nWith the --tolerance argument, this is useful for sampling schemes where\nb-values of a shell are not all identical. Adjust the tolerance to vary the\naccepted interval around the targeted b-value.\n\nFor example, a b-value of 2000 and a tolerance of 20 will select all b-values\nbetween [1980, 2020] and round them to the value of 2000.\n\n>> scil_gradients_round_bvals.py bvals 0 1000 2000 newbvals --tolerance 20\n\nFormerly: scil_resample_bvals.py", - "help": "usage: scil_gradients_round_bvals.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bval shells [shells ...] out_bval\n tolerance\n\nSelect b-values on specific b-value shells.\n\nWith the --tolerance argument, this is useful for sampling schemes where\nb-values of a shell are not all identical. Adjust the tolerance to vary the\naccepted interval around the targeted b-value.\n\nFor example, a b-value of 2000 and a tolerance of 20 will select all b-values\nbetween [1980, 2020] and round them to the value of 2000.\n\n>> scil_gradients_round_bvals.py bvals 0 1000 2000 newbvals --tolerance 20\n\nFormerly: scil_resample_bvals.py\n\npositional arguments:\n in_bval The b-values in FSL format.\n shells The list of expected shells. For example 0 1000 2000.\n All b-values in the b_val file should correspond to one given shell (up to the tolerance).\n out_bval The name of the output b-values.\n tolerance The tolerated gap between the b-values to extract and the \n actual b-values. Expecting an integer value. Comparison is \n strict: a b-value of 1010 with a tolerance of 10 is NOT \n included in shell 1000. Suggestion: 20.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], - "keywords": [] - },
- { - "name": "scil_gradients_validate_correct", - "docstring": "Detect sign flips and/or axes swaps in the gradients table from a fiber\ncoherence index [1]. The script takes as input the principal direction(s)\nat each voxel, the b-vectors and the fractional anisotropy map and outputs\na corrected b-vectors file.\n\nA typical pipeline could be:\n>>> scil_dti_metrics.py dwi.nii.gz bval bvec --not_all --fa fa.nii.gz\n --evecs peaks.nii.gz\n>>> scil_gradients_validate_correct.py bvec peaks_v1.nii.gz fa.nii.gz bvec_corr\n\nNote that peaks_v1.nii.gz is the file containing the direction associated\nwith the highest eigenvalue at each voxel.\n\nIt is also possible to use a file containing multiple principal directions per\nvoxel, given that they are sorted by decreasing amplitude. In that case, the\nfirst direction (with the highest amplitude) will be chosen for validation.\nOnly 4D data is supported, so the directions must be stored in a single\ndimension. For example, peaks.nii.gz from scil_fodf_metrics.py could be used.\n\nFormerly: scil_validate_and_correct_bvecs.py", - "help": "usage: scil_gradients_validate_correct.py [-h] [--mask MASK]\n [--fa_threshold FA_THRESHOLD]\n [--column_wise]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bvec in_peaks in_FA out_bvec\n\nDetect sign flips and/or axes swaps in the gradients table from a fiber\ncoherence index [1]. The script takes as input the principal direction(s)\nat each voxel, the b-vectors and the fractional anisotropy map and outputs\na corrected b-vectors file.\n\nA typical pipeline could be:\n>>> scil_dti_metrics.py dwi.nii.gz bval bvec --not_all --fa fa.nii.gz\n --evecs peaks.nii.gz\n>>> scil_gradients_validate_correct.py bvec peaks_v1.nii.gz fa.nii.gz bvec_corr\n\nNote that peaks_v1.nii.gz is the file containing the direction associated\nwith the highest eigenvalue at each voxel.\n\nIt is also possible to use a file containing multiple principal directions per\nvoxel, given that they are sorted by decreasing amplitude. In that case, the\nfirst direction (with the highest amplitude) will be chosen for validation.\nOnly 4D data is supported, so the directions must be stored in a single\ndimension. For example, peaks.nii.gz from scil_fodf_metrics.py could be used.\n\nFormerly: scil_validate_and_correct_bvecs.py\n\npositional arguments:\n in_bvec Path to bvec file.\n in_peaks Path to peaks file.\n in_FA Path to the fractional anisotropy file.\n out_bvec Path to corrected bvec file (FSL format).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to an optional mask. If set, FA and Peaks will only be used inside the mask.\n --fa_threshold FA_THRESHOLD\n FA threshold. Only voxels with FA higher than fa_threshold will be considered. [0.2]\n --column_wise Specify if input peaks are column-wise (..., 3, N) instead of row-wise (..., N, 3).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Schilling KG, Yeh FC, Nath V, Hansen C, Williams O, Resnick S, Anderson AW,\nLandman BA. A fiber coherence index for quality control of B-table orientation\nin diffusion MRI scans. Magn Reson Imaging. 2019 May;58:82-89.\ndoi: 10.1016/j.mri.2019.01.018.\n
", - "synonyms": [], - "keywords": [] - }, - { - "name": "scil_gradients_validate_correct_eddy", - "docstring": "Validate and correct gradients from eddy outputs.\nWith full AP-PA eddy outputs, a full bvec bval is produced (2x the nb of dirs and bvals),\nwhich doesn't fit with the output dwi (1x the nb of dirs).\n\nFormerly: scil_validate_and_correct_eddy_gradients.py", - "help": "usage: scil_gradients_validate_correct_eddy.py [-h]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bvec in_bval nb_dirs\n out_bvec out_bval\n\nValidate and correct gradients from eddy outputs.\nWith full AP-PA eddy outputs, a full bvec bval is produced (2x the nb of dirs and bvals),\nwhich doesn't fit with the output dwi (1x the nb of dirs).\n\nFormerly: scil_validate_and_correct_eddy_gradients.py\n\npositional arguments:\n in_bvec In bvec file.\n in_bval In bval file.\n nb_dirs Number of directions per DWI.\n out_bvec Out bvec file.\n out_bval Out bval file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [], - "keywords": [] - },
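For example, with 64 directions per phase-encoding direction (hypothetical paths):
>>> scil_gradients_validate_correct_eddy.py eddy.bvec eddy.bval 64 out.bvec out.bval  # hypothetical paths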
scil_validate_and_correct_eddy_gradients.py\n\npositional arguments:\n in_bvec In bvec file.\n in_bval In bval file.\n nb_dirs Number of directions per DWI.\n out_bvec Out bvec file.\n out_bval Out bval file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "direction", - "directions" - ], - [ - "long", - "a" - ], - [ - "clear", - "left", - "out" - ], - [ - "average", - "per" - ], - [ - "work", - "and" - ], - [ - "held", - "on" - ], - [ - "methods", - "using" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "left", - "result", - "when" - ], - [ - "held", - "in" - ], - [ - "considered", - "is" - ], - [ - "long", - "full" - ], - [ - "long", - "with" - ] - ], - "keywords": [] - }, - { - "name": "scil_header_print_info", - "docstring": "Print the raw header from the provided file or only the specified keys.\nSupport trk, nii and mgz files.\n\nFormerly: scil_print_header.py", - "help": "usage: scil_header_print_info.py [-h] [--keys KEYS [KEYS ...]]\n [--print_affine] [-v [{DEBUG,INFO,WARNING}]]\n in_file\n\nPrint the raw header from the provided file or only the specified keys.\nSupport trk, nii and mgz files.\n\nFormerly: scil_print_header.py\n\npositional arguments:\n in_file Input file (trk, nii and mgz).\n\noptions:\n -h, --help show this help message and exit\n --keys KEYS [KEYS ...]\n Print only the specified keys.\n --print_affine Print nibabel's affine.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "methods", - "using" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "is" - ], - [ - "parameters", - "specified" - ] - ], - "keywords": [] - }, - { - "name": "scil_header_validate_compatibility", - "docstring": "Will compare all input files against the first one for the compatibility\nof their spatial attributes.\n\nSpatial attributes are: affine, dimensions, voxel sizes and voxel order.\n\nFormerly: scil_verify_space_attributes_compatibility.py", - "help": "usage: scil_header_validate_compatibility.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n in_files [in_files ...]\n\nWill compare all input files against the first one for the compatibility\nof their spatial attributes.\n\nSpatial attributes are: affine, dimensions, voxel sizes and voxel order.\n\nFormerly: scil_verify_space_attributes_compatibility.py\n\npositional arguments:\n in_files List of file to compare (trk, tck and nii/nii.gz).\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "anatomy", - "anatomy" - ], - [ - "voxel", - "voxel" - ], - [ - "work", - "and" - ], - [ - "held", - "on" - ], - [ - "methods", - "using" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "considered", - "are" - ], - [ - "work", - "all" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "order", - "order" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "supported", - "support" - ], - [ - "future", - "will" - ], - [ - "action", - "against" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "size", - "sizes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "left", - "result", - "when" - ], - [ - "spatial", - "temporal", - "spatial" - ], - [ - "considered", - "is" - ] - ], - "keywords": [] - }, - { - "name": "scil_json_convert_entries_to_xlsx", - "docstring": "Convert a final aggregated json file to an Excel spreadsheet.\nTypically used during the tractometry pipeline.\n\nFormerly: scil_convert_json_to_xlsx.py", - "help": "usage: scil_json_convert_entries_to_xlsx.py [-h] [--no_sort_subs]\n [--no_sort_bundles]\n [--ignore_bundles FILE]\n [--stats_over_population]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_json out_xlsx\n\nConvert a final aggregated json file to an Excel spreadsheet.\nTypically used during the tractometry pipeline.\n\nFormerly: scil_convert_json_to_xlsx.py\n\npositional arguments:\n in_json File containing the json stats (.json).\n out_xlsx Output Excel file for the stats (.xlsx).\n\noptions:\n -h, --help show this help message and exit\n --no_sort_subs If set, 
subjects won't be sorted alphabetically.\n --no_sort_bundles If set, bundles won't be sorted alphabetically.\n --ignore_bundles FILE\n Path to a text file containing a list of bundles to ignore (.txt).\n One bundle, corresponding to keys in the json, per line.\n --stats_over_population\n If set, consider the input stats to be over an entire population and not subject-based.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "bundles", - "bundles" - ], - [ - "average", - "per" - ], - [ - "work", - "and" - ], - [ - "subject", - "subjects", - "subjects" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "order", - "set" - ], - [ - "long", - "over" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "population", - "population" - ], - [ - "methods", - "using" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "based", - "based" - ], - [ - "bundles", - "bundle" - ], - [ - "matter", - "question", - "subject", - "subjects", - "subject" - ], - [ - "considered", - "possibility", - "question", - "step", - "consider" - ] - ], - "keywords": [] - }, - { - "name": "scil_json_harmonize_entries", - "docstring": "This script will harmonize a json file by adding missing keys and values\nthat differ between the different layers of the dictionary.\n\nThis is used only (for now) in Aggregate_All_* portion of tractometry-flow,\nto counter the problem of missing bundles/metrics/lesions between subjects.\n\nThe most common use case is when specific subjects have missing bundles\nwhich will cause a pandas array to be incomplete, and thus crash. Finding out\nthe union of all bundles/metrics/lesions will allow creating a complete json\n(but with NaN for missing values).\n\nFormerly: scil_harmonize_json.py", - "help": "usage: scil_json_harmonize_entries.py [-h] [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file out_file\n\n This script will harmonize a json file by adding missing keys and values\nthat differ between the different layers of the dictionary.\n\nThis is used only (for now) in Aggregate_All_* portion of tractometry-flow,\nto counter the problem of missing bundles/metrics/lesions between subjects.\n\nThe most common use case is when specific subjects have missing bundles\nwhich will cause a pandas array to be incomplete, and thus crash. Finding out\nthe union of all bundles/metrics/lesions will allow creating a complete json\n(but with NaN for missing values).\n\nFormerly: scil_harmonize_json.py\n\npositional arguments:\n in_file Input file (json).\n out_file Output file (json).\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "individual", - "specific", - "unique", - "variety", - "different" - ], - [ - "considered", - "highly", - "long", - "work", - "most" - ], - [ - "bundles", - "bundles" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "work", - "and" - ], - [ - "create", - "experience", - "matter", - "thinking", - "true", - "sort" - ], - [ - "long", - "work", - "working", - "now" - ], - [ - "subject", - "subjects", - "subjects" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "long", - "have" - ], - [ - "specific", - "specific" - ], - [ - "long", - "a" - ], - [ - "clear", - "left", - "out" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "forms", - "specific", - "common" - ], - [ - "create", - "create" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "clear", - "adding" - ], - [ - "methods", - "using" - ], - [ - "question", - "problem" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "considered", - "form", - "meaning", - "order", - "result", - "thus" - ], - [ - "order", - "allow" - ], - [ - "result", - "cause" - ], - [ - "work", - "all" - ], - [ - "matter", - "question", - "case" - ], - [ - "possibility", - "finding" - ] - ], - "keywords": [] - }, - { - "name": "scil_json_merge_entries", - "docstring": "Merge multiple json files into a single one.\nTypically used during the tractometry pipeline.\n\nWithout option it will simply merge all entries at the top level, the top\nlevel must not have any conflicting keys.\n\n--keep_separate option will add a parent for each file, its basename will\nbecome the key.\n\n--no_list option will merge all entries at the top level, if there is a\nconflict the lowest level will be extended with the new values (if list) or\nadded (if value)\n\n--add_parent_key option will add a parent key before merging all entries.\n\n--remove_parent_key option will remove the parent key before merging all\nentries.\n\n--recursive option will merge all entries (scalar) at the lowest layers as a\nlist.\n\n--average_last_layer option will average all entries (scalar) at the lowest\nlayers, but instead of creating a list it creates a mean/std level.\n\nFormerly: scil_merge_json.py", - "help": "usage: scil_json_merge_entries.py [-h] [--keep_separate] [--no_list]\n [--add_parent_key ADD_PARENT_KEY]\n [--remove_parent_key] [--recursive]\n [--average_last_layer] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_json [in_json ...] 
out_json\n\n Merge multiple json files into a single one.\nTypically used during the tractometry pipeline.\n\nWithout option it will simply merge all entries at the top level, the top\nlevel must not have any conflicting keys.\n\n--keep_separate option will add a parent for each file, its basename will\nbecome the key.\n\n--no_list option will merge all entries at the top level, if there is a\nconflict the lowest level will be extended with the new values (if list) or\nadded (if value)\n\n--add_parent_key option will add a parent key before merging all entries.\n\n--remove_parent_key option will remove the parent key before merging all\nentries.\n\n--recursive option will merge all entries (scalar) at the lowest layers as a\nlist.\n\n--average_last_layer option will average all entries (scalar) at the lowest\nlayers, but instead of creating a list it creates a mean/std level.\n\nFormerly: scil_merge_json.py\n\npositional arguments:\n in_json List of json files to merge (.json).\n out_json Output json file (.json).\n\noptions:\n -h, --help show this help message and exit\n --keep_separate Merge entries as separate keys based on filename.\n --no_list Merge entries knowing there is no conflict.\n --add_parent_key ADD_PARENT_KEY\n Merge all entries under a single parent.\n --remove_parent_key Merge ignoring parent key (e.g for population).\n --recursive Merge all entries at the lowest layers.\n --average_last_layer Average all entries at the lowest layers.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "work", - "and" - ], - [ - "create", - "experience", - "matter", - "thinking", - "true", - "sort" - ], - [ - "average", - "higher", - "highest", - "lowest" - ], - [ - "left", - "into" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "create", - "creates" - ], - [ - "create", - "creating" - ], - [ - "long", - "have" - ], - [ - "long", - "a" - ], - [ - "key", - "main", - "key" - ], - [ - "thinking", - "knowing" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "lack", - "result", - "without" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "action", - "clear", - "considered", - "future", - "matter", - "possibility", - "potential", - "question", - "result", - "specific", - "any" - ], - [ - "long", - "with" - ], - [ - "average", - "average" - ], - [ - "population", - "population" - ], - [ - "methods", - "using" - ], - [ - "thinking", - "simply" - ], - [ - "order", - "work", - "instead" - ], - [ - "left", - "before" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ 
- "clear", - "long", - "question", - "result", - "work", - "there" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "based", - "based" - ], - [ - "considered", - "become" - ] - ], - "keywords": [] - }, - { - "name": "scil_labels_combine", - "docstring": "Script to combine labels from multiple volumes. If there is overlap, it will\noverwrite them based on the input order.\n\n >>> scil_labels_combine.py out_labels.nii.gz\n --volume_ids animal_labels.nii 20\n --volume_ids DKT_labels.nii.gz 44 53\n --out_labels_indices 20 44 53\n >>> scil_labels_combine.py slf_labels.nii.gz\n --volume_ids a2009s_aseg.nii.gz all\n --volume_ids clean/s1__DKT.nii.gz 1028 2028\n\nFormerly: scil_combine_labels.py.", - "help": "usage: scil_labels_combine.py [-h] --volume_ids VOLUME_IDS [VOLUME_IDS ...]\n [--out_labels_ids OUT_LABELS_IDS [OUT_LABELS_IDS ...]\n | --unique | --group_in_m]\n [--background BACKGROUND] [--merge_groups]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n output\n\nScript to combine labels from multiple volumes. If there is overlap, it will\noverwrite them based on the input order.\n\n >>> scil_labels_combine.py out_labels.nii.gz\n --volume_ids animal_labels.nii 20\n --volume_ids DKT_labels.nii.gz 44 53\n --out_labels_indices 20 44 53\n >>> scil_labels_combine.py slf_labels.nii.gz\n --volume_ids a2009s_aseg.nii.gz all\n --volume_ids clean/s1__DKT.nii.gz 1028 2028\n\nFormerly: scil_combine_labels.py.\n\npositional arguments:\n output Combined labels volume output.\n\noptions:\n -h, --help show this help message and exit\n --volume_ids VOLUME_IDS [VOLUME_IDS ...]\n List of volumes directly followed by their labels:\n --volume_ids atlasA id1a id2a \n --volume_ids atlasB id1b id2b ... \n \"all\" can be used instead of id numbers.\n --out_labels_ids OUT_LABELS_IDS [OUT_LABELS_IDS ...]\n List of labels indices for output images.\n --unique If set, output id with unique labels, excluding first background value.\n --group_in_m Add (x * 10 000) to each volume labels, where x is the input volume order number.\n --background BACKGROUND\n Background id, excluded from output [0],\n the value is used as output background value.\n --merge_groups Each group from the --volume_ids option will be merged as a single labels.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. 
OHBM 2019.\n Surface integration for connectome analysis in age prediction.\n \n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "work", - "and" - ], - [ - "connectome", - "connectome" - ], - [ - "order", - "order" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "image", - "images" - ], - [ - "order", - "set" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "analysis", - "data", - "methods", - "study", - "analysis" - ], - [ - "future", - "will" - ], - [ - "result", - "followed" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "methods", - "using" - ], - [ - "order", - "work", - "instead" - ], - [ - "unique", - "variety", - "unique" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "area", - "work", - "where" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "based", - "group" - ], - [ - "volume", - "volumes", - "volumes" - ], - [ - "total", - "combined" - ], - [ - "clear", - "long", - "question", - "result", - "work", - "there" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "based", - "based" - ] - ], - "keywords": [] - }, - { - "name": "scil_labels_dilate", - "docstring": "Dilate regions (with or without masking) from a labeled volume:\n- \"label_to_dilate\" are regions that will dilate over\n \"label_to_fill\" if close enough to it (\"distance\").\n- \"label_to_dilate\", by default (None) will be all\n non-\"label_to_fill\" and non-\"label_not_to_dilate\".\n- \"label_not_to_dilate\" will not be changed, but will not dilate.\n- \"mask\" is where the dilation is allowed (constrained)\n in addition to \"background_label\" (logical AND)\n\n>>> scil_labels_dilate.py wmparc_t1.nii.gz wmparc_dil.nii.gz \\\n --label_to_fill 0 5001 5002 \\\n --label_not_to_dilate 4 43 10 11 12 49 50 51\n\nFormerly: scil_dilate_labels.py", - "help": "usage: scil_labels_dilate.py [-h] [--distance DISTANCE]\n [--labels_to_dilate LABELS_TO_DILATE [LABELS_TO_DILATE ...]]\n [--labels_to_fill LABELS_TO_FILL [LABELS_TO_FILL ...]]\n [--labels_not_to_dilate LABELS_NOT_TO_DILATE [LABELS_NOT_TO_DILATE ...]]\n [--mask MASK] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file out_file\n\nDilate regions (with or without masking) from a labeled volume:\n- \"label_to_dilate\" are regions that will dilate over\n \"label_to_fill\" if close enough to it (\"distance\").\n- \"label_to_dilate\", by default (None) will be all\n non-\"label_to_fill\" and non-\"label_not_to_dilate\".\n- \"label_not_to_dilate\" will not be changed, but will not dilate.\n- \"mask\" is where the dilation is allowed (constrained)\n in addition to \"background_label\" (logical AND)\n\n>>> scil_labels_dilate.py wmparc_t1.nii.gz wmparc_dil.nii.gz \\\n --label_to_fill 0 5001 5002 \\\n --label_not_to_dilate 4 43 10 11 12 49 50 51\n\nFormerly: scil_dilate_labels.py\n\npositional arguments:\n in_file Path of the volume (nii or nii.gz).\n out_file Output 
filename of the dilated labels.\n\noptions:\n -h, --help show this help message and exit\n --distance DISTANCE Maximal distance to dilate (in mm) [2.0].\n --labels_to_dilate LABELS_TO_DILATE [LABELS_TO_DILATE ...]\n Label list to dilate. By default it dilates all \n labels not in labels_to_fill nor in labels_not_to_dilate.\n --labels_to_fill LABELS_TO_FILL [LABELS_TO_FILL ...]\n Background id / labels to be filled [[0]],\n the first one is given as output background value.\n --labels_not_to_dilate LABELS_NOT_TO_DILATE [LABELS_NOT_TO_DILATE ...]\n Label list not to dilate.\n --mask MASK Only dilate values inside the mask.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. OHBM 2019.\n Surface integration for connectome analysis in age prediction.\n \n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "work", - "and" - ], - [ - "connectome", - "connectome" - ], - [ - "considered", - "are" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "areas", - "region", - "regions", - "regions" - ], - [ - "long", - "over" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "create", - "enough" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "lack", - "result", - "without" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "analysis", - "data", - "methods", - "study", - "analysis" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "total", - "50" - ], - [ - "methods", - "using" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "area", - "work", - "where" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "order", - "allowed" - ], - [ - "step", - "start" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "large", - "larger", - "variety", - "work", - "addition" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "matter", - "nor" - ] - ], - "keywords": [] - }, - { - "name": "scil_labels_remove", - "docstring": "Script to remove specific labels from an atlas volume.\n\n >>> scil_labels_remove.py DKT_labels.nii out_labels.nii.gz -i 5001 5002\n\nFormerly: scil_remove_labels.py", - "help": "usage: scil_labels_remove.py [-h] -i INDICES [INDICES ...]\n [--background BACKGROUND]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n 
in_labels out_labels\n\nScript to remove specific labels from an atlas volume.\n\n >>> scil_labels_remove.py DKT_labels.nii out_labels.nii.gz -i 5001 5002\n\nFormerly: scil_remove_labels.py\n\npositional arguments:\n in_labels Input labels volume.\n out_labels Output labels volume.\n\noptions:\n -h, --help show this help message and exit\n -i INDICES [INDICES ...], --indices INDICES [INDICES ...]\n List of labels indices to remove.\n --background BACKGROUND\n Integer used for removed labels [0].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. OHBM 2019.\n Surface integration for connectome analysis in age prediction.\n \n", - "synonyms": [ - [ - "volume", - "volumes", - "volume" - ], - [ - "work", - "and" - ], - [ - "connectome", - "connectome" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "specific", - "specific" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "analysis", - "data", - "methods", - "study", - "analysis" - ], - [ - "thinking", - "i" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "atlas", - "atlas" - ], - [ - "methods", - "using" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ] - ], - "keywords": [] - }, - { - "name": "scil_labels_split_volume_by_ids", - "docstring": "Split a label image into multiple images where the name of the output images\nis the id of the label (ex. 35.nii.gz, 36.nii.gz, ...). If the --range option\nis not provided, all labels of the image are extracted. The label 0 is\nconsidered as the background and is ignored.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_ids.py", - "help": "usage: scil_labels_split_volume_by_ids.py [-h] [--out_dir OUT_DIR]\n [--out_prefix OUT_PREFIX]\n [-r min max min max]\n [--background BACKGROUND]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_labels\n\nSplit a label image into multiple images where the name of the output images\nis the id of the label (ex. 35.nii.gz, 36.nii.gz, ...). If the --range option\nis not provided, all labels of the image are extracted. The label 0 is\nconsidered as the background and is ignored.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_ids.py\n\npositional arguments:\n in_labels Path of the input label file, in a format supported by Nibabel.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Put all output images in a specific directory.\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n -r min max min max, --range min max min max\n Specifies a subset of labels to split, formatted as min max. Ex: -r 3 5 will give files _3, _4, _5.\n --background BACKGROUND\n Background value. Will not be saved as a separate label. Default: 0.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "left", - "into" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "image", - "images" - ], - [ - "meaning", - "name" - ], - [ - "specific", - "specific" - ], - [ - "long", - "a" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "clear", - "left", - "work", - "put" - ], - [ - "methods", - "using" - ], - [ - "supported", - "supported" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "clear", - "give" - ], - [ - "area", - "work", - "where" - ], - [ - "individual", - "each" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "considered", - "key", - "work", - "important" - ] - ], - "keywords": [] - }, - { - "name": "scil_labels_split_volume_from_lut", - "docstring": "Split a label image into multiple images where the name of the output images\nis taken from a lookup table (ex: left-lateral-occipital.nii.gz,\nright-thalamus.nii.gz, ...). Only the labels included in the lookup table\nare extracted.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_labels.py", - "help": "usage: scil_labels_split_volume_from_lut.py [-h] [--out_dir OUT_DIR]\n [--out_prefix OUT_PREFIX]\n (--scilpy_lut {freesurfer_subcortical,dk_aggregate_structures,freesurfer_desikan_killiany} | --custom_lut CUSTOM_LUT)\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_label\n\nSplit a label image into multiple images where the name of the output images\nis taken from a lookup table (ex: left-lateral-occipital.nii.gz,\nright-thalamus.nii.gz, ...). Only the labels included in the lookup table\nare extracted.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_labels.py\n\npositional arguments:\n in_label Path of the input label file, in a format supported by Nibabel.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Put all output images in a specific directory.\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n --scilpy_lut {freesurfer_subcortical,dk_aggregate_structures,freesurfer_desikan_killiany}\n Lookup table, in the file scilpy/data/LUT, used to name the output files.\n --custom_lut CUSTOM_LUT\n Path of the lookup table file, used to name the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "left", - "into" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "image", - "images" - ], - [ - "meaning", - "name" - ], - [ - "left", - "left" - ], - [ - "specific", - "specific" - ], - [ - "long", - "a" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "lobe", - "occipital", - "parietal", - "occipital" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "clear", - "left", - "work", - "put" - ], - [ - "methods", - "using" - ], - [ - "clear", - "held", - "work", - "taken" - ], - [ - "lateral", - "posterior", - "lateral" - ], - [ - "left", - "right" - ], - [ - "supported", - "supported" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "cortex", - "thalamus", - "thalamus" - ], - [ - "considered", - "is" - ], - [ - "area", - "work", - "where" - ], - [ - "individual", - "each" - ], - [ - "work", - "all" - ], - [ - "considered", - "key", - "work", - "important" - ] - ], - "keywords": [] - }, - { - "name": "scil_lesions_info", - "docstring": "This script will output information about lesion load in bundle(s).\nThe input can either be streamlines, binary bundle map, or a bundle voxel\nlabel map.\n\nTo be considered a valid lesion, the lesion volume must be at least\nmin_lesion_vol mm3. This avoids the detection of thousands of single voxel\nlesions if an automatic lesion segmentation tool is used.\n\nFormerly: scil_analyse_lesions_load.py", - "help": "usage: scil_lesions_info.py [-h]\n [--bundle BUNDLE | --bundle_mask BUNDLE_MASK | --bundle_labels_map BUNDLE_LABELS_MAP]\n [--min_lesion_vol MIN_LESION_VOL]\n [--out_lesion_atlas FILE]\n [--out_lesion_stats FILE]\n [--out_streamlines_stats FILE] [--indent INDENT]\n [--sort_keys] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_lesion out_json\n\nThis script will output information about lesion load in bundle(s).\nThe input can either be streamlines, binary bundle map, or a bundle voxel\nlabel map.\n\nTo be considered a valid lesion, the lesion volume must be at least\nmin_lesion_vol mm3. 
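For illustration only, the minimum-volume rule described here amounts to labelling connected components and discarding those below min_lesion_vol. A minimal sketch, assuming a binary lesion mask and a known voxel volume in mm3 (hypothetical helper, not scilpy's implementation):

    import numpy as np
    from scipy import ndimage

    def drop_small_lesions(lesion_mask, voxel_volume_mm3, min_lesion_vol=7.0):
        # Label connected components in the binary lesion mask.
        labels, _ = ndimage.label(lesion_mask)
        # Volume of each component in mm3 (index 0 is the background).
        volumes = np.bincount(labels.ravel()) * voxel_volume_mm3
        keep = np.flatnonzero(volumes >= min_lesion_vol)
        keep = keep[keep != 0]  # never keep the background component
        return np.isin(labels, keep)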
This avoids the detection of thousands of single voxel\nlesions if an automatic lesion segmentation tool is used.\n\nFormerly: scil_analyse_lesions_load.py\n\npositional arguments:\n in_lesion Binary mask of the lesion(s) (.nii.gz).\n out_json Output file for lesion information (.json).\n\noptions:\n -h, --help show this help message and exit\n --bundle BUNDLE Path of the bundle file (.trk).\n --bundle_mask BUNDLE_MASK\n Path of the bundle binary mask (.nii.gz).\n --bundle_labels_map BUNDLE_LABELS_MAP\n Path of the bundle labels map (.nii.gz).\n --min_lesion_vol MIN_LESION_VOL\n Minimum lesion volume in mm3 [7].\n --out_lesion_atlas FILE\n Save the labelized lesion(s) map (.nii.gz).\n --out_lesion_stats FILE\n Save the lesion-wise volume measure (.json).\n --out_streamlines_stats FILE\n Save the lesion-wise streamline count (.json).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "anatomy", - "anatomy" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "work", - "and" - ], - [ - "create", - "experience", - "matter", - "thinking", - "true", - "sort" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "invalid", - "valid", - "valid" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "possibility", - "avoid" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "supported", - "support" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "tool", - "tool" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "streamlines", - "streamlines" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "streamline", - "streamline" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "maps", - "map" - ], - [ - "bundles", - "bundle" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_mti_adjust_B1_header", - "docstring": "Correct B1 map header problem, by applying the scaling (slope) and setting\nthe slope to 1.", - "help": "usage: scil_mti_adjust_B1_header.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_B1_map out_B1_map in_B1_json\n\nCorrect B1 map header problem, by applying the scaling (slope) and setting\nthe slope to 1.\n\npositional arguments:\n in_B1_map Path to input B1 map file.\n out_B1_map Path to output B1 map file.\n in_B1_json Json file of the B1 map.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "work", - "and" - ], - [ - "held", - "on" - ], - [ - "methods", - "using" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "question", - "problem" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "maps", - "map" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "is" - ], - [ - "applied", - "applying" - ] - ], - "keywords": [] - }, - { - "name": "scil_mti_maps_MT", - "docstring": "This script computes two myelin indices maps from the Magnetization Transfer\n(MT) images.\nMagnetization Transfer is a contrast mechanism in tissue resulting from the\nproton exchange between non-aqueous protons (from macromolecules and their\nclosely associated water molecules, the \"bound\" pool) and protons in the free\nwater pool called aqueous protons. This exchange attenuates the MRI signal,\nintroducing microstructure-dependent contrast. MT's effect reflects the\nrelative density of macromolecules such as proteins and lipids; it has been\nassociated with myelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse saturating the\nprotons of non-aqueous molecules at a given irradiation frequency. The MT maps are\nobtained using three or four contrasts: a single positive frequency image\nand/or a single negative frequency image, and two unsaturated contrasts as\nreference. These two references should be acquired with predominant PD\n(proton density) and T1 weighting at different excitation flip angles\n(a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all input must have a matching json file with the same filename\n - all contrasts must have the same number of echoes and be coregistered\n between them before running the script.\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\n\nThe output consists of a MT_native_maps folder containing the 2 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n The MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n The MT saturation is a pseudo-quantitative map representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\n\nThe final maps from MT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method 
with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 1 or 2 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n\n>>> scil_mti_maps_MT.py path/to/output/directory\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_positive path/to/echo*pos.nii.gz --in_negative path/to/echo*neg.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.", - "help": "usage: scil_mti_maps_MT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n [--in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]]\n [--extended] [--filtering]\n [--in_jsons PD_json T1_json | --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time]\n [--in_B1_map IN_B1_MAP]\n [--B1_correction_method {empiric,model_based}]\n [--B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]]\n [--B1_nominal B1_NOMINAL]\n [--B1_smooth_dims B1_SMOOTH_DIMS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dir\n\nThis script computes two myelin indices maps from the Magnetization Transfer\n(MT) images.\nMagnetization Transfer is a contrast mechanism in tissue resulting from the\nproton exchange between non-aqueous protons (from macromolecules and their\nclosely associated water molecules, the \"bound\" pool) and protons in the free\nwater pool called aqueous protons. This exchange attenuates the MRI signal,\nintroducing microstructure-dependent contrast. MT's effect reflects the\nrelative density of macromolecules such as proteins and lipids; it has been\nassociated with myelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse saturating the\nprotons of non-aqueous molecules at a given irradiation frequency. The MT maps are\nobtained using three or four contrasts: a single positive frequency image\nand/or a single negative frequency image, and two unsaturated contrasts as\nreference. 
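For illustration of the ratio map described here: MTR is classically the relative signal drop caused by the saturation pulse. A minimal numpy sketch (hypothetical helper, not this script's exact code; mt_on could be, e.g., the mean of the positive and negative frequency images):

    import numpy as np

    def mt_ratio(mtoff_pd, mt_on, eps=1e-16):
        # Classic MTR, in percent: relative signal drop between the
        # unsaturated reference and the MT-weighted image.
        return 100.0 * (mtoff_pd - mt_on) / (mtoff_pd + eps)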
These two references should be acquired with predominant PD\n(proton density) and T1 weighting at different excitation flip angles\n(a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all input must have a matching json file with the same filename\n - all contrasts must have the same number of echoes and be coregistered\n between them before running the script.\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\nThe output consists of a MT_native_maps folder containing the 2 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n The MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n The MT saturation is a pseudo-quantitative map representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\nThe final maps from MT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 1 or 2 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n>>> scil_mti_maps_MT.py path/to/output/directory\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_positive path/to/echo*pos.nii.gz --in_negative path/to/echo*neg.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.\n\npositional arguments:\n out_dir Path to output folder.\n\noptions:\n -h, --help show this help message and exit\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n --mask MASK Path to the binary brain mask.\n --extended If set, outputs the folder Complementary_maps.\n --filtering Gaussian filtering to remove Gibbs ringing. Not recommended.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nContrast maps:\n Path to echoes corresponding to contrast images. 
All contrasts must have \n the same number of echoes and be coregistered between them. Use * to include all echoes. \n The in_mtoff_pd input and at least one of in_positive or in_negative are required.\n\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n Path to all echoes corresponding to the positive frequency \n saturation pulse.\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n Path to all echoes corresponding to the negative frequency \n saturation pulse.\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n Path to all echoes corresponding to the predominant PD \n (proton density) weighting images with no saturation pulse.\n --in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]\n Path to all echoes corresponding to the predominant T1 \n weighting images with no saturation pulse. This one is optional, \n since it is only needed for the calculation of MTsat. \n Acquisition parameters should also be set with this image.\n\nAcquisition parameters:\n Acquisition parameters required for MTsat and ihMTsat calculation. \n These are the excitation flip angles (a_PD, a_T1), in DEGREES, and \n repetition times (TR_PD, TR_T1) of the PD and T1 images, in SECONDS. \n Can be given through json files (--in_jsons) or directly (--in_acq_parameters).\n\n --in_jsons PD_json T1_json\n Path to MToff PD json file and MToff T1 json file, in that order. \n The acquisition parameters will be extracted from these files. \n Must come from a Philips acquisition, otherwise, use in_acq_parameters.\n --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time\n Acquisition parameters in that order: flip angle of mtoff_PD, \n flip angle of mtoff_T1, repetition time of mtoff_PD, \n repetition time of mtoff_T1\n\nB1 correction:\n --in_B1_map IN_B1_MAP\n Path to B1 map coregistered to the MT contrasts.\n --B1_correction_method {empiric,model_based}\n Choice of B1 correction method. Choose between empiric and model-based. \n Note that the model-based method requires a B1 fitvalues file. \n Both methods will only correct the saturation measures. [empiric]\n --B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]\n Path to B1 fitvalues files obtained externally. Should be one .mat \n file per input MT-on image, given in this specific order: \n positive frequency saturation, negative frequency saturation.\n --B1_nominal B1_NOMINAL\n Nominal value for the B1 map. For Philips, should be 100. [100]\n --B1_smooth_dims B1_SMOOTH_DIMS\n Dimension of the squared window used for B1 smoothing, in number of voxels. 
[5]\n", - "synonyms": [ - [ - "individual", - "specific", - "unique", - "variety", - "different" - ], - [ - "positive", - "negative" - ], - [ - "methods", - "method" - ], - [ - "image", - "images" - ], - [ - "order", - "set" - ], - [ - "long", - "have" - ], - [ - "specific", - "specific" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "variety", - "include" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "maps", - "map" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "comprised", - "comprising", - "consists" - ], - [ - "average", - "per" - ], - [ - "work", - "also" - ], - [ - "result", - "following" - ], - [ - "left", - "from" - ], - [ - "positive", - "positive" - ], - [ - "thinking", - "you" - ], - [ - "step", - "steps" - ], - [ - "long", - "a" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "step", - "work", - "come" - ], - [ - "imaging", - "mri" - ], - [ - "future", - "will" - ], - [ - "create", - "create" - ], - [ - "considered", - "involved", - "work", - "been" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "signal", - "signal" - ], - [ - "result", - "resulting" - ], - [ - "analysis", - "methodology", - "methods", - "processes", - "methods" - ], - [ - "contrast", - "contrasts" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "result", - "effect" - ], - [ - "considered", - "specific", - "variety", - "such" - ], - [ - "left", - "off" - ], - [ - "order", - "required" - ], - [ - "brain", - "tissue" - ], - [ - "considered", - "are" - ], - [ - "action", - "clear", - "future", - "result", - "step", - "change" - ], - [ - "represent", - "representing" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "methods", - "using" - ], - [ - "left", - "after" - ], - [ - "nuclei", - "protons" - ], - [ - "increase", - "total", - "amount" - ], - [ - "degree", - "degrees" - ], - [ - "future", - "result", - "specific", - "variety", - "work", - "these" - ], - [ - "left", - "before" - ], - [ - "individual", - "each" - ], - [ - "long", - "two" - ], - [ - "based", - "based" - ], - [ - "dominant", - "predominant" - ], - [ - "contrast", - "contrast" - ], - [ - "total", - "number" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "order", - "order" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "matter", - "question", - "subject", - "matter" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "long", - 
"result", - "work", - "working", - "time" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "binary", - "binary" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "blue", - "green", - "red", - "white", - "white" - ], - [ - "weighted", - "weighted" - ], - [ - "work", - "working", - "done" - ], - [ - "total", - "100" - ], - [ - "maps", - "maps" - ], - [ - "result", - "since" - ] - ], - "keywords": [] - }, - { - "name": "scil_mti_maps_ihMT", - "docstring": "This script computes four myelin indices maps from the Magnetization Transfer\n(MT) and inhomogeneous Magnetization Transfer (ihMT) images. Magnetization\nTransfer is a contrast mechanism in tissue resulting from the proton exchange\nbetween non-aqueous protons (from macromolecules and their closely associated\nwater molecules, the \"bound\" pool) and protons in the free water pool called\naqueous protons. This exchange attenuates the MRI signal, introducing\nmicrostructure-dependent contrast. MT's effect reflects the relative density\nof macromolecules such as proteins and lipids, it has been associated with\nmyelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse prior to image\nacquisition (a prepulse), saturating the protons on non-aqueous molecules,\nby applying different frequency irradiation. The two MT maps and two ihMT maps\nare obtained using six contrasts: single positive frequency image, single\nnegative frequency image, dual alternating positive/negative frequency image,\ndual alternating negative/positive frequency image (saturated images);\nand two unsaturated contrasts as reference. 
These two references should be\nacquired with predominant PD (proton density) and T1 weighting at different\nexcitation flip angles (a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all contrasts must have a same number of echoes and coregistered\n between them before running the script\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\n\nThe output consists of a ihMT_native_maps folder containing the 4 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n - ihMTR.nii.gz : inhomogeneous Magnetization Transfer Ratio map\n The (ih)MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n - ihMTsat.nii.gz : inhomogeneous Magnetization Transfer saturation map\n The (ih)MT saturation is a pseudo-quantitative maps representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - altnp.nii.gz : dual alternating negative and positive frequency image\n - altpn.nii.gz : dual alternating positive and negative frequency image\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_d.nii.gz : MTsat computed from the mean dual frequency images\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\n\nThe final maps from ihMT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 3 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation, dual frequency saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n\n>>> scil_mti_maps_ihMT.py path/to/output/directory\n --in_altnp path/to/echo*altnp.nii.gz --in_altpn path/to/echo*altpn.nii.gz\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_negative path/to/echo*neg.nii.gz --in_positive path/to/echo*pos.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.", - "help": "usage: scil_mti_maps_ihMT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]\n --in_altnp IN_ALTNP [IN_ALTNP ...] --in_altpn\n IN_ALTPN [IN_ALTPN ...] --in_negative IN_NEGATIVE\n [IN_NEGATIVE ...] --in_positive IN_POSITIVE\n [IN_POSITIVE ...] 
--in_mtoff_pd IN_MTOFF_PD\n [IN_MTOFF_PD ...]\n [--in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]]\n [--extended] [--filtering]\n [--in_jsons PD_json T1_json | --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time]\n [--in_B1_map IN_B1_MAP]\n [--B1_correction_method {empiric,model_based}]\n [--B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]]\n [--B1_nominal B1_NOMINAL]\n [--B1_smooth_dims B1_SMOOTH_DIMS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dir\n\nThis script computes four myelin indices maps from the Magnetization Transfer\n(MT) and inhomogeneous Magnetization Transfer (ihMT) images. Magnetization\nTransfer is a contrast mechanism in tissue resulting from the proton exchange\nbetween non-aqueous protons (from macromolecules and their closely associated\nwater molecules, the \"bound\" pool) and protons in the free water pool called\naqueous protons. This exchange attenuates the MRI signal, introducing\nmicrostructure-dependent contrast. MT's effect reflects the relative density\nof macromolecules such as proteins and lipids, it has been associated with\nmyelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse prior to image\nacquisition (a prepulse), saturating the protons on non-aqueous molecules,\nby applying different frequency irradiation. The two MT maps and two ihMT maps\nare obtained using six contrasts: single positive frequency image, single\nnegative frequency image, dual alternating positive/negative frequency image,\ndual alternating negative/positive frequency image (saturated images);\nand two unsaturated contrasts as reference. These two references should be\nacquired with predominant PD (proton density) and T1 weighting at different\nexcitation flip angles (a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all contrasts must have a same number of echoes and coregistered\n between them before running the script\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\nThe output consists of a ihMT_native_maps folder containing the 4 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n - ihMTR.nii.gz : inhomogeneous Magnetization Transfer Ratio map\n The (ih)MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n - ihMTsat.nii.gz : inhomogeneous Magnetization Transfer saturation map\n The (ih)MT saturation is a pseudo-quantitative maps representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - altnp.nii.gz : dual alternating negative and positive frequency image\n - altpn.nii.gz : dual alternating positive and negative frequency image\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_d.nii.gz : MTsat computed from the mean dual frequency images\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - 
R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\nThe final maps from ihMT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 3 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation, dual frequency saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n>>> scil_mti_maps_ihMT.py path/to/output/directory\n --in_altnp path/to/echo*altnp.nii.gz --in_altpn path/to/echo*altpn.nii.gz\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_negative path/to/echo*neg.nii.gz --in_positive path/to/echo*pos.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.\n\npositional arguments:\n out_dir Path to output folder.\n\noptions:\n -h, --help show this help message and exit\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n --mask MASK Path to the binary brain mask.\n --extended If set, outputs the folder Complementary_maps.\n --filtering Gaussian filtering to remove Gibbs ringing. Not recommended.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nContrast maps:\n Path to echoes corresponding to contrast images. All constrasts must have \n the same number of echoes and coregistered between them. Use * to include all echoes.\n\n --in_altnp IN_ALTNP [IN_ALTNP ...]\n Path to all echoes corresponding to the alternation of \n negative and positive frequency saturation pulse.\n --in_altpn IN_ALTPN [IN_ALTPN ...]\n Path to all echoes corresponding to the alternation of \n positive and negative frequency saturation pulse.\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n Path to all echoes corresponding to the negative frequency \n saturation pulse.\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n Path to all echoes corresponding to the positive frequency \n saturation pulse.\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n Path to all echoes corresponding to the predominant PD \n (proton density) weighting images with no saturation pulse.\n --in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]\n Path to all echoes corresponding to the predominant T1 \n weighting images with no saturation pulse. This one is optional, \n since it is only needed for the calculation of MTsat and ihMTsat. \n Acquisition parameters should also be set with this image.\n\nAcquisition parameters:\n Acquisition parameters required for MTsat and ihMTsat calculation. \n These are the excitation flip angles (a_PD, a_T1), in DEGREES, and \n repetition times (TR_PD, TR_T1) of the PD and T1 images, in SECONDS. \n Can be given through json files (--in_jsons) or directly (--in_acq_parameters).\n\n --in_jsons PD_json T1_json\n Path to MToff PD json file and MToff T1 json file, in that order. 
\n The acquisition parameters will be extracted from these files. \n Must come from a Philips acquisition, otherwise, use in_acq_parameters.\n --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time\n Acquisition parameters in that order: flip angle of mtoff_PD, \n flip angle of mtoff_T1, repetition time of mtoff_PD, \n repetition time of mtoff_T1\n\nB1 correction:\n --in_B1_map IN_B1_MAP\n Path to B1 coregister map to MT contrasts.\n --B1_correction_method {empiric,model_based}\n Choice of B1 correction method. Choose between empiric and model-based. \n Note that the model-based method requires a B1 fitvalues file. \n Both method will only correct the saturation measures. [empiric]\n --B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]\n Path to B1 fitvalues files obtained externally. Should be one .mat \n file per input MT-on image, given in this specific order: \n positive frequency saturation, negative frequency saturation.\n --B1_nominal B1_NOMINAL\n Nominal value for the B1 map. For Philips, should be 100. [100]\n --B1_smooth_dims B1_SMOOTH_DIMS\n Dimension of the squared window used for B1 smoothing, in number of voxels. [5]\n", - "synonyms": [ - [ - "individual", - "specific", - "unique", - "variety", - "different" - ], - [ - "positive", - "negative" - ], - [ - "methods", - "method" - ], - [ - "image", - "images" - ], - [ - "order", - "set" - ], - [ - "long", - "have" - ], - [ - "specific", - "specific" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "variety", - "include" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "maps", - "map" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "comprised", - "comprising", - "consists" - ], - [ - "average", - "per" - ], - [ - "work", - "also" - ], - [ - "result", - "following" - ], - [ - "left", - "from" - ], - [ - "positive", - "positive" - ], - [ - "thinking", - "you" - ], - [ - "step", - "steps" - ], - [ - "long", - "a" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "step", - "work", - "come" - ], - [ - "imaging", - "mri" - ], - [ - "future", - "will" - ], - [ - "create", - "create" - ], - [ - "considered", - "involved", - "work", - "been" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "signal", - "signal" - ], - [ - "result", - "resulting" - ], - [ - "applied", - "applying" - ], - [ - "analysis", - "methodology", - "methods", - "processes", - "methods" - ], - [ - "contrast", - "contrasts" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "result", - "effect" - ], - [ - "considered", - "specific", - "variety", - "such" - ], - [ - "left", - "off" - ], - [ - "order", - "required" - ], - [ - "brain", - "tissue" - ], - [ - "considered", - "are" - ], - [ - "action", - "clear", - "future", - "result", - "step", - "change" - ], - [ - "represent", - "representing" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - 
"considered", - "role", - "work", - "as" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "methods", - "using" - ], - [ - "left", - "after" - ], - [ - "nuclei", - "protons" - ], - [ - "increase", - "total", - "amount" - ], - [ - "degree", - "degrees" - ], - [ - "future", - "result", - "specific", - "variety", - "work", - "these" - ], - [ - "left", - "before" - ], - [ - "individual", - "each" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "long", - "two" - ], - [ - "based", - "based" - ], - [ - "dominant", - "predominant" - ], - [ - "contrast", - "contrast" - ], - [ - "total", - "number" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "order", - "order" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "matter", - "question", - "subject", - "matter" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "long", - "result", - "work", - "working", - "time" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "binary", - "binary" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "blue", - "green", - "red", - "white", - "white" - ], - [ - "weighted", - "weighted" - ], - [ - "work", - "working", - "done" - ], - [ - "total", - "100" - ], - [ - "maps", - "maps" - ], - [ - "result", - "since" - ] - ], - "keywords": [] - }, - { - "name": "scil_plot_stats_per_point", - "docstring": "Plot all mean/std per point for a subject or population json file from\ntractometry-flow.\nWARNING: For population, the displayed STDs is only showing the variation\nof the means. It does not account intra-subject STDs.\n\nFormerly: scil_plot_mean_std_per_point.py", - "help": "usage: scil_plot_stats_per_point.py [-h] [--stats_over_population]\n [--nb_pts NB_PTS] [--display_means]\n [--fill_color FILL_COLOR | --dict_colors DICT_COLORS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_json out_dir\n\nPlot all mean/std per point for a subject or population json file from\ntractometry-flow.\nWARNING: For population, the displayed STDs is only showing the variation\nof the means. It does not account intra-subject STDs.\n\nFormerly: scil_plot_mean_std_per_point.py\n\npositional arguments:\n in_json JSON file containing the mean/std per point. For example, can be created using scil_bundle_mean_std.py.\n out_dir Output directory.\n\noptions:\n -h, --help show this help message and exit\n --stats_over_population\n If set, consider the input stats to be over an entire population and not subject-based.\n --nb_pts NB_PTS Force the number of divisions for the bundles.\n Avoid unequal plots across datasets, replace missing data with zeros.\n --display_means Display the subjects means as semi-transparent line.\n Poor results when the number of subject is high.\n --fill_color FILL_COLOR\n Hexadecimal RGB color filling the region between mean +/- std. 
The hexadecimal RGB color should be formatted as 0xRRGGBB.\n --dict_colors DICT_COLORS\n Dictionnary mapping basename to color.Same convention as --color.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "bundles", - "bundles" - ], - [ - "average", - "per" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "difference", - "point" - ], - [ - "subject", - "subjects", - "subjects" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "contrast", - "difference", - "form", - "result", - "specific", - "subject", - "true", - "unique", - "work", - "example" - ], - [ - "order", - "set" - ], - [ - "long", - "over" - ], - [ - "maps", - "mapping" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "possibility", - "avoid" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "indicating", - "showing" - ], - [ - "population", - "population" - ], - [ - "methods", - "using" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "areas", - "across" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "matter", - "question", - "does" - ], - [ - "result", - "results" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "based", - "based" - ], - [ - "area", - "areas", - "region", - "regions", - "region" - ], - [ - "create", - "created" - ], - [ - "matter", - "question", - "subject", - "subjects", - "subject" - ], - [ - "meaning", - "order", - "result", - "step", - "true", - "means" - ], - [ - "considered", - "possibility", - "question", - "step", - "consider" - ] - ], - "keywords": [] - }, - { - "name": "scil_qball_metrics", - "docstring": "Script to compute the Constant Solid Angle (CSA) or Analytical Q-ball model,\nthe generalized fractional anisotropy (GFA) and the peaks of the model.\n\nBy default, will output all possible files, using default names. 
Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Descoteaux et al MRM 2007, Aganj et al MRM 2009] for details and\n[Cote et al MEDIA 2013] for quantitative comparisons.\n\nFormerly: scil_compute_qball_metrics.py", - "help": "usage: scil_qball_metrics.py [-h] [-f] [--sh_order SH_ORDER] [--mask MASK]\n [--use_qball] [--not_all] [--gfa GFA]\n [--peaks PEAKS] [--peak_indices PEAK_INDICES]\n [--sh SH] [--nufo NUFO] [--a_power A_POWER]\n [--b0_threshold thr] [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nScript to compute the Constant Solid Angle (CSA) or Analytical Q-ball model,\nthe generalized fractional anisotropy (GFA) and the peaks of the model.\n\nBy default, will output all possible files, using default names. Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Descoteaux et al MRM 2007, Aganj et al MRM 2009] for details and\n[Cote et al MEDIA 2013] for quantitative comparisons.\n\nFormerly: scil_compute_qball_metrics.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --sh_order SH_ORDER Spherical harmonics order. Must be a positive even number [4].\n --mask MASK Path to a binary mask. Only data inside the mask will be used for computations and reconstruction [None].\n --use_qball If set, qball will be used as the odf reconstruction model instead of CSA.\n --not_all If set, will only save the files specified using the following flags.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n\nFile flags:\n --gfa GFA Output filename for the generalized fractional anisotropy [gfa.nii.gz].\n --peaks PEAKS Output filename for the extracted peaks [peaks.nii.gz].\n --peak_indices PEAK_INDICES\n Output filename for the generated peaks indices on the sphere [peaks_indices.nii.gz].\n --sh SH Output filename for the spherical harmonics coefficients [sh.nii.gz].\n --nufo NUFO Output filename for the NUFO map [nufo.nii.gz].\n --a_power A_POWER Output filename for the anisotropic power map[anisotropic_power.nii.gz].\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "step", - "continue" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "matter", - "question", - "thinking", - "understand" - ], - [ - "work", - "and" - ], - [ - "result", - "following" - ], - [ - "considered", - "are" - ], - [ - "methodology", - "analytical" - ], - [ - "order", - "order" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "papers", - "paper" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "positive", - "positive" - ], - [ - "work", - "find" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "specific", - "specific" - ], - [ - "thinking", - "you" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "work", - "even" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "diffusion", - "diffusion" - ], - [ - "thinking", - "i" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "parameters", - "specified" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "order", - "work", - "instead" - ], - [ - "clear", - "future", - "possibility", - "potential", - "question", - "result", - "specific", - "step", - "possible" - ], - [ - "exist", - "exists" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "lack", - "minimal" - ], - [ - "order", - "allow" - ], - [ - "step", - "start" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "maps", - "map" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_rgb_convert", - "docstring": "Converts a RGB image 
encoded as a 4D image to a RGB image encoded as\na 3D image, or vice versa.\n\nTypically, most software tools used in the SCIL (including MI-Brain) use\nthe former, while Trackvis uses the latter.\n\nInput\n-Case 1: 4D image where the 4th dimension contains 3 values.\n-Case 2: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value.\n\nOutput\n-Case 1: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value (uint8).\n-Case 2: 4D image where the 4th dimension contains 3 values (uint8).\n\nFormerly: scil_convert_rgb.py", - "help": "usage: scil_rgb_convert.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image\n\nConverts a RGB image encoded as a 4D image to a RGB image encoded as\na 3D image, or vice versa.\n\nTypically, most software tools used in the SCIL (including MI-Brain) use\nthe former, while Trackvis uses the latter.\n\nInput\n-Case 1: 4D image where the 4th dimension contains 3 values.\n-Case 2: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value.\n\nOutput\n-Case 1: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value (uint8).\n-Case 2: 4D image where the 4th dimension contains 3 values (uint8).\n\nFormerly: scil_convert_rgb.py\n\npositional arguments:\n in_image name of input RGB image.\n Either 4D or 3D image.\n out_image name of output RGB image.\n Either 3D or 4D image.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "considered", - "highly", - "long", - "work", - "most" - ], - [ - "work", - "and" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "meaning", - "name" - ], - [ - "methods", - "use" - ], - [ - "long", - "a" - ], - [ - "application", - "systems", - "software" - ], - [ - "voxel", - "voxel" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "considered", - "form", - "latter" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "methods", - "using" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "area", - "work", - "where" - ], - [ - "individual", - "each" - ], - [ - "methods", - "tool", - "tools" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "matter", - "question", - "case" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_sh_convert", - "docstring": "Convert a SH file between the two of the following basis choices:\n'descoteaux07', 'descoteaux07_legacy', 'tournier07' or 'tournier07_legacy'.\nUsing the sh_basis argument, both the input and the output SH bases must be\ngiven, in the order. 
For more information about the bases, see\nhttps://docs.dipy.org/stable/theory/sh_basis.html.\n\nFormerly: scil_convert_sh_basis.py", - "help": "usage: scil_sh_convert.py [-h] [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_sh out_sh\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n\nConvert a SH file between the two of the following basis choices:\n'descoteaux07', 'descoteaux07_legacy', 'tournier07' or 'tournier07_legacy'.\nUsing the sh_basis argument, both the input and the output SH bases must be\ngiven, in the order. For more information about the bases, see\nhttps://docs.dipy.org/stable/theory/sh_basis.html.\n\nFormerly: scil_convert_sh_basis.py\n\npositional arguments:\n in_sh Input SH filename. (nii or nii.gz)\n out_sh Output SH filename. (nii or nii.gz)\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Both the input and output bases are required, in that order.\n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy', 'tournier07']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n\noptions:\n -h, --help show this help message and exit\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "order", - "required" - ], - [ - "work", - "and" - ], - [ - "result", - "following" - ], - [ - "considered", - "are" - ], - [ - "order", - "order" - ], - [ - "long", - "work", - "more" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "papers", - "paper" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "question", - "argument" - ], - [ - "long", - "a" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "long", - "two" - ], - [ - "step", - "start" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_sh_fusion", - "docstring": "Merge a list of Spherical Harmonics files.\n\nThis merges 
the coefficients of multiple Spherical Harmonics files by taking,\nfor each coefficient, the one with the largest magnitude.\n\nCan be used to merge fODFs computed from different shells into 1, while\nconserving the most relevant information.\n\nBased on [1] and [2].\n\nFormerly: scil_merge_sh.py", - "help": "usage: scil_sh_fusion.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_shs [in_shs ...] out_sh\n\nMerge a list of Spherical Harmonics files.\n\nThis merges the coefficients of multiple Spherical Harmonics files by taking,\nfor each coefficient, the one with the largest magnitude.\n\nCan be used to merge fODFs computed from different shells into 1, while\nconserving the most relevant information.\n\nBased on [1] and [2].\n\nFormerly: scil_merge_sh.py\n\npositional arguments:\n in_shs List of SH files.\n out_sh output SH file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReference:\n[1] Garyfallidis, E., Zucchelli, M., Houde, J-C., Descoteaux, M.\n How to perform best ODF reconstruction from the Human Connectome\n Project sampling scheme?\n ISMRM 2014.\n\n[2] Khachaturian, M. H., Wisco, J. J., & Tuch, D. S. (2007). Boosting the\n sampling efficiency of q\u2010ball imaging using multiple wavevector fusion.\n Magnetic Resonance in Medicine: An Official Journal of the International\n Society for Magnetic Resonance in Medicine, 57(2), 289-296.\n", - "synonyms": [ - [ - "individual", - "specific", - "unique", - "variety", - "different" - ], - [ - "animal", - "human", - "human" - ], - [ - "considered", - "highly", - "long", - "work", - "most" - ], - [ - "work", - "and" - ], - [ - "connectome", - "connectome" - ], - [ - "left", - "into" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "project", - "project" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "matter", - "question", - "thinking", - "true", - "work", - "working", - "how" - ], - [ - "long", - "with" - ], - [ - "specific", - "relevant" - ], - [ - "methods", - "using" - ], - [ - "imaging", - "imaging" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "individual", - "each" - ], - [ - "global", - "international" - ], - [ - "large", - "largest" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "based", - "based" - ] - ], - "keywords": [] - }, - { - "name": "scil_sh_to_aodf", - "docstring": "Script to estimate asymmetric ODFs (aODFs) from a spherical harmonics image.\n\nTwo methods are available:\n * Unified filtering [1] combines four asymmetric filtering methods into\n a single equation and relies on a combination of four gaussian filters.\n * Cosine filtering [2] is a simpler implementation using cosine distance\n for assigning weights to neighbours.\n\nUnified filtering can be accelerated using OpenCL with the option --use_opencl.\nMake sure you have pyopencl installed before 
using this option. By default, the\nOpenCL program will run on the cpu. To use a gpu instead, also specify the\noption --device gpu.", - "help": "usage: scil_sh_to_aodf.py [-h] [--out_sym OUT_SYM]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--method {unified,cosine}]\n [--sigma_spatial SIGMA_SPATIAL]\n [--sigma_align SIGMA_ALIGN]\n [--sigma_range SIGMA_RANGE]\n [--sigma_angle SIGMA_ANGLE] [--disable_spatial]\n [--disable_align] [--disable_range]\n [--include_center] [--win_hwidth WIN_HWIDTH]\n [--sharpness SHARPNESS] [--device {cpu,gpu}]\n [--use_opencl] [--patch_size PATCH_SIZE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_sh\n\nScript to estimate asymmetric ODFs (aODFs) from a spherical harmonics image.\n\nTwo methods are available:\n * Unified filtering [1] combines four asymmetric filtering methods into\n a single equation and relies on a combination of four gaussian filters.\n * Cosine filtering [2] is a simpler implementation using cosine distance\n for assigning weights to neighbours.\n\nUnified filtering can be accelerated using OpenCL with the option --use_opencl.\nMake sure you have pyopencl installed before using this option. By default, the\nOpenCL program will run on the cpu. To use a gpu instead, also specify the\noption --device gpu.\n\npositional arguments:\n in_sh Path to the input file.\n out_sh File name for averaged signal.\n\noptions:\n -h, --help show this help message and exit\n --out_sym OUT_SYM Name of optional symmetric output. [None]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere used for the SH to SF projection. [repulsion200]\n --method {unified,cosine}\n Method for estimating asymmetric ODFs [unified].\n One of:\n 'unified': Unified filtering [1].\n 'cosine' : Cosine-based filtering [2].\n --device {cpu,gpu} Device to use for execution. [cpu]\n --use_opencl Accelerate code using OpenCL (requires pyopencl\n and a working OpenCL implementation).\n --patch_size PATCH_SIZE\n OpenCL patch size. [40]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nShared filter arguments:\n --sigma_spatial SIGMA_SPATIAL\n Standard deviation for spatial distance. [1.0]\n\nUnified filter arguments:\n --sigma_align SIGMA_ALIGN\n Standard deviation for alignment filter. [0.8]\n --sigma_range SIGMA_RANGE\n Standard deviation for range filter\n *relative to SF range of image*. 
[0.2]\n --sigma_angle SIGMA_ANGLE\n Standard deviation for angular filter\n (disabled by default).\n --disable_spatial Disable spatial filtering.\n --disable_align Disable alignment filtering.\n --disable_range Disable range filtering.\n --include_center Include center voxel in neighourhood.\n --win_hwidth WIN_HWIDTH\n Filtering window half-width. Defaults to 3*sigma_spatial.\n\nCosine filter arguments:\n --sharpness SHARPNESS\n Specify sharpness factor to use for\n weighted average. [1.0]\n\n[1] Poirier and Descoteaux, 2024, \"A Unified Filtering Method for Estimating\n Asymmetric Orientation Distribution Functions\", Neuroimage, vol. 287,\n https://doi.org/10.1016/j.neuroimage.2024.120516\n\n[2] Poirier et al, 2021, \"Investigating the Occurrence of Asymmetric Patterns\n in White Matter Fiber Orientation Distribution Functions\", ISMRM 2021\n (abstract 0865)\n", - "synonyms": [ - [ - "methods", - "method" - ], - [ - "orientation", - "orientation" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "form", - "combination" - ], - [ - "considered", - "are" - ], - [ - "involved", - "work", - "working", - "working" - ], - [ - "left", - "into" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "fibre", - "fiber" - ], - [ - "papers", - "paper" - ], - [ - "left", - "from" - ], - [ - "matter", - "question", - "subject", - "matter" - ], - [ - "left", - "result", - "when" - ], - [ - "examining", - "involved", - "investigating" - ], - [ - "occurrence", - "occurrence" - ], - [ - "meaning", - "name" - ], - [ - "methods", - "use" - ], - [ - "long", - "have" - ], - [ - "left", - "half" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "total", - "40" - ], - [ - "voxel", - "voxel" - ], - [ - "held", - "on" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "create", - "future", - "step", - "work", - "make" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "variety", - "include" - ], - [ - "long", - "with" - ], - [ - "average", - "average" - ], - [ - "methods", - "using" - ], - [ - "order", - "work", - "instead" - ], - [ - "signal", - "signal" - ], - [ - "left", - "before" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "projection", - "projection" - ], - [ - "considered", - "is" - ], - [ - "project", - "program" - ], - [ - "blue", - "green", - "red", - "white", - "white" - ], - [ - "analysis", - "methodology", - "methods", - "processes", - "methods" - ], - [ - "larger", - "size", - "size" - ], - [ - "weighted", - "weighted" - ], - [ - "long", - "two" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "function", - "functions", - "functions" - ], - [ - "based", - "based" - ], - [ - "clear", - "matter", - "question", - "thinking", - "sure" - ], - [ - "spatial", - "temporal", - "spatial" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_sh_to_rish", - "docstring": "Compute the RISH (Rotationally Invariant Spherical Harmonics) features of an SH\nsignal [1].\n\nEach RISH feature map 
is the total energy of its associated order.\nMathematically, it is the sum of the squared SH coefficients of the SH order.\n\nThis script supports both symmetrical and asymmetrical SH images as input, of\nany SH order.\n\nEach RISH feature will be saved as a separate file.\n\n[1] Mirzaalian, Hengameh, et al. \"Harmonizing diffusion MRI data across\nmultiple sites and scanners.\" MICCAI 2015.\nhttps://scholar.harvard.edu/files/hengameh/files/miccai2015.pdf\n\nFormerly: scil_compute_rish_from_sh.py", - "help": "usage: scil_sh_to_rish.py [-h] [--full_basis] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_prefix\n\nCompute the RISH (Rotationally Invariant Spherical Harmonics) features of an SH\nsignal [1].\n\nEach RISH feature map is the total energy of its associated order.\nMathematically, it is the sum of the squared SH coefficients of the SH order.\n\nThis script supports both symmetrical and asymmetrical SH images as input, of\nany SH order.\n\nEach RISH feature will be saved as a separate file.\n\n[1] Mirzaalian, Hengameh, et al. \"Harmonizing diffusion MRI data across\nmultiple sites and scanners.\" MICCAI 2015.\nhttps://scholar.harvard.edu/files/hengameh/files/miccai2015.pdf\n\nFormerly: scil_compute_rish_from_sh.py\n\npositional arguments:\n in_sh Path of the sh image. They can be formatted in any sh basis, but we \n expect it to be a symmetrical one. Else, provide --full_basis.\n out_prefix Prefix of the output RISH files to save. Suffixes will be \n based on the sh orders.\n\noptions:\n -h, --help show this help message and exit\n --full_basis Input SH image uses a full SH basis (asymmetrical).\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computation.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "work", - "and" - ], - [ - "order", - "order" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "clear", - "long", - "work", - "they" - ], - [ - "image", - "images" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "diffusion", - "diffusion" - ], - [ - "imaging", - "mri" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "action", - "clear", - "considered", - "future", - "matter", - "possibility", - "potential", - "question", - "result", - "specific", - "any" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "supported", - "supports" - ], - [ - "create", - "provide" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "signal", - "signal" - ], - [ - "areas", - "across" - ], - [ - "matter", - "thinking", - "else" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "based", - "based" - ], - [ - "maps", - "map" - ], - [ - "unique", - "features" - ], - [ - "increase", - "total", - "total" - ], - [ - "long", - "full" - ] - ], - "keywords": [] - }, - { - "name": "scil_sh_to_sf", - "docstring": "Script to sample SF values from a Spherical Harmonics signal. Outputs a Nifti\nfile with the SF values and an associated .bvec file with the chosen\ndirections.\n\nIf converting from SH to a DWI-like SF volume, --in_bval and --in_b0 need\nto be provided to concatenate the b0 image to the SF, and to generate the new\nbvals file. Otherwise, no .bval file will be created.\n\nFormerly: scil_compute_sf_from_sh.py", - "help": "usage: scil_sh_to_sf.py [-h]\n (--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724} | --in_bvec IN_BVEC)\n [--dtype {float32,float64}] [--in_bval IN_BVAL]\n [--in_b0 IN_B0] [--out_bval OUT_BVAL]\n [--out_bvec OUT_BVEC] [--b0_scaling]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--full_basis] [--b0_threshold thr] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_sf\n\nScript to sample SF values from a Spherical Harmonics signal. Outputs a Nifti\nfile with the SF values and an associated .bvec file with the chosen\ndirections.\n\nIf converting from SH to a DWI-like SF volume, --in_bval and --in_b0 need\nto be provided to concatenate the b0 image to the SF, and to generate the new\nbvals file. 
Otherwise, no .bval file will be created.\n\nFormerly: scil_compute_sf_from_sh.py\n\npositional arguments:\n in_sh Path of the SH volume.\n out_sf Name of the output SF file to save (bvals/bvecs will be automatically named when necessary).\n\noptions:\n -h, --help show this help message and exit\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere used for the SH to SF projection. \n --in_bvec IN_BVEC Directions used for the SH to SF projection. \n If given, --in_bval must also be provided.\n --dtype {float32,float64}\n Datatype to use for SF computation and output array.'[float32]'\n --in_bval IN_BVAL b-value file, in FSL format, used to assign a b-value to the \n output SF and generate a `.bval` file.\n - If used, --out_bval is required.\n - The output bval will contain one b-value per point in the SF \n output (i.e. one per point on the --sphere or one per --in_bvec.)\n - The values of the output bval will all be set to the same b-value:\n the average of your in_bval. (Any b0 found in this file, i.e \n b-values under --b0_threshold, will be removed beforehand.)\n - To add b0s to both the SF volume and the --out_bval file, use --in_b0.\n --in_b0 IN_B0 b0 volume to concatenate to the final SF volume.\n --out_bval OUT_BVAL Optional output bval file.\n --out_bvec OUT_BVEC Optional output bvec file.\n --b0_scaling Scale resulting SF by the b0 image (--in_b0 mustbe given).\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --full_basis If true, use a full basis for the input SH coefficients.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n Default if not set is 20.\n This value is used with option --in_bval only: any b0 found in the in_bval will be removed.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "create", - "generate" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "order", - "required" - ], - [ - "average", - "per" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "considered", - "are" - ], - [ - "difference", - "point" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "papers", - "paper" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "meaning", - "name" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "direction", - "directions" - ], - [ - "long", - "a" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "thinking", - "i" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "action", - "clear", - "considered", - "future", - "matter", - "possibility", - "potential", - "question", - "result", - "specific", - "any" - ], - [ - "long", - "with" - ], - [ - "average", - "average" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "methods", - "using" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "meaning", - "true", - "true" - ], - [ - "signal", - "signal" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "projection", - "projection" - ], - [ - "image", - "image" - ], - [ - "result", - "resulting" - ], - [ - "considered", - "is" - ], - [ - "represent", - "chosen" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "create", - "lack", - "step", - "work", - "working", - "need" - ], - [ - "step", - "start" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "create", - "created" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "long", - "full" - ], - [ - "order", - "necessary" - ] - ], - "keywords": [] - }, - { - "name": "scil_stats_group_comparison", - "docstring": "Run group comparison statistics on metrics from tractometry\n1) Separate the sample given a particular variable (group_by) into groups\n\n2) Does Shapiro-Wilk test of normality for every sample\nhttps://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test\n\n3) Does Levene or Bartlett (depending on normality) test of variance\nhomogeneity Levene:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm\nBartlett:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm\n\n4) Test the group 
difference for every measure with the correct test depending\n on the sample (Student, Welch, Mannwhitneyu, ANOVA, Kruskall-Wallis)\nStudent :\nhttps://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test\nWelch :\nhttps://en.wikipedia.org/wiki/Welch%27s_t-test\nMann-Whitney U :\nhttps://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test\nANOVA :\nhttp://www.biostathandbook.com/onewayanova.html\nKruskall-Wallis :\nhttps://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variance\n\n\n5) If the group difference test is positive and number of group is greater than\n 2, test the group difference two by two.\n\n6) Generate the result for all metrics and bundles\n\nFormerly: scil_group_comparison.py", - "help": "usage: scil_stats_group_comparison.py [-h] [--out_dir OUT_DIR]\n [--out_json OUT_JSON]\n [--bundles BUNDLES [BUNDLES ...]]\n [--metrics METRICS [METRICS ...]]\n [--values VALUES [VALUES ...]]\n [--alpha_error ALPHA_ERROR]\n [--generate_graph] [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n IN_JSON IN_PARTICIPANTS GROUP_BY\n\nRun group comparison statistics on metrics from tractometry\n1) Separate the sample given a particular variable (group_by) into groups\n\n2) Does Shapiro-Wilk test of normality for every sample\nhttps://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test\n\n3) Does Levene or Bartlett (depending on normality) test of variance\nhomogeneity Levene:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm\nBartlett:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm\n\n4) Test the group difference for every measure with the correct test depending\n on the sample (Student, Welch, Mannwhitneyu, ANOVA, Kruskall-Wallis)\nStudent :\nhttps://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test\nWelch :\nhttps://en.wikipedia.org/wiki/Welch%27s_t-test\nMann-Whitney U :\nhttps://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test\nANOVA :\nhttp://www.biostathandbook.com/onewayanova.html\nKruskall-Wallis :\nhttps://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variance\n\n5) If the group difference test is positive and number of group is greater than\n 2, test the group difference two by two.\n\n6) Generate the result for all metrics and bundles\n\nFormerly: scil_group_comparison.py\n\npositional arguments:\n IN_JSON Input JSON file from tractometry nextflow pipeline or equivalent.\n IN_PARTICIPANTS Input tsv participants file.See doc in https://scilpy.readthedocs.io/en/latest/documentation/construct_participants_tsv_file.html.\n GROUP_BY Variable that will be used to compare group together.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Name of the output folder path. [stats]\n --out_json OUT_JSON The name of the result json output file otherwise it will be printed.\n --bundles BUNDLES [BUNDLES ...], -b BUNDLES [BUNDLES ...]\n Bundle(s) in which you want to do stats. [all]\n --metrics METRICS [METRICS ...], -m METRICS [METRICS ...]\n Metric(s) on which you want to do stats. [all]\n --values VALUES [VALUES ...], --va VALUES [VALUES ...]\n Value(s) on which you want to do stats (mean, std). [all]\n --alpha_error ALPHA_ERROR, -a ALPHA_ERROR\n Type 1 error for all the test. [0.05]\n --generate_graph, --gg\n Generate a simple plot of every metric across groups.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "bundles", - "bundles" - ], - [ - "create", - "generate" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "work", - "and" - ], - [ - "create", - "experience", - "matter", - "thinking", - "true", - "sort" - ], - [ - "left", - "into" - ], - [ - "working", - "together" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "lack", - "loss", - "result", - "result" - ], - [ - "meaning", - "name" - ], - [ - "positive", - "positive" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "considered", - "knowledge", - "question", - "specific", - "subject", - "unique", - "particular" - ], - [ - "long", - "with" - ], - [ - "contrast", - "comparison" - ], - [ - "difference", - "difference" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "long", - "than" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "variance", - "variance" - ], - [ - "areas", - "across" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "participants", - "participants" - ], - [ - "based", - "group" - ], - [ - "variable", - "variable" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "matter", - "question", - "does" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "long", - "two" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "total", - "80" - ], - [ - "error", - "error" - ], - [ - "bundles", - "bundle" - ], - [ - "greater", - "greater" - ] - ], - "keywords": [] - }, - { - "name": "scil_surface_apply_transform", - "docstring": "Script to apply a transform to a surface (FreeSurfer or VTK supported),\nusing output from ANTs registration tools (i.e. affine.txt, warp.nii.gz).\n\nExample usage from T1 to b0 using ANTs transforms:\n> ConvertTransformFile 3 output0GenericAffine.mat vtk_transfo.txt --hm\n> scil_surface_apply_transform.py lh_white_lps.vtk affine.txt lh_white_b0.vtk\\\n --ants_warp warp.nii.gz\n\nImportant: The input surface needs to be in *T1 world LPS* coordinates\n(aligned over the T1 in MI-Brain).\n\nThe script will use the linear affine first and then the warp image.\nThe resulting surface will be in *b0 world LPS* coordinates\n(aligned over the b0 in MI-Brain).\n\nFormerly: scil_apply_transform_to_surface.py.", - "help": "usage: scil_surface_apply_transform.py [-h] [--ants_warp ANTS_WARP]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface ants_affine out_surface\n\nScript to apply a transform to a surface (FreeSurfer or VTK supported),\nusing output from ANTs registration tools (i.e. 
affine.txt, warp.nii.gz).\n\nExample usage from T1 to b0 using ANTs transforms:\n> ConvertTransformFile 3 output0GenericAffine.mat vtk_transfo.txt --hm\n> scil_surface_apply_transform.py lh_white_lps.vtk affine.txt lh_white_b0.vtk\\\n --ants_warp warp.nii.gz\n\nImportant: The input surface needs to be in *T1 world LPS* coordinates\n(aligned over the T1 in MI-Brain).\n\nThe script will use the linear affine first and then the warp image.\nThe resulting surface will be in *b0 world LPS* coordinates\n(aligned over the b0 in MI-Brain).\n\nFormerly: scil_apply_transform_to_surface.py.\n\npositional arguments:\n in_surface Input surface (.vtk).\n ants_affine Affine transform from ANTs (.txt or .mat).\n out_surface Output surface (.vtk).\n\noptions:\n -h, --help show this help message and exit\n --ants_warp ANTS_WARP\n Warp image from ANTs (Nifti image).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). NeuroImage.\n", - "synonyms": [ - [ - "work", - "and" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "contrast", - "difference", - "form", - "result", - "specific", - "subject", - "true", - "unique", - "work", - "example" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "long", - "over" - ], - [ - "long", - "a" - ], - [ - "applied", - "apply" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "thinking", - "i" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "methods", - "using" - ], - [ - "supported", - "supported" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "result", - "resulting" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "methods", - "tool", - "tools" - ], - [ - "considered", - "key", - "work", - "important" - ] - ], - "keywords": [] - }, - { - "name": "scil_surface_convert", - "docstring": "Script to convert surface formats\n\nSupported formats:\n \".vtk\", \".vtp\", \".ply\", \".stl\", \".xml\", \".obj\"\n and FreeSurfer surfaces\n\n> scil_surface_convert.py surf.vtk converted_surf.ply\n\nFormerly: scil_convert_surface.py", - "help": "usage: scil_surface_convert.py [-h] [--xform XFORM] [--to_lps]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface\n\nScript to convert surface formats\n\nSupported formats:\n \".vtk\", \".vtp\", \".ply\", \".stl\", \".xml\", \".obj\"\n and FreeSurfer surfaces\n\n> scil_surface_convert.py surf.vtk converted_surf.ply\n\nFormerly: scil_convert_surface.py\n\npositional arguments:\n in_surface Input a surface (FreeSurfer or supported by VTK).\n out_surface Output surface (formats supported by VTK).\n\noptions:\n -h, --help show this help message and exit\n --xform XFORM Path of the copy-paste output from mri_info \n Using: mri_info $input >> log.txt, \n The file log.txt would be this parameter\n --to_lps Flip for Surface/MI-Brain LPS\n -v [{DEBUG,INFO,WARNING}]\n Produces 
verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). NeuroImage.\n", - "synonyms": [ - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "work", - "and" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "parameter", - "parameters", - "parameter" - ], - [ - "order", - "set" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "methods", - "using" - ], - [ - "supported", - "supported" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ] - ], - "keywords": [] - }, - { - "name": "scil_surface_flip", - "docstring": "Script to flip a given surface (FreeSurfer or VTK supported).\n\nCan flip surface coordinates around a single or multiple axes\nCan also be used to reverse the orientation of the surface normals.\n\nFormerly: scil_flip_surface.py", - "help": "usage: scil_surface_flip.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface {x,y,z,n} [{x,y,z,n} ...]\n\nScript to flip a given surface (FreeSurfer or VTK supported).\n\nCan flip surface coordinates around a single or multiple axes\nCan also be used to reverse the orientation of the surface normals.\n\nFormerly: scil_flip_surface.py\n\npositional arguments:\n in_surface Input surface (.vtk).\n out_surface Output flipped surface (.vtk).\n {x,y,z,n} The axes you want to flip. eg: to flip the x and y axes use: x y. to reverse the surface normals use: n\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). 
NeuroImage.\n", - "synonyms": [ - [ - "orientation", - "orientation" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "methods", - "using" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "supported", - "supported" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ] - ], - "keywords": [] - }, - { - "name": "scil_surface_smooth", - "docstring": "Script to smooth a surface with a Laplacian blur.\n\nFor a standard FreeSurfer white matter mesh a step_size from 0.1 to 10\nis recommended\n\nSmoothing time = step_size * nb_steps\n small amount of smoothing [step_size 1, nb_steps 10]\n moderate amount of smoothing [step_size 10, nb_steps 100]\n large amount of smoothing [step_size 100, nb_steps 1000]\n\nFormerly: scil_smooth_surface.py", - "help": "usage: scil_surface_smooth.py [-h] [-m VTS_MASK] [-n NB_STEPS] [-s STEP_SIZE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface\n\nScript to smooth a surface with a Laplacian blur.\n\nFor a standard FreeSurfer white matter mesh a step_size from 0.1 to 10\nis recommended\n\nSmoothing time = step_size * nb_steps\n small amount of smoothing [step_size 1, nb_steps 10]\n moderate amount of smoothing [step_size 10, nb_steps 100]\n large amount of smoothing [step_size 100, nb_steps 1000]\n\nFormerly: scil_smooth_surface.py\n\npositional arguments:\n in_surface Input surface (.vtk).\n out_surface Output smoothed surface (.vtk).\n\noptions:\n -h, --help show this help message and exit\n -m VTS_MASK, --vts_mask VTS_MASK\n Vertex mask no smoothing where mask equals 0 (.npy).\n -n NB_STEPS, --nb_steps NB_STEPS\n Number of steps for laplacian smooth [2].\n -s STEP_SIZE, --step_size STEP_SIZE\n Laplacian smooth step size [5.0].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). 
NeuroImage.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "large", - "larger", - "small" - ], - [ - "work", - "and" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "matter", - "question", - "subject", - "matter" - ], - [ - "left", - "result", - "when" - ], - [ - "order", - "set" - ], - [ - "step", - "steps" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "large", - "larger", - "large" - ], - [ - "long", - "with" - ], - [ - "long", - "result", - "work", - "working", - "time" - ], - [ - "action", - "process", - "step", - "step" - ], - [ - "methods", - "using" - ], - [ - "increase", - "total", - "amount" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "blue", - "green", - "red", - "white", - "white" - ], - [ - "area", - "work", - "where" - ], - [ - "larger", - "size", - "size" - ], - [ - "total", - "100" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ] - ], - "keywords": [] - }, - { - "name": "scil_tracking_local", - "docstring": "Local streamline HARDI tractography.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\n\nWARNING: This script DOES NOT support asymetric FODF input (aFODF).\n\nAlgo 'eudx': select the peak from the spherical function (SF) most closely\naligned to the previous direction, and follow an average of it and the previous\ndirection [1].\n\nAlgo 'det': select the orientation corresponding to the maximum of the\nspherical function.\n\nAlgo 'prob': select a direction drawn from the empirical distribution function\ndefined from the SF.\n\nAlgo 'ptt': select the propagation direction using Parallel-Transport\nTractography (PTT) framework, see [2] for more details.\n\nNOTE: eudx can be used with pre-computed peaks from fodf as well as\nevecs_v1.nii.gz from scil_dti_metrics.py (experimental).\n\nNOTE: If tracking with PTT, the step-size should be smaller than usual,\ni.e 0.1-0.2mm or lower. The maximum angle between segments (theta) should\nbe between 10 and 20 degrees.\n\nThe local tracking algorithm can also run on the GPU using the --use_gpu\noption (experimental). By default, GPU tracking behaves the same as\nDIPY. Below is a list of known divergences between the CPU and GPU\nimplementations:\n * Backend: The CPU implementation uses DIPY's LocalTracking and the\n GPU implementation uses an in-house OpenCL implementation.\n * Algo: For the GPU implementation, the only available algorithm is\n Algo 'prob'.\n * SH interpolation: For GPU tracking, SH interpolation can be set to either\n nearest neighbour or trilinear (default). With DIPY, the only available\n method is trilinear.\n * Forward tracking: For GPU tracking, the `--forward_only` flag can be used\n to disable backward tracking. This option isn't available for CPU\n tracking.\n\nAll the input nifti files must be in isotropic resolution.\n\n\nReferences\n----------\n[1]: Garyfallidis, E. (2012). Towards an accurate brain tractography\n[PhD thesis]. University of Cambridge. United Kingdom.\n\n[2]: Aydogan, D. B., & Shi, Y. (2020). 
Parallel transport tractography.\nIEEE transactions on medical imaging, 40(2), 635-647.\n\nFormerly: scil_compute_local_tracking.py", - "help": "usage: scil_tracking_local.py [-h] [--step STEP_SIZE] [--min_length m]\n [--max_length M] [--theta THETA]\n [--sfthres sf_th]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--sh_to_pmf]\n [--algo {det,prob,ptt,eudx}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--sub_sphere SUB_SPHERE]\n [--probe_length PROBE_LENGTH]\n [--probe_radius PROBE_RADIUS]\n [--probe_quality PROBE_QUALITY]\n [--probe_count PROBE_COUNT]\n [--support_exponent SUPPORT_EXPONENT]\n [--use_gpu] [--sh_interp {trilinear,nearest}]\n [--forward_only] [--batch_size BATCH_SIZE]\n [--compress [COMPRESS_TH]] [-f] [--save_seeds]\n [--seed SEED] [-v [{DEBUG,INFO,WARNING}]]\n in_odf in_seed in_mask out_tractogram\n\nLocal streamline HARDI tractography.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\n\nWARNING: This script DOES NOT support asymetric FODF input (aFODF).\n\nAlgo 'eudx': select the peak from the spherical function (SF) most closely\naligned to the previous direction, and follow an average of it and the previous\ndirection [1].\n\nAlgo 'det': select the orientation corresponding to the maximum of the\nspherical function.\n\nAlgo 'prob': select a direction drawn from the empirical distribution function\ndefined from the SF.\n\nAlgo 'ptt': select the propagation direction using Parallel-Transport\nTractography (PTT) framework, see [2] for more details.\n\nNOTE: eudx can be used with pre-computed peaks from fodf as well as\nevecs_v1.nii.gz from scil_dti_metrics.py (experimental).\n\nNOTE: If tracking with PTT, the step-size should be smaller than usual,\ni.e 0.1-0.2mm or lower. The maximum angle between segments (theta) should\nbe between 10 and 20 degrees.\n\nThe local tracking algorithm can also run on the GPU using the --use_gpu\noption (experimental). By default, GPU tracking behaves the same as\nDIPY. Below is a list of known divergences between the CPU and GPU\nimplementations:\n * Backend: The CPU implementation uses DIPY's LocalTracking and the\n GPU implementation uses an in-house OpenCL implementation.\n * Algo: For the GPU implementation, the only available algorithm is\n Algo 'prob'.\n * SH interpolation: For GPU tracking, SH interpolation can be set to either\n nearest neighbour or trilinear (default). With DIPY, the only available\n method is trilinear.\n * Forward tracking: For GPU tracking, the `--forward_only` flag can be used\n to disable backward tracking. This option isn't available for CPU\n tracking.\n\nAll the input nifti files must be in isotropic resolution.\n\nReferences\n----------\n[1]: Garyfallidis, E. (2012). Towards an accurate brain tractography\n[PhD thesis]. University of Cambridge. United Kingdom.\n\n[2]: Aydogan, D. B., & Shi, Y. (2020). Parallel transport tractography.\nIEEE transactions on medical imaging, 40(2), 635-647.\n\nFormerly: scil_compute_local_tracking.py\n\npositional arguments:\n in_odf File containing the orientation diffusion function \n as spherical harmonics file (.nii.gz). Ex: ODF or fODF.\n in_seed Seeding mask (.nii.gz).\n in_mask Tracking mask (.nii.gz).\n Tracking will stop outside this mask. 
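As a rough illustration of the aperture-cone rule above (algo 'prob'), the next direction can be drawn from the SF restricted to directions within theta of the previous one. A toy numpy sketch, with all names invented; the real samplers live in DIPY and scilpy:

import numpy as np

def pick_next_direction(sf, sphere_dirs, prev_dir, theta_deg, rng):
    # sf: SF amplitudes sampled on sphere_dirs (N x 3 unit vectors).
    cos_min = np.cos(np.deg2rad(theta_deg))
    in_cone = sphere_dirs @ prev_dir >= cos_min      # angular constraint
    pmf = np.where(in_cone, sf, 0.0)
    pmf[pmf < 0.1 * sf.max()] = 0.0                  # relative cutoff, like --sfthres
    if pmf.sum() == 0.0:                             # no admissible direction: stop
        return None
    return sphere_dirs[rng.choice(len(pmf), p=pmf / pmf.sum())]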
The last point of each \n streamline (triggering the stopping criteria) IS added to the streamline.\n out_tractogram Tractogram output file (must be .trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n\nTracking options:\n --step STEP_SIZE Step size in mm. [0.5]\n --min_length m Minimum length of a streamline in mm. [10.0]\n --max_length M Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. If the angle is too big, streamline is \n stopped and the following point is NOT included.\n [\"eudx\"=60, \"det\"=45, \"prob\"=20, \"ptt\"=20]\n --sfthres sf_th Spherical function relative threshold. [0.1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sh_to_pmf If set, map spherical harmonics to spherical function (pmf) before \n tracking (faster, requires more memory)\n --algo {det,prob,ptt,eudx}\n Algorithm to use. [prob]\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Dipy sphere; set of possible directions.\n Default: [repulsion724]\n --sub_sphere SUB_SPHERE\n Subdivides each face of the sphere into 4^s new faces. [0]\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nPTT options:\n --probe_length PROBE_LENGTH\n The length of the probes. Smaller value\n yields more dispersed fibers. [1.0]\n --probe_radius PROBE_RADIUS\n The radius of the probe. A large probe_radius\n helps mitigate noise in the pmf but it might\n make it harder to sample thin and intricate\n connections, also the boundary of fiber\n bundles might be eroded. [0]\n --probe_quality PROBE_QUALITY\n The quality of the probe. This parameter sets\n the number of segments to split the cylinder\n along the length of the probe (minimum=2) [3]\n --probe_count PROBE_COUNT\n The number of probes. This parameter sets the\n number of parallel lines used to model the\n cylinder (minimum=1). [1]\n --support_exponent SUPPORT_EXPONENT\n Data support exponent, used for rejection\n sampling. [3]\n\nGPU options:\n --use_gpu Enable GPU tracking (experimental).\n --sh_interp {trilinear,nearest}\n SH image interpolation method. [trilinear]\n --forward_only Perform forward tracking only.\n --batch_size BATCH_SIZE\n Approximate size of GPU batches (number\n of streamlines to track in parallel). [10000]\n\nOutput options:\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. 
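The --compress option linearizes streamlines by merging nearly collinear points under a maximum deviation; a similar effect can be obtained offline with DIPY, assuming its compress_streamlines routine and an already-loaded list of N x 3 point arrays:

from dipy.tracking.streamline import compress_streamlines

# tol_error is the maximum deviation in mm, matching the rule of thumb below
compressed = compress_streamlines(streamlines, tol_error=0.1)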
Value is the maximum \n compression distance in mm.\n A rule of thumb is to set it to 0.1mm for deterministic \n streamlines and to 0.2mm for probabilistic streamlines. [0.1]\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n Hint: you can then use scil_tractogram_seed_density_map.\n --seed SEED Random number generator seed.\n\nLogging options:\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "methods", - "method" - ], - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "long", - "work", - "more" - ], - [ - "fibre", - "fiber" - ], - [ - "order", - "set" - ], - [ - "direction", - "directions" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "create", - "future", - "step", - "work", - "make" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "diffusion", - "diffusion" - ], - [ - "threshold", - "threshold" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "experiment", - "experimental" - ], - [ - "clear", - "future", - "possibility", - "potential", - "question", - "result", - "specific", - "step", - "possible" - ], - [ - "long", - "than" - ], - [ - "imaging", - "imaging" - ], - [ - "step", - "follow" - ], - [ - "step", - "forward" - ], - [ - "represent", - "chosen" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "maps", - "map" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "question", - "might" - ], - [ - "memory", - "memory" - ], - [ - "average", - "per" - ], - [ - "orientation", - "orientation" - ], - [ - "work", - "also" - ], - [ - "considered", - "known" - ], - [ - "result", - "following" - ], - [ - "difference", - "point" - ], - [ - "left", - "from" - ], - [ - "thinking", - "you" - ], - [ - "step", - "steps" - ], - [ - "long", - "a" - ], - [ - "total", - "40" - ], - [ - "voxel", - "voxel" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "future", - "will" - ], - [ - "random", - "random" - ], - [ - "held", - "last" - ], - [ - "average", - "average" - ], - [ - "view", - "see" - ], - [ - "result", - "resulting" - ], - [ - "order", - "rule" - ], - [ - "large", - "big" - ], - [ - "connect", - "connected", - "connection", - "connections", - "connections" - ], - [ - "matter", - "question", - "does" - ], - [ - "streamline", - "streamline" - ], - [ - "studies", - "university" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "lack", - "quality" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "data", - "tracking", - "tracking" - ], - [ - "considered", - "highly", - "long", - "work", - "most" - ], - [ - "left", - "into" - ], - [ - "papers", - "paper" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "thinking", - "i" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "direction", - "towards" - ], - [ - "defined", - "function", - "functional", - "functions", - "function" - ], - [ - "methods", - "using" - ], 
- [ - "area", - "main", - "along" - ], - [ - "defined", - "function", - "defined" - ], - [ - "degree", - "degrees" - ], - [ - "left", - "before" - ], - [ - "individual", - "each" - ], - [ - "higher", - "lower" - ], - [ - "total", - "number" - ], - [ - "bundles", - "bundles" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "clear", - "long", - "too" - ], - [ - "seeding", - "seeding" - ], - [ - "parameter", - "parameters", - "parameter" - ], - [ - "methods", - "use" - ], - [ - "direction", - "direction" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "large", - "larger", - "large" - ], - [ - "probabilistic", - "deterministic" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "action", - "process", - "step", - "step" - ], - [ - "algorithm", - "algorithm" - ], - [ - "streamlines", - "streamlines" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "reliable", - "accurate" - ], - [ - "fibres", - "fibers" - ], - [ - "larger", - "size", - "size" - ], - [ - "total", - "60" - ], - [ - "considered", - "experience", - "large", - "long", - "result", - "variety", - "work", - "working", - "well" - ], - [ - "large", - "larger", - "smaller" - ], - [ - "increase", - "total", - "total" - ] - ], - "keywords": [] - }, - { - "name": "scil_tracking_local_dev", - "docstring": "Local streamline HARDI tractography using scilpy-only methods -- no dipy (i.e\nno cython). The goal of this is to have a python-only version that can be\nmodified more easily by our team when testing new algorithms and parameters,\nand that can be used as parent classes in sub-projects of our lab such as in\ndwi_ml.\n\nWARNING. MUCH SLOWER THAN scil_tracking_local.py. We recommand using multi-\nprocessing with option --nb_processes.\n\nSimilar to scil_tracking_local:\n The tracking direction is chosen in the aperture cone defined by the\n previous tracking direction and the angular constraint.\n - Algo 'det': the maxima of the spherical function (SF) the most closely\n aligned to the previous direction.\n - Algo 'prob': a direction drawn from the empirical distribution function\n defined from the SF.\n\nContrary to scil_tracking_local:\n - Algo 'eudx' is not yet available!\n - Input nifti files do not necessarily need to be in isotropic resolution.\n - The script works with asymmetric input ODF.\n - The interpolation for the tracking mask and spherical function can be\n one of 'nearest' or 'trilinear'.\n - Runge-Kutta integration is supported for the step function.\n\nA few notes on Runge-Kutta integration.\n 1. Runge-Kutta integration is used to approximate the next tracking\n direction by estimating directions from future tracking steps. This\n works well for deterministic tracking. However, in the context of\n probabilistic tracking, the next tracking directions cannot be estimated\n in advance, because they are picked randomly from a distribution. It is\n therefore recommanded to keep the rk_order to 1 for probabilistic\n tracking.\n 2. 
As a rule of thumb, doubling the rk_order will double the computation\n time in the worst case.\n\nReferences: [1] Girard, G., Whittingstall K., Deriche, R., and\n Descoteaux, M. (2014). Towards quantitative connectivity analysis:\n reducing tractography biases. Neuroimage, 98, 266-278.\n\nFormerly: scil_compute_local_tracking_dev.py", - "help": "usage: scil_tracking_local_dev.py [-h] [--step STEP_SIZE] [--min_length m]\n [--max_length M] [--theta THETA]\n [--sfthres sf_th]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--algo {det,prob}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--sub_sphere SUB_SPHERE]\n [--sfthres_init sf_th] [--rk_order K]\n [--max_invalid_nb_points MAX]\n [--forward_only]\n [--sh_interp {nearest,trilinear}]\n [--mask_interp {nearest,trilinear}]\n [--keep_last_out_point]\n [--n_repeats_per_seed N_REPEATS_PER_SEED]\n [--rng_seed RNG_SEED] [--skip SKIP]\n [--processes NBR] [--compress [COMPRESS_TH]]\n [-f] [--save_seeds]\n [-v [{DEBUG,INFO,WARNING}]]\n in_odf in_seed in_mask out_tractogram\n\nLocal streamline HARDI tractography using scilpy-only methods -- no dipy (i.e\nno cython). The goal of this is to have a python-only version that can be\nmodified more easily by our team when testing new algorithms and parameters,\nand that can be used as parent classes in sub-projects of our lab such as in\ndwi_ml.\n\nWARNING. MUCH SLOWER THAN scil_tracking_local.py. We recommand using multi-\nprocessing with option --nb_processes.\n\nSimilar to scil_tracking_local:\n The tracking direction is chosen in the aperture cone defined by the\n previous tracking direction and the angular constraint.\n - Algo 'det': the maxima of the spherical function (SF) the most closely\n aligned to the previous direction.\n - Algo 'prob': a direction drawn from the empirical distribution function\n defined from the SF.\n\nContrary to scil_tracking_local:\n - Algo 'eudx' is not yet available!\n - Input nifti files do not necessarily need to be in isotropic resolution.\n - The script works with asymmetric input ODF.\n - The interpolation for the tracking mask and spherical function can be\n one of 'nearest' or 'trilinear'.\n - Runge-Kutta integration is supported for the step function.\n\nA few notes on Runge-Kutta integration.\n 1. Runge-Kutta integration is used to approximate the next tracking\n direction by estimating directions from future tracking steps. This\n works well for deterministic tracking. However, in the context of\n probabilistic tracking, the next tracking directions cannot be estimated\n in advance, because they are picked randomly from a distribution. It is\n therefore recommanded to keep the rk_order to 1 for probabilistic\n tracking.\n 2. As a rule of thumb, doubling the rk_order will double the computation\n time in the worst case.\n\nReferences: [1] Girard, G., Whittingstall K., Deriche, R., and\n Descoteaux, M. (2014). Towards quantitative connectivity analysis:\n reducing tractography biases. Neuroimage, 98, 266-278.\n\nFormerly: scil_compute_local_tracking_dev.py\n\npositional arguments:\n in_odf File containing the orientation diffusion function \n as spherical harmonics file (.nii.gz). Ex: ODF or fODF.\n in_seed Seeding mask (.nii.gz).\n in_mask Tracking mask (.nii.gz).\n Tracking will stop outside this mask. 
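To see why doubling rk_order roughly doubles the runtime (note 2 above), count direction-field evaluations per step: Euler (rk_order 1) queries once, the midpoint rule (rk_order 2) twice. A sketch, where get_dir is an invented stand-in for the tracker's direction lookup:

def rk2_step(pos, prev_dir, step_mm, get_dir):
    # Midpoint (second-order Runge-Kutta) integration of the direction field.
    # pos and directions are numpy 3-vectors.
    d1 = get_dir(pos, prev_dir)                  # first evaluation
    d2 = get_dir(pos + 0.5 * step_mm * d1, d1)   # second evaluation, at midpoint
    return pos + step_mm * d2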
The last point of each \n streamline (triggering the stopping criteria) IS added to the streamline.\n out_tractogram Tractogram output file (must be .trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nTracking options:\n --step STEP_SIZE Step size in mm. [0.5]\n --min_length m Minimum length of a streamline in mm. [10.0]\n --max_length M Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. If the angle is too big, streamline is \n stopped and the following point is NOT included.\n [\"eudx\"=60, \"det\"=45, \"prob\"=20, \"ptt\"=20]\n --sfthres sf_th Spherical function relative threshold. [0.1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --algo {det,prob} Algorithm to use. [prob]\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Dipy sphere; set of possible directions.\n Default: [repulsion724]\n --sub_sphere SUB_SPHERE\n Subdivides each face of the sphere into 4^s new faces. [0]\n --sfthres_init sf_th Spherical function relative threshold value for the \n initial direction. [0.5]\n --rk_order K The order of the Runge-Kutta integration used for the step function.\n For more information, refer to the note in the script description. [1]\n --max_invalid_nb_points MAX\n Maximum number of steps without valid direction, \n ex: if threshold on ODF or max angles are reached.\n Default: 0, i.e. do not add points following an invalid direction.\n --forward_only If set, tracks in one direction only (forward) given the \n initial seed. The direction is randomly drawn from the ODF.\n --sh_interp {nearest,trilinear}\n Spherical harmonic interpolation: nearest-neighbor \n or trilinear. [trilinear]\n --mask_interp {nearest,trilinear}\n Mask interpolation: nearest-neighbor or trilinear. [nearest]\n --keep_last_out_point\n If set, keep the last point (once out of the tracking mask) of \n the streamline. Default: discard them. This is the default in \n Dipy too. Note that points obtained after an invalid direction \n (ex when angle is too sharp or sh_threshold not reached) are \n never added.\n --n_repeats_per_seed N_REPEATS_PER_SEED\n By default, each seed position is used only once. This option\n allows for tracking from the exact same seed n_repeats_per_seed\n times. [1]\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nRandom seeding options:\n --rng_seed RNG_SEED Initial value for the random number generator. [0]\n --skip SKIP Skip the first N random numbers. 
\n Useful if you want to create new streamlines to add to \n a previously created tractogram with a fixed --rng_seed.\n Ex: If tractogram_1 was created with -nt 1,000,000, \n you can create tractogram_2 with \n --skip 1,000,000.\n\nMemory options:\n --processes NBR Number of sub-processes to start. \n Default: [1]\n\nOutput options:\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm.\n A rule of thumb is to set it to 0.1mm for deterministic \n streamlines and to 0.2mm for probabilistic streamlines. [0.1]\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n Hint: you can then use scil_tractogram_seed_density_map.\n", - "synonyms": [ - [ - "long", - "work", - "more" - ], - [ - "clear", - "long", - "work", - "they" - ], - [ - "invalid", - "valid", - "invalid" - ], - [ - "order", - "set" - ], - [ - "long", - "have" - ], - [ - "processing", - "processing" - ], - [ - "direction", - "directions" - ], - [ - "increase", - "limiting", - "reducing" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "diffusion", - "diffusion" - ], - [ - "threshold", - "threshold" - ], - [ - "long", - "with" - ], - [ - "application", - "allows" - ], - [ - "future", - "our" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "contrast", - "form", - "forms", - "larger", - "result", - "specific", - "variety", - "similar" - ], - [ - "clear", - "future", - "possibility", - "potential", - "question", - "result", - "specific", - "step", - "possible" - ], - [ - "long", - "than" - ], - [ - "step", - "forward" - ], - [ - "represent", - "chosen" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "create", - "created" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "large", - "long", - "few" - ], - [ - "exist", - "cannot" - ], - [ - "memory", - "memory" - ], - [ - "average", - "per" - ], - [ - "orientation", - "orientation" - ], - [ - "result", - "following" - ], - [ - "considered", - "result", - "therefore" - ], - [ - "difference", - "point" - ], - [ - "left", - "from" - ], - [ - "thinking", - "you" - ], - [ - "step", - "steps" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "future", - "will" - ], - [ - "random", - "random" - ], - [ - "create", - "create" - ], - [ - "held", - "last" - ], - [ - "clear", - "considered", - "future", - "lack", - "long", - "matter", - "question", - "result", - "work", - "because" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "result", - "resulting" - ], - [ - "order", - "rule" - ], - [ - "left", - "long", - "work", - "once" - ], - [ - "considered", - "subsequently", - "was" - ], - [ - "large", - "big" - ], - [ - "analysis", - "methodology", - "methods", - "processes", - "methods" - ], - [ - "streamline", - "streamline" - ], - [ - "connectivity", - "connectivity" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "future", - "held", - "step", - "next" - ], - [ - "data", - "tracking", - 
"tracking" - ], - [ - "considered", - "specific", - "variety", - "such" - ], - [ - "algorithm", - "algorithms" - ], - [ - "considered", - "highly", - "long", - "work", - "most" - ], - [ - "considered", - "are" - ], - [ - "left", - "into" - ], - [ - "papers", - "paper" - ], - [ - "attention", - "experience", - "long", - "result", - "work", - "much" - ], - [ - "total", - "estimated" - ], - [ - "meaning", - "refer" - ], - [ - "tool", - "useful" - ], - [ - "clear", - "lack", - "result", - "without" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "analysis", - "data", - "methods", - "study", - "analysis" - ], - [ - "thinking", - "i" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "project", - "projects" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "direction", - "towards" - ], - [ - "defined", - "function", - "functional", - "functions", - "function" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "methods", - "using" - ], - [ - "left", - "after" - ], - [ - "defined", - "function", - "defined" - ], - [ - "exist", - "necessarily" - ], - [ - "individual", - "each" - ], - [ - "create", - "lack", - "step", - "work", - "working", - "need" - ], - [ - "step", - "start" - ], - [ - "matter", - "question", - "case" - ], - [ - "clear", - "considered", - "result", - "however" - ], - [ - "total", - "number" - ], - [ - "subsequently", - "previously" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "order", - "order" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "clear", - "long", - "too" - ], - [ - "seeding", - "seeding" - ], - [ - "methods", - "use" - ], - [ - "direction", - "direction" - ], - [ - "invalid", - "valid", - "valid" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "clear", - "left", - "out" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "probabilistic", - "deterministic" - ], - [ - "long", - "result", - "work", - "working", - "time" - ], - [ - "action", - "process", - "step", - "step" - ], - [ - "algorithm", - "algorithm" - ], - [ - "streamlines", - "streamlines" - ], - [ - "work", - "works" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "supported", - "supported" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "future", - "possibility", - "potential", - "future" - ], - [ - "considered", - "is" - ], - [ - "larger", - "size", - "size" - ], - [ - "total", - "60" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "step", - "true", - "work", - "yet" - ], - [ - "considered", - "experience", - "large", - "long", - "result", - "variety", - "work", - "working", - "well" - ], - [ - "increase", - "total", - "total" - ], - [ - "probabilistic", - "probabilistic" - ] - ], - "keywords": [] - }, - { - "name": "scil_tracking_pft", - "docstring": "Local streamline HARDI tractography including Particle Filtering tracking.\n\nWARNING: This script DOES NOT support asymetric FODF input (aFODF).\n\nThe tracking is done inside partial volume estimation maps and uses the\nparticle filtering tractography (PFT) algorithm. 
See\nscil_tracking_pft_maps.py to generate PFT required maps.\n\nStreamlines longer than min_length and shorter than max_length are kept.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\nDefault parameters as suggested in [1].\n\nAlgo 'det': the maxima of the spherical function (SF) the most closely aligned\nto the previous direction.\nAlgo 'prob': a direction drawn from the empirical distribution function defined\nfrom the SF.\n\nFor streamline compression, a rule of thumb is to set it to 0.1mm for the\ndeterministic algorithm and 0.2mm for the probabilistic algorithm.\n\nAll the input nifti files must be in isotropic resolution.\n\nFormerly: scil_compute_pft.py", - "help": "usage: scil_tracking_pft.py [-h] [--algo {det,prob}] [--step STEP_SIZE]\n [--min_length MIN_LENGTH]\n [--max_length MAX_LENGTH] [--theta THETA] [--act]\n [--sfthres SF_THRESHOLD]\n [--sfthres_init SF_THRESHOLD_INIT]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--particles PARTICLES]\n [--back BACK_TRACKING]\n [--forward FORWARD_TRACKING] [--all] [--seed SEED]\n [-f] [--save_seeds] [--compress [COMPRESS_TH]]\n [-v [{DEBUG,INFO,WARNING}]]\n in_sh in_seed in_map_include map_exclude_file\n out_tractogram\n\nLocal streamline HARDI tractography including Particle Filtering tracking.\n\nWARNING: This script DOES NOT support asymmetric FODF input (aFODF).\n\nThe tracking is done inside partial volume estimation maps and uses the\nparticle filtering tractography (PFT) algorithm. See\nscil_tracking_pft_maps.py to generate PFT required maps.\n\nStreamlines longer than min_length and shorter than max_length are kept.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\nDefault parameters as suggested in [1].\n\nAlgo 'det': the maxima of the spherical function (SF) the most closely aligned\nto the previous direction.\nAlgo 'prob': a direction drawn from the empirical distribution function defined\nfrom the SF.\n\nFor streamline compression, a rule of thumb is to set it to 0.1mm for the\ndeterministic algorithm and 0.2mm for the probabilistic algorithm.\n\nAll the input nifti files must be in isotropic resolution.\n\nFormerly: scil_compute_pft.py\n\npositional arguments:\n in_sh Spherical harmonic file (.nii.gz).\n in_seed Seeding mask (.nii.gz).\n in_map_include The probability map (.nii.gz) of ending the\n streamline and including it in the output (CMC, PFT [1])\n map_exclude_file The probability map (.nii.gz) of ending the\n streamline and excluding it in the output (CMC, PFT [1]).\n out_tractogram Tractogram output file (must be .trk or .tck).\n\nGeneric options:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nTracking options:\n --algo {det,prob} Algorithm to use (must be \"det\" or \"prob\"). [prob]\n --step STEP_SIZE Step size in mm. [0.2]\n --min_length MIN_LENGTH\n Minimum length of a streamline in mm. [10.0]\n --max_length MAX_LENGTH\n Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. [\"det\"=45, \"prob\"=20]\n --act If set, uses anatomically-constrained tractography (ACT) \n instead of continuous map criterion (CMC).\n --sfthres SF_THRESHOLD\n Spherical function relative threshold. 
[0.1]\n --sfthres_init SF_THRESHOLD_INIT\n Spherical function relative threshold value for the \n initial direction. [0.5]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nPFT options:\n --particles PARTICLES\n Number of particles to use for PFT. [15]\n --back BACK_TRACKING Length of PFT back tracking (mm). [2.0]\n --forward FORWARD_TRACKING\n Length of PFT forward tracking (mm). [1.0]\n\nOutput options:\n --all If set, keeps \"excluded\" streamlines.\n NOT RECOMMENDED, except for debugging.\n --seed SEED Random number generator seed.\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm.[0.1]\n\nReferences: [1] Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M. (2014). Towards quantitative connectivity analysis: reducing tractography biases. Neuroimage, 98, 266-278.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "considered", - "highly", - "long", - "work", - "most" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "create", - "generate" - ], - [ - "order", - "required" - ], - [ - "average", - "per" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "probability", - "probability" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "papers", - "paper" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "seeding", - "seeding" - ], - [ - "methods", - "use" - ], - [ - "direction", - "direction" - ], - [ - "order", - "set" - ], - [ - "step", - "steps" - ], - [ - "long", - "a" - ], - [ - "increase", - "limiting", - "reducing" - ], - [ - "voxel", - "voxel" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "analysis", - "data", - "methods", - "study", - "analysis" - ], - [ - "left", - "back" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "random", - "random" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "direction", - "towards" - ], - [ - "defined", - "function", - "functional", - "functions", - "function" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "probabilistic", - "deterministic" - ], - [ - "action", - "process", - "step", - "step" - ], - [ - "algorithm", - "algorithm" - ], - [ 
- "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "order", - "work", - "instead" - ], - [ - "possibility", - "proposed", - "suggested" - ], - [ - "defined", - "function", - "defined" - ], - [ - "long", - "than" - ], - [ - "streamlines", - "streamlines" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "result", - "resulting" - ], - [ - "considered", - "is" - ], - [ - "order", - "rule" - ], - [ - "step", - "forward" - ], - [ - "represent", - "chosen" - ], - [ - "anatomically", - "anatomically" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "larger", - "size", - "size" - ], - [ - "matter", - "question", - "does" - ], - [ - "streamline", - "streamline" - ], - [ - "work", - "working", - "done" - ], - [ - "connectivity", - "connectivity" - ], - [ - "long", - "longer" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "maps", - "maps" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "maps", - "map" - ], - [ - "data", - "tracking", - "tracking" - ], - [ - "increase", - "total", - "total" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_tracking_pft_maps", - "docstring": "Compute include and exclude maps, and the seeding interface mask from partial\nvolume estimation (PVE) maps. Maps should have values in [0,1], gm+wm+csf=1 in\nall voxels of the brain, gm+wm+csf=0 elsewhere.\n\nReferences: Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M.\n(2014). Towards quantitative connectivity analysis: reducing tractography\nbiases. Neuroimage.\n\nFormerly: scil_compute_maps_for_particle_filter_tracking.py", - "help": "usage: scil_tracking_pft_maps.py [-h] [--include filename]\n [--exclude filename] [--interface filename]\n [-t THRESHOLD] [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n in_wm in_gm in_csf\n\nCompute include and exclude maps, and the seeding interface mask from partial\nvolume estimation (PVE) maps. Maps should have values in [0,1], gm+wm+csf=1 in\nall voxels of the brain, gm+wm+csf=0 elsewhere.\n\nReferences: Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M.\n(2014). Towards quantitative connectivity analysis: reducing tractography\nbiases. Neuroimage.\n\nFormerly: scil_compute_maps_for_particle_filter_tracking.py\n\npositional arguments:\n in_wm White matter PVE map (nifti). From normal FAST output, has a PVE_2 name suffix.\n in_gm Grey matter PVE map (nifti). From normal FAST output, has a PVE_1 name suffix.\n in_csf Cerebrospinal fluid PVE map (nifti). From normal FAST output, has a PVE_0 name suffix.\n\noptions:\n -h, --help show this help message and exit\n --include filename Output include map (nifti). [map_include.nii.gz]\n --exclude filename Output exclude map (nifti). [map_exclude.nii.gz]\n --interface filename Output interface seeding mask (nifti). [interface.nii.gz]\n -t THRESHOLD Minimum gm and wm PVE values in a voxel to be into the interface. [0.1]\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "volume", - "volumes", - "volume" - ], - [ - "work", - "and" - ], - [ - "left", - "into" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "matter", - "question", - "subject", - "matter" - ], - [ - "left", - "result", - "when" - ], - [ - "seeding", - "seeding" - ], - [ - "meaning", - "name" - ], - [ - "long", - "have" - ], - [ - "long", - "a" - ], - [ - "increase", - "limiting", - "reducing" - ], - [ - "voxel", - "voxel" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "analysis", - "data", - "methods", - "study", - "analysis" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "direction", - "towards" - ], - [ - "variety", - "include" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "methods", - "using" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "blue", - "green", - "red", - "white", - "white" - ], - [ - "grey", - "grey" - ], - [ - "connectivity", - "connectivity" - ], - [ - "work", - "all" - ], - [ - "maps", - "maps" - ], - [ - "maps", - "map" - ] - ], - "keywords": [] - }, - { - "name": "scil_tracking_pft_maps_edit", - "docstring": "Modify PFT maps to allow PFT tracking in given mask (e.g edema).\n\nFormerly: scil_add_tracking_mask_to_pft_maps.py.", - "help": "usage: scil_tracking_pft_maps_edit.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n map_include map_exclude additional_mask\n map_include_corr map_exclude_corr\n\nModify PFT maps to allow PFT tracking in given mask (e.g edema).\n\nFormerly: scil_add_tracking_mask_to_pft_maps.py.\n\npositional arguments:\n map_include PFT map include.\n map_exclude PFT map exclude.\n additional_mask Allow PFT tracking in this mask.\n map_include_corr Corrected PFT map include output file name.\n map_exclude_corr Corrected PFT map exclude output file name.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "allow" - ], - [ - "work", - "and" - ], - [ - "held", - "on" - ], - [ - "methods", - "using" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "maps", - "maps" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "maps", - "map" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "left", - "result", - "when" - ], - [ - "data", - "tracking", - "tracking" - ], - [ - "held", - "in" - ], - [ - "considered", - "is" - ], - [ - "variety", - "include" - ], - [ - "meaning", - "name" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_apply_transform", - "docstring": "Transform a tractogram using an affine/rigid transformation and nonlinear\ndeformation (optional).\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nApplying a transformation to a tractogram can lead to invalid streamlines (out of\nthe bounding box), and thus four strategies are available:\n1) Do nothing, may crash when saving if invalid streamlines are present.\n [This is the default]\n2) --keep_invalid, save invalid streamlines. Leave it to the user to run\n scil_tractogram_remove_invalid.py if needed.\n3) --remove_invalid, automatically remove invalid streamlines before saving.\n Should not remove more than a few streamlines. Typically, the streamlines\n that are rejected are the ones reaching the limits of the brain, e.g., near\n the pons.\n4) --cut_invalid, automatically cut invalid streamlines before saving, i.e. the\n streamlines are kept but the points out of the bounding box are cut.\n\nExample:\nTo apply a transformation from ANTs to a tractogram, if the ANTs command was\nMOVING->REFERENCE...\n1) To apply the original transformation:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --inverse\n --in_deformation 1InverseWarp.nii.gz\n\n2) To apply the inverse transformation, i.e. REFERENCE->MOVING:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --in_deformation 1Warp.nii.gz\n --reverse_operation\n\nFormerly: scil_apply_transform_to_tractogram.py", - "help": "usage: scil_tractogram_apply_transform.py [-h] [--inverse]\n [--in_deformation file]\n [--reverse_operation]\n [--cut_invalid | --remove_invalid | --keep_invalid]\n [--no_empty] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_moving_tractogram in_target_file\n in_transfo out_tractogram\n\nTransform a tractogram using an affine/rigid transformation and nonlinear\ndeformation (optional).\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nApplying a transformation to a tractogram can lead to invalid streamlines (out of\nthe bounding box), and thus four strategies are available:\n1) Do nothing, may crash when saving if invalid streamlines are present.\n [This is the default]\n2) --keep_invalid, save invalid streamlines. 
Leave it to the user to run\n scil_tractogram_remove_invalid.py if needed.\n3) --remove_invalid, automatically remove invalid streamlines before saving.\n Should not remove more than a few streamlines. Typically, the streamlines\n that are rejected are the ones reaching the limits of the brain, e.g., near\n the pons.\n4) --cut_invalid, automatically cut invalid streamlines before saving, i.e. the\n streamlines are kept but the points out of the bounding box are cut.\n\nExample:\nTo apply a transformation from ANTs to a tractogram, if the ANTs command was\nMOVING->REFERENCE...\n1) To apply the original transformation:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --inverse\n --in_deformation 1InverseWarp.nii.gz\n\n2) To apply the inverse transformation, i.e. REFERENCE->MOVING:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --in_deformation 1Warp.nii.gz\n --reverse_operation\n\nFormerly: scil_apply_transform_to_tractogram.py\n\npositional arguments:\n in_moving_tractogram Path of the tractogram to be transformed.\n Bounding box validity will not be checked (could \n contain invalid streamlines).\n in_target_file Path of the reference target file (trk or nii).\n in_transfo Path of the file containing the 4x4 \n transformation matrix (.txt, .npy or .mat).\n out_tractogram Output tractogram filename (transformed data).\n\noptions:\n -h, --help show this help message and exit\n --no_empty Do not write file if there is no streamline.\n You may save an empty file if you use remove_invalid.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
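The 4x4 matrix in in_transfo acts on streamline points in homogeneous coordinates; a minimal numpy sketch of that core operation (the inverse and deformation-field handling exposed by the options above are omitted, and the function name is invented):

import numpy as np

def transform_points(points, affine_4x4):
    # points: N x 3 streamline coordinates; affine_4x4: loaded from in_transfo.
    homogeneous = np.hstack([points, np.ones((len(points), 1))])
    return (homogeneous @ affine_4x4.T)[:, :3]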
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nTransformation options:\n --inverse Apply the inverse linear transformation.\n --in_deformation file\n Path to the file containing a deformation field.\n --reverse_operation Apply the transformation in reverse (see doc), warp\n first, then linear.\n\nManagement of invalid streamlines:\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --keep_invalid Keep the streamlines landing out of the bounding box.\n", - "synonyms": [ - [ - "application", - "database", - "user" - ], - [ - "anatomy", - "anatomy" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "long", - "work", - "more" - ], - [ - "brain", - "brain" - ], - [ - "limiting", - "limits" - ], - [ - "proposed", - "rejected" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "invalid", - "valid", - "invalid" - ], - [ - "considered", - "contrast", - "difference", - "form", - "result", - "specific", - "subject", - "true", - "unique", - "work", - "example" - ], - [ - "clear", - "considered", - "create", - "form", - "manner", - "matter", - "result", - "subject", - "thinking", - "true", - "view", - "work", - "rather" - ], - [ - "methods", - "use" - ], - [ - "area", - "near" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "clear", - "left", - "out" - ], - [ - "held", - "on" - ], - [ - "applied", - "apply" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "matter", - "question", - "true", - "nothing" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "supported", - "support" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "thinking", - "i" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "matter", - "question", - "thinking", - "true", - "work", - "working", - "how" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "long", - "than" - ], - [ - "streamlines", - "streamlines" - ], - [ - "left", - "before" - ], - [ - "matrices", - "matrix" - ], - [ - "step", - "follow" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "result", - "may" - ], - [ - "considered", - "is" - ], - [ - "considered", - "subsequently", - "was" - ], - [ - "applied", - "applying" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "streamline", - "streamline" - ], - [ - "considered", - "form", - "meaning", - "order", - "result", - "thus" - ], - [ - "clear", - "long", - "question", - "result", - "work", - "there" - ], - [ - "connection", - "connections", - "link" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - 
"order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "left", - "leave" - ], - [ - "clear", - "result", - "work", - "could" - ], - [ - "assigned", - "command" - ], - [ - "large", - "long", - "few" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_apply_transform_to_hdf5", - "docstring": "Transform tractogram(s) contained in the hdf5 output from a connectivity\nscript, using an affine/rigid transformation and nonlinear deformation\n(optional).\n\nSee scil_tractogram_apply_transform.py to apply directly to a tractogram.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nOr use >> scil_tractogram_apply_transform.py --help\n\nFormerly: scil_apply_transform_to_hdf5.py", - "help": "usage: scil_tractogram_apply_transform_to_hdf5.py [-h] [--inverse]\n [--in_deformation file]\n [--reverse_operation]\n [--cut_invalid | --remove_invalid | --keep_invalid]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_hdf5 in_target_file\n in_transfo out_hdf5\n\nTransform tractogram(s) contained in the hdf5 output from a connectivity\nscript, using an affine/rigid transformation and nonlinear deformation\n(optional).\n\nSee scil_tractogram_apply_transform.py to apply directly to a tractogram.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nOr use >> scil_tractogram_apply_transform.py --help\n\nFormerly: scil_apply_transform_to_hdf5.py\n\npositional arguments:\n in_hdf5 Path of the hdf5 containing the moving tractogram, to be transformed. (.h5 extension).\n in_target_file Path of the reference target file (.trk or .nii).\n in_transfo Path of the file containing the 4x4 \n transformation, matrix (.txt, .npy or .mat).\n out_hdf5 Output tractogram filename (transformed data).\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nTransformation options:\n --inverse Apply the inverse linear transformation.\n --in_deformation file\n Path to the file containing a deformation field.\n --reverse_operation Apply the transformation in reverse (see doc), warp\n first, then linear.\n\nManagement of invalid streamlines:\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --keep_invalid Keep the streamlines landing out of the bounding box.\n", - "synonyms": [ - [ - "anatomy", - "anatomy" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "long", - "work", - "more" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "invalid", - "valid", - "invalid" - ], - [ - "clear", - "considered", - "create", - "form", - "manner", - "matter", - "result", - "subject", - "thinking", - "true", - "view", - "work", - "rather" - ], - [ - "methods", - "use" - ], - [ - "long", - "a" - ], - [ - "clear", - "left", - "out" - ], - [ - "held", - "on" - ], - [ - "applied", - "apply" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "matter", - "question", - "thinking", - "true", - "work", - "working", - "how" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "long", - "than" - ], - [ - "streamlines", - "streamlines" - ], - [ - "matrices", - "matrix" - ], - [ - "step", - "follow" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "connectivity", - "connectivity" - ], - [ - "connection", - "connections", - "link" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_assign_custom_color", - "docstring": "The script uses scalars from an anatomy, data_per_point or data_per_streamline\n(e.g. commit_weights) to visualize them on the streamlines.\nSaves the RGB values in the data_per_point 'color' with 3 values per point:\n(color_x, color_y, color_z).\n\nIf called with .tck, the output will always be .trk, because data_per_point has\nno equivalent in tck file.\n\nIf used with a visualization software like MI-Brain\n(https://github.com/imeka/mi-brain), the 'color' dps is applied by default at\nloading time.\n\nCOLORING METHOD\nThis script maps the raw values from these sources to RGB using a colormap.\n --use_dpp: The data from each point is converted to a color.\n --use_dps: The same color is applied to all points of the streamline.\n --from_anatomy: The voxel's color is used for the points of the streamlines\n crossing it. See also scil_tractogram_project_map_to_streamlines.py. You\n can have more options to project maps to dpp, and then use --use_dpp here.\n --along_profile: The data used here is each point's position in the\n streamline. 
To have nice results, you should first uniformize head/tail.\n See scil_tractogram_uniformize_endpoints.py.\n --local_angle.\n\nCOLORING OPTIONS\nA minimum and a maximum range can be provided to clip values. If the range of\nvalues is too large for intuitive visualization, a log transform can be\napplied.\n\nIf the data provided from --use_dps, --use_dpp and --from_anatomy are integer\nlabels, they can be mapped using a LookUp Table (--LUT).\nThe file provided as a LUT should be either .txt or .npy and if the size is\nN=20, then the data provided should be between 1-20.\n\nA custom colormap can be provided using --colormap. It should be a string\ncontaining a colormap name OR multiple Matplotlib named colors separated by -.\nThe colormap used for mapping values to colors can be saved to a png/jpg image\nusing the --out_colorbar option.\n\nSee also: scil_tractogram_assign_uniform_color.py, for simplified options.\n\nFormerly: scil_assign_custom_color_to_tractogram.py", - "help": "", - "synonyms": [ - [ - "anatomy", - "anatomy" - ], - [ - "true", - "always" - ], - [ - "methods", - "method" - ], - [ - "average", - "per" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "applied", - "applied" - ], - [ - "considered", - "are" - ], - [ - "long", - "work", - "more" - ], - [ - "difference", - "point" - ], - [ - "brain", - "brain" - ], - [ - "left", - "from" - ], - [ - "clear", - "long", - "too" - ], - [ - "clear", - "long", - "work", - "they" - ], - [ - "meaning", - "name" - ], - [ - "methods", - "use" - ], - [ - "long", - "have" - ], - [ - "thinking", - "you" - ], - [ - "maps", - "mapping" - ], - [ - "long", - "a" - ], - [ - "application", - "systems", - "software" - ], - [ - "voxel", - "voxel" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "project", - "project" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "held", - "in" - ], - [ - "large", - "larger", - "large" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "long", - "result", - "work", - "working", - "time" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "clear", - "considered", - "future", - "lack", - "long", - "matter", - "question", - "result", - "work", - "because" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "streamlines", - "streamlines" - ], - [ - "future", - "result", - "specific", - "variety", - "work", - "these" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "larger", - "size", - "size" - ], - [ - "streamline", - "streamline" - ], - [ - "result", - "results" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "maps", - "maps" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": 
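As a hedged illustration of the coloring methods above (the help string of this entry is empty, so the flag spellings come from the docstring; filenames and the dps key are hypothetical, and the assumption that --use_dps takes a key name is not confirmed here):

    scil_tractogram_assign_custom_color.py bundle.trk bundle_colored.trk \
        --use_dps commit_weights --colormap viridis --out_colorbar cbar.png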
"scil_tractogram_assign_uniform_color", - "docstring": "Assign an hexadecimal RGB color to one or more Trackvis (.trk) tractogram.\n(If called with .tck, the output will always be .trk, because data_per_point\nhas no equivalent in tck file.)\n\nSaves the RGB values in the data_per_point 'color' with values\n(color_x, color_y, color_z).\n\nThe hexadecimal RGB color should be formatted as 0xRRGGBB or \"#RRGGBB\".\n\nSee also: scil_tractogram_assign_custom_color.py\n\nFormerly: scil_assign_uniform_color_to_tractograms.py", - "help": "usage: scil_tractogram_assign_uniform_color.py [-h]\n (--fill_color str | --dict_colors file.json)\n (--out_suffix [suffix] | --out_tractogram file.trk)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractograms\n [in_tractograms ...]\n\nAssign an hexadecimal RGB color to one or more Trackvis (.trk) tractogram.\n(If called with .tck, the output will always be .trk, because data_per_point\nhas no equivalent in tck file.)\n\nSaves the RGB values in the data_per_point 'color' with values\n(color_x, color_y, color_z).\n\nThe hexadecimal RGB color should be formatted as 0xRRGGBB or \"#RRGGBB\".\n\nSee also: scil_tractogram_assign_custom_color.py\n\nFormerly: scil_assign_uniform_color_to_tractograms.py\n\npositional arguments:\n in_tractograms Input tractograms (.trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nColoring Methods:\n --fill_color str Can be hexadecimal (ie. either \"#RRGGBB\" or 0xRRGGBB).\n --dict_colors file.json\n Json file: dictionnary mapping each tractogram's basename to a color.\n Do not put your file's extension in your dict.\n Same convention as --fill_color.\n\nOutput options:\n --out_suffix [suffix]\n Specify suffix to append to input basename.\n Mandatory choice if you run this script on multiple tractograms.\n Mandatory choice with --dict_colors.\n [None]\n --out_tractogram file.trk\n Output filename of colored tractogram (.trk).\n", - "synonyms": [ - [ - "anatomy", - "anatomy" - ], - [ - "true", - "always" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "long", - "work", - "more" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "thinking", - "you" - ], - [ - "maps", - "mapping" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "blue", - "colored" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "clear", - "considered", - "future", - "lack", - "long", - "matter", - "question", - "result", - "work", - "because" - ], - [ - "clear", - "left", - "work", - "put" - ], - [ - 
"methods", - "using" - ], - [ - "view", - "see" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "analysis", - "methodology", - "methods", - "processes", - "methods" - ], - [ - "individual", - "each" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_commit", - "docstring": "Convex Optimization Modeling for Microstructure Informed Tractography (COMMIT)\nestimates, globally, how a given tractogram explains the DWI in terms of signal\nfit, assuming a certain forward microstructure model. It assigns a weight to\neach streamline, which represents how well it explains the DWI signal globally.\nThe default forward microstructure model is stick-zeppelin-ball, which requires\nmulti-shell data and a peak file (principal fiber directions in each voxel,\ntypically from a field of fODFs).\n\nIt is possible to use the ball-and-stick model for single-shell and multi-shell\ndata. In this case, the peak file is not mandatory. Multi-shell should follow a\n\"NODDI protocol\" (low and high b-values), multiple shells with similar b-values\nshould not be used with COMMIT.\n\nThe output from COMMIT is:\n- fit_NRMSE.nii.gz\n fiting error (Normalized Root Mean Square Error)\n- fit_RMSE.nii.gz\n fiting error (Root Mean Square Error)\n- results.pickle\n Dictionary containing the experiment parameters and final weights\n- compartment_EC.nii.gz\n (est. Extra-Cellular signal fraction)\n- compartment_IC.nii.gz\n (est. Intra-Cellular signal fraction)\n- compartment_ISO.nii.gz\n (est. isotropic signal fraction (freewater comportment)):\n Each of COMMIT compartments\n- streamline_weights.txt\n Text file containing the commit weights for each streamline of the\n input tractogram.\n- streamlines_length.txt\n Text file containing the length (mm) of each streamline.\n- streamline_weights_by_length.txt\n Text file containing the commit weights for each streamline of the\n input tractogram, ordered by their length.\n- tot_streamline_weights\n Text file containing the total commit weights of each streamline.\n Equal to commit_weights * streamlines_length (W_i * L_i)\n- essential.trk / non_essential.trk\n Tractograms containing the streamlines below or equal (essential) and\n above (non_essential) a threshold_weights of 0.\n- decompose_commit.h5\n In the case where the input is a hdf5 file only, we will save an output\n hdf5 with the following information separated into each bundle's dps:\n - streamlines_weights\n - streamline_weights_by_length\n For each bundle, only the essential streamlines are kept.\n\nThis script can divide the input tractogram in two using a threshold to apply\non the streamlines' weight. The threshold used is 0.0, keeping only streamlines\nthat have non-zero weight and that contribute to explain the DWI signal.\nStreamlines with 0 weight are essentially not necessary according to COMMIT.\n\nCOMMIT2 is available only for HDF5 data from\nscil_tractogram_segment_bundles_for_connectivity.py and\nwith the --ball_stick option. Use the --commit2 option to activite it, slightly\nlonger computation time. 
This wrapper offers a simplified way to call COMMIT,\nbut does not allow using (or fine-tuning) every parameter. If you want to use\nCOMMIT with full access to all parameters,\nvisit: https://github.com/daducci/COMMIT\n\nWhen tuning parameters, such as --iso_diff, --para_diff, --perp_diff or\n--lambda_commit_2, you should evaluate the quality of results by:\n - Looking at the 'density' (GTM) of the connectome (essential tractogram)\n - Confirming the quality of WM bundle reconstruction (essential tractogram)\n - Inspecting the (N)RMSE map and looking for peaks or anomalies\n - Comparing the density map before and after (essential tractogram)\n\nFormerly: scil_run_commit.py", - "help": "usage: scil_tractogram_commit.py [-h] [--nbr_dir NBR_DIR]\n [--nbr_iter NBR_ITER] [--in_peaks IN_PEAKS]\n [--in_tracking_mask IN_TRACKING_MASK]\n [--commit2]\n [--lambda_commit_2 LAMBDA_COMMIT_2]\n [--ball_stick] [--para_diff PARA_DIFF]\n [--perp_diff PERP_DIFF [PERP_DIFF ...]]\n [--iso_diff ISO_DIFF [ISO_DIFF ...]]\n [--keep_whole_tractogram]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--tolerance tol]\n [--skip_b0_check] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram in_dwi in_bval in_bvec out_dir\n\nConvex Optimization Modeling for Microstructure Informed Tractography (COMMIT)\nestimates, globally, how a given tractogram explains the DWI in terms of signal\nfit, assuming a certain forward microstructure model. It assigns a weight to\neach streamline, which represents how well it explains the DWI signal globally.\nThe default forward microstructure model is stick-zeppelin-ball, which requires\nmulti-shell data and a peak file (principal fiber directions in each voxel,\ntypically from a field of fODFs).\n\nIt is possible to use the ball-and-stick model for single-shell and multi-shell\ndata. In this case, the peak file is not mandatory. Multi-shell should follow a\n\"NODDI protocol\" (low and high b-values), multiple shells with similar b-values\nshould not be used with COMMIT.\n\nThe output from COMMIT is:\n- fit_NRMSE.nii.gz\n fitting error (Normalized Root Mean Square Error)\n- fit_RMSE.nii.gz\n fitting error (Root Mean Square Error)\n- results.pickle\n Dictionary containing the experiment parameters and final weights\n- compartment_EC.nii.gz\n (est. Extra-Cellular signal fraction)\n- compartment_IC.nii.gz\n (est. Intra-Cellular signal fraction)\n- compartment_ISO.nii.gz\n (est. 
isotropic signal fraction (freewater compartment)):\n Each of COMMIT compartments\n- streamline_weights.txt\n Text file containing the commit weights for each streamline of the\n input tractogram.\n- streamlines_length.txt\n Text file containing the length (mm) of each streamline.\n- streamline_weights_by_length.txt\n Text file containing the commit weights for each streamline of the\n input tractogram, ordered by their length.\n- tot_streamline_weights\n Text file containing the total commit weights of each streamline.\n Equal to commit_weights * streamlines_length (W_i * L_i)\n- essential.trk / non_essential.trk\n Tractograms containing the streamlines below or equal (essential) and\n above (non_essential) a threshold_weights of 0.\n- decompose_commit.h5\n In the case where the input is a hdf5 file only, we will save an output\n hdf5 with the following information separated into each bundle's dps:\n - streamlines_weights\n - streamline_weights_by_length\n For each bundle, only the essential streamlines are kept.\n\nThis script can divide the input tractogram in two using a threshold to apply\non the streamlines' weight. The threshold used is 0.0, keeping only streamlines\nthat have non-zero weight and that contribute to explain the DWI signal.\nStreamlines with 0 weight are essentially not necessary according to COMMIT.\n\nCOMMIT2 is available only for HDF5 data from\nscil_tractogram_segment_bundles_for_connectivity.py and\nwith the --ball_stick option. Use the --commit2 option to activate it, at the cost of a slightly\nlonger computation time. This wrapper offers a simplified way to call COMMIT,\nbut does not allow using (or fine-tuning) every parameter. If you want to use\nCOMMIT with full access to all parameters,\nvisit: https://github.com/daducci/COMMIT\n\nWhen tuning parameters, such as --iso_diff, --para_diff, --perp_diff or\n--lambda_commit_2, you should evaluate the quality of results by:\n - Looking at the 'density' (GTM) of the connectome (essential tractogram)\n - Confirming the quality of WM bundle reconstruction (essential tractogram)\n - Inspecting the (N)RMSE map and looking for peaks or anomalies\n - Comparing the density map before and after (essential tractogram)\n\nFormerly: scil_run_commit.py\n\npositional arguments:\n in_tractogram Input tractogram (.trk or .tck or .h5).\n in_dwi Diffusion-weighted image used by COMMIT (.nii.gz).\n in_bval b-values in the FSL format (.bval).\n in_bvec b-vectors in the FSL format (.bvec).\n out_dir Output directory for the COMMIT maps.\n\noptions:\n -h, --help show this help message and exit\n --nbr_dir NBR_DIR Number of directions, on the half of the sphere,\n representing the possible orientations of the response functions [500].\n --nbr_iter NBR_ITER Maximum number of iterations [1000].\n --in_peaks IN_PEAKS Peaks file representing principal direction(s) locally,\n typically coming from fODFs. This file is mandatory for the default \n stick-zeppelin-ball model.\n --in_tracking_mask IN_TRACKING_MASK\n Binary mask where tractography was allowed.\n If not set, uses a binary mask computed from the streamlines.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). 
Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nCOMMIT2 options:\n --commit2 Run commit2, requires .h5 as input and will force\n ball&stick model.\n --lambda_commit_2 LAMBDA_COMMIT_2\n Specify the clustering prior strength [0.001].\n\nModel options:\n --ball_stick Use the ball&stick model, disable the zeppelin compartment.\n Only model suitable for single-shell data.\n --para_diff PARA_DIFF\n Parallel diffusivity in mm^2/s.\n Default for both ball_stick and stick_zeppelin_ball: 1.7E-3.\n --perp_diff PERP_DIFF [PERP_DIFF ...]\n Perpendicular diffusivity in mm^2/s.\n Default for ball_stick: None\n Default for stick_zeppelin_ball: [0.51E-3]\n --iso_diff ISO_DIFF [ISO_DIFF ...]\n Isotropic diffusivity in mm^2/s.\n Default for ball_stick: [2.0E-3]\n Default for stick_zeppelin_ball: [1.7E-3, 3.0E-3]\n\nTractogram options:\n --keep_whole_tractogram\n Save a tractogram copy with streamlines weights in the data_per_streamline\n [False].\n --compute_only Compute kernels only, --save_kernels must be used.\n\nKernels options:\n --save_kernels DIRECTORY\n Output directory for the COMMIT kernels.\n --load_kernels DIRECTORY\n Input directory where the COMMIT kernels are located.\n\nReferences:\n[1] Daducci, Alessandro, et al. \"COMMIT: convex optimization modeling for\n microstructure informed tractography.\" IEEE transactions on medical\n imaging 34.1 (2014): 246-257.\n[2] Schiavi, Simona, et al. 
\"A new method for accurate in vivo mapping of\n human brain connections using microstructural and anatomical information.\"\n Science advances 6.31 (2020): eaba8245.\n", - "synonyms": [ - [ - "step", - "continue" - ], - [ - "shape", - "view", - "look" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "methods", - "method" - ], - [ - "clear", - "long", - "step", - "thinking", - "view", - "work", - "working", - "way" - ], - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "fibre", - "fiber" - ], - [ - "order", - "set" - ], - [ - "long", - "have" - ], - [ - "direction", - "directions" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "diffusion", - "diffusion" - ], - [ - "threshold", - "threshold" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "matter", - "question", - "thinking", - "true", - "work", - "working", - "how" - ], - [ - "long", - "with" - ], - [ - "contrast", - "form", - "forms", - "larger", - "result", - "specific", - "variety", - "similar" - ], - [ - "clear", - "future", - "possibility", - "potential", - "question", - "result", - "specific", - "step", - "possible" - ], - [ - "exist", - "exists" - ], - [ - "imaging", - "imaging" - ], - [ - "step", - "follow" - ], - [ - "step", - "forward" - ], - [ - "cell", - "cellular" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "literature", - "scientific", - "studies", - "study", - "science" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "false", - "false" - ], - [ - "function", - "functions", - "functions" - ], - [ - "maps", - "map" - ], - [ - "bundles", - "bundle" - ], - [ - "examine", - "evaluate" - ], - [ - "long", - "full" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "result", - "following" - ], - [ - "left", - "from" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "applied", - "apply" - ], - [ - "future", - "will" - ], - [ - "anatomical", - "anatomical" - ], - [ - "represent", - "represents" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "signal", - "signal" - ], - [ - "vivo", - "vivo" - ], - [ - "considered", - "subsequently", - "was" - ], - [ - "connect", - "connected", - "connection", - "connections", - "connections" - ], - [ - "matter", - "question", - "does" - ], - [ - "streamline", - "streamline" - ], - [ - "order", - "allowed" - ], - [ - "long", - "longer" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "lack", - "quality" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "experiment", - "experiment" - ], - [ - "considered", - "specific", - "variety", - "such" - ], - [ - "considered", - "are" - ], - [ - "algorithm", - "optimization" - ], - [ - "left", - "into" - ], - [ - "work", - "find" - ], - [ - "left", - "half" - ], - [ - "represent", - "representing" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "work", - "even" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "thinking", - "i" - ], - [ 
- "data", - "knowledge", - "information" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "streamline", - "simplify" - ], - [ - "subject", - "terms" - ], - [ - "methods", - "using" - ], - [ - "left", - "after" - ], - [ - "left", - "before" - ], - [ - "thinking", - "working", - "looking" - ], - [ - "area", - "work", - "where" - ], - [ - "individual", - "each" - ], - [ - "high", - "higher", - "level", - "low" - ], - [ - "lack", - "minimal" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "long", - "two" - ], - [ - "step", - "start" - ], - [ - "matter", - "question", - "case" - ], - [ - "based", - "reported", - "according" - ], - [ - "principal", - "principal" - ], - [ - "order", - "necessary" - ], - [ - "total", - "number" - ], - [ - "animal", - "human", - "human" - ], - [ - "bundles", - "bundles" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "matter", - "question", - "thinking", - "understand" - ], - [ - "work", - "and" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "parameter", - "parameters", - "parameter" - ], - [ - "methods", - "use" - ], - [ - "direction", - "direction" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "maps", - "mapping" - ], - [ - "area", - "located" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "action", - "response" - ], - [ - "held", - "in" - ], - [ - "long", - "result", - "work", - "working", - "time" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "binary", - "binary" - ], - [ - "future", - "current" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "streamlines", - "streamlines" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "clear", - "considered", - "individual", - "lack", - "matter", - "result", - "specific", - "subject", - "certain" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "reliable", - "accurate" - ], - [ - "result", - "results" - ], - [ - "weighted", - "weighted" - ], - [ - "level", - "above" - ], - [ - "order", - "allow" - ], - [ - "question", - "explain" - ], - [ - "fundamental", - "essential" - ], - [ - "maps", - "maps" - ], - [ - "error", - "error" - ], - [ - "considered", - "experience", - "large", - "long", - "result", - "variety", - "work", - "working", - "well" - ], - [ - "increase", - "total", - "total" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_compress", - "docstring": "Compress tractogram by removing collinear (or almost) points.\n\nThe compression threshold represents the maximum distance (in mm) to the\noriginal position of the point.\n\nFormerly: scil_compress_streamlines.py", - "help": "usage: scil_tractogram_compress.py [-h] [-e ERROR_RATE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nCompress tractogram by removing collinear (or almost) points.\n\nThe compression threshold represents the maximum distance (in mm) to the\noriginal position of the point.\n\nFormerly: scil_compress_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file (trk or tck).\n out_tractogram Path of the output tractogram file (trk or tck).\n\noptions:\n -h, --help show 
this help message and exit\n -e ERROR_RATE Maximum compression distance in mm [0.1].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "work", - "and" - ], - [ - "represent", - "represents" - ], - [ - "held", - "on" - ], - [ - "methods", - "using" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "difference", - "point" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "threshold", - "threshold" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "left", - "result", - "when" - ], - [ - "held", - "in" - ], - [ - "considered", - "is" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_compute_TODI", - "docstring": "Compute a Track Orientation Density Image (TODI).\n\nEach segment of the streamlines is weighted by its length (to support\ncompressed streamlines).\n\nThis script can afterwards output a Track Density Image (TDI) or a TODI with SF\nor SH representation, based on streamlines' segments.\n\nFormerly: scil_compute_todi.py", - "help": "usage: scil_tractogram_compute_TODI.py [-h] [--sphere SPHERE] [--mask MASK]\n [--sh_order SH_ORDER]\n [--normalize_per_voxel]\n [--smooth_todi | --asymmetric]\n [--n_steps N_STEPS]\n [--out_mask OUT_MASK]\n [--out_tdi OUT_TDI]\n [--out_todi_sf OUT_TODI_SF]\n [--out_todi_sh OUT_TODI_SH]\n [--reference REFERENCE]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram\n\nCompute a Track Orientation Density Image (TODI).\n\nEach segment of the streamlines is weighted by its length (to support\ncompressed streamlines).\n\nThis script can afterwards output a Track Density Image (TDI) or a TODI with SF\nor SH representation, based on streamlines' segments.\n\nFormerly: scil_compute_todi.py\n\npositional arguments:\n in_tractogram Input streamlines file.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nComputing options:\n --sphere SPHERE Sphere used for the angular discretization. [repulsion724]\n --mask MASK If set, use the given mask.\n --sh_order SH_ORDER Order of the original SH. 
[8]\n --normalize_per_voxel\n If set, normalize each SF/SH at each voxel.\n --smooth_todi If set, smooth TODI (angular and spatial).\n --asymmetric If set, compute asymmetric TODI.\n Cannot be used with --smooth_todi.\n --n_steps N_STEPS Number of steps for streamline segments subdivision prior to binning [1].\n\nOutput files. Saves only when filename is set:\n --out_mask OUT_MASK Mask showing where TDI > 0.\n --out_tdi OUT_TDI Output Track Density Image (TDI).\n --out_todi_sf OUT_TODI_SF\n Output TODI, with SF (each direction\n on the sphere, requires a lot of memory)\n --out_todi_sh OUT_TODI_SH\n Output TODI, with SH coefficients.\n\nReferences:\n [1] Dhollander T, Emsell L, Van Hecke W, Maes F, Sunaert S, Suetens P.\n Track orientation density imaging (TODI) and\n track orientation distribution (TOD) based tractography.\n NeuroImage. 2014 Jul 1;94:312-36.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "anatomy", - "anatomy" - ], - [ - "memory", - "memory" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "orientation", - "orientation" - ], - [ - "work", - "and" - ], - [ - "order", - "order" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "papers", - "paper" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "experience", - "thinking", - "lot" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "step", - "steps" - ], - [ - "direction", - "directions" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "held", - "on" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "process", - "implementation" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "indicating", - "showing" - ], - [ - "methods", - "using" - ], - [ - "streamlines", - "streamlines" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "imaging", - "imaging" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "area", - "work", - "where" - ], - [ - "individual", - "each" - ], - [ - "streamline", - "streamline" - ], - [ - "weighted", - "weighted" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "subsequently", - "afterwards" - ], - [ - "based", - "based" - ], - [ - "spatial", - "temporal", - "spatial" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "exist", - "cannot" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_compute_density_map", - "docstring": "Compute a density map from a streamlines file. Can be binary.\n\nThis script correctly handles compressed streamlines.\n\nFormerly: scil_compute_streamlines_density_map.py", - "help": "usage: scil_tractogram_compute_density_map.py [-h] [--binary [FIXED_VALUE]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_img\n\nCompute a density map from a streamlines file. 
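For scil_tractogram_compute_TODI.py, a minimal sketch (hypothetical filenames) producing both a TDI and an SH-based TODI in one pass, using only the flags documented above:

    scil_tractogram_compute_TODI.py tracking.trk --out_tdi tdi.nii.gz \
        --out_todi_sh todi_sh.nii.gz --sh_order 8 --normalize_per_voxel \
        --reference t1.nii.gz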
Can be binary.\n\nThis script correctly handles compressed streamlines.\n\nFormerly: scil_compute_streamlines_density_map.py\n\npositional arguments:\n in_bundle Tractogram filename.\n out_img Path of the output image file.\n\noptions:\n -h, --help show this help message and exit\n --binary [FIXED_VALUE]\n If set, will store the same value for all intersected voxels, \n creating a binary map. When set without a value, 1 is used (and dtype \n uint8). If a value is given, it will be used as the stored value.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "anatomy", - "anatomy" - ], - [ - "work", - "and" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "create", - "creating" - ], - [ - "order", - "set" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "lack", - "result", - "without" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "streamlines", - "streamlines" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "intersected", - "intersected" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "maps", - "map" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_convert", - "docstring": "Conversion of '.tck', '.trk', '.fib', '.vtk' and 'dpy' files using updated file\nformat standard. TRK file always needs a reference file, a NIFTI, for\nconversion. The FIB file format is in fact a VTK; MITK Diffusion supports it.\n\nFormerly: scil_convert_tractogram.py", - "help": "usage: scil_tractogram_convert.py [-h] [--no_bbox_check]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram output_name\n\nConversion of '.tck', '.trk', '.fib', '.vtk' and 'dpy' files using updated file\nformat standard. TRK file always needs a reference file, a NIFTI, for\nconversion. The FIB file format is in fact a VTK; MITK Diffusion supports it.\n\nFormerly: scil_convert_tractogram.py\n\npositional arguments:\n in_tractogram Tractogram filename. Format must be one of \n trk, tck, vtk, fib, dpy\n output_name Output filename. Format must be one of \n trk, tck, vtk, fib, dpy\n\noptions:\n -h, --help show this help message and exit\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
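For scil_tractogram_compute_density_map.py, the simplest sketch (hypothetical filenames) is a binary map of every voxel a bundle traverses:

    scil_tractogram_compute_density_map.py AF_L.trk AF_L_density.nii.gz --binary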
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "anatomy", - "anatomy" - ], - [ - "true", - "always" - ], - [ - "work", - "and" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "invalid", - "valid", - "invalid" - ], - [ - "long", - "a" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "diffusion", - "diffusion" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "supported", - "supports" - ], - [ - "methods", - "using" - ], - [ - "streamlines", - "streamlines" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "clear", - "considered", - "difference", - "lack", - "matter", - "question", - "result", - "subject", - "thinking", - "true", - "view", - "fact" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_convert_hdf5_to_trk", - "docstring": "Save connections of a hdf5 created with\n>> scil_tractogram_segment_bundles_for_connectivity.py.\n\nUseful for quality control and visual inspections.\n\nIt can either save all connections (default), individual connections specified\nwith --edge_keys or connections from specific nodes specified with --node_keys.\n\nWith the option --save_empty, a label_lists, as a txt file, must be provided.\nThis option saves existing connections and empty connections.\n\nThe output is a directory containing the thousands of connections:\nout_dir/\n |-- LABEL1_LABEL1.trk\n |-- LABEL1_LABEL2.trk\n |-- [...]\n |-- LABEL90_LABEL90.trk\n\nFormerly: scil_save_connections_from_hdf5.py", - "help": "usage: scil_tractogram_convert_hdf5_to_trk.py [-h] [--include_dps]\n [--edge_keys LABEL1_LABEL2 [LABEL1_LABEL2 ...]\n | --node_keys NODE [NODE ...]]\n [--save_empty labels_list]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_hdf5 out_dir\n\nSave connections of a hdf5 created with\n>> scil_tractogram_segment_bundles_for_connectivity.py.\n\nUseful for quality control and visual inspections.\n\nIt can either save all connections (default), individual connections specified\nwith --edge_keys or connections from specific nodes specified with --node_keys.\n\nWith the option --save_empty, a label_lists, as a txt file, must be provided.\nThis option saves existing connections and empty connections.\n\nThe output is a directory containing the thousands of connections:\nout_dir/\n |-- LABEL1_LABEL1.trk\n |-- LABEL1_LABEL2.trk\n |-- [...]\n |-- LABEL90_LABEL90.trk\n\nFormerly: scil_save_connections_from_hdf5.py\n\npositional arguments:\n in_hdf5 HDF5 filename (.h5) containing decomposed connections.\n out_dir Path of the output directory.\n\noptions:\n -h, --help show this help message and exit\n --include_dps Include the data_per_streamline the metadata.\n --edge_keys LABEL1_LABEL2 [LABEL1_LABEL2 ...]\n Keys to identify the edges (connections) of interest.\n --node_keys NODE [NODE ...]\n Node keys to identify the sub-networks of interest.\n Equivalent to adding any --edge_keys node_LABEL2 or LABEL2_node.\n 
--save_empty labels_list\n Save empty connections. Then, the list of possible connections is \n not found from the hdf5 but inferred from labels_list, a txt file \n containing a list of nodes saved by the decomposition script.\n *If used together with edge_keys or node_keys, the provided nodes must \n exist in labels_list.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n CAREFUL. The whole output directory will be deleted if it exists.\n", - "synonyms": [ - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "work", - "and" - ], - [ - "working", - "together" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "specific", - "specific" - ], - [ - "exist", - "exist" - ], - [ - "long", - "a" - ], - [ - "tool", - "useful" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "higher", - "interest" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "variety", - "include" - ], - [ - "action", - "clear", - "considered", - "future", - "matter", - "possibility", - "potential", - "question", - "result", - "specific", - "any" - ], - [ - "long", - "with" - ], - [ - "parameters", - "specified" - ], - [ - "visual", - "visual" - ], - [ - "network", - "networks", - "networks" - ], - [ - "clear", - "adding" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "methods", - "using" - ], - [ - "clear", - "future", - "possibility", - "potential", - "question", - "result", - "specific", - "step", - "possible" - ], - [ - "exist", - "exists" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "individual", - "individuals", - "individual" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "connect", - "connected", - "connection", - "connections", - "connections" - ], - [ - "work", - "all" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "lack", - "quality" - ], - [ - "create", - "created" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_count_streamlines", - "docstring": "Return the number of streamlines in a tractogram. Only supports trk and tck in\norder to support the lazy loading from nibabel.\n\nFormerly: scil_count_streamlines.py", - "help": "usage: scil_tractogram_count_streamlines.py [-h] [--print_count_alone]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n in_tractogram\n\nReturn the number of streamlines in a tractogram. Only supports trk and tck in\norder to support the lazy loading from nibabel.\n\nFormerly: scil_count_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --print_count_alone If true, prints the result only. 
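As a hedged sketch for scil_tractogram_convert_hdf5_to_trk.py (hypothetical label values and filenames), extracting only two connections of interest from a decomposed hdf5:

    scil_tractogram_convert_hdf5_to_trk.py sub01__decomposed.h5 connections/ \
        --edge_keys 16_61 16_70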
\n Else, prints the bundle name and count formatted as a json dict.(default)\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "long", - "a" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "held", - "on" - ], - [ - "methods", - "using" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "create", - "experience", - "matter", - "thinking", - "true", - "sort" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "order", - "order" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "streamlines", - "streamlines" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "meaning", - "true", - "true" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "left", - "from" - ], - [ - "matter", - "thinking", - "else" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "bundles", - "bundle" - ], - [ - "left", - "result", - "when" - ], - [ - "held", - "in" - ], - [ - "considered", - "is" - ], - [ - "lack", - "loss", - "result", - "result" - ], - [ - "meaning", - "name" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_cut_streamlines", - "docstring": "Filters streamlines and only keeps the parts of streamlines within or\nbetween the ROIs. Two options are available.\n\nInput mask:\n\nThe mask has either 1 entity/blob or\n2 entities/blobs (does not support disconnected voxels).\nThe option --biggest_blob can help if you have such a scenario.\n\nThe 1 entity scenario will 'trim' the streamlines so their longest segment is\nwithin the bounding box or a binary mask.\n\nThe 2 entities scenario will cut streamlines so their segment are within the\nbounding box or going from binary mask #1 to binary mask #2.\n\nInput label:\n\nThe label MUST contain 2 labels different from zero.\nLabel values could be anything.\nThe script will cut streamlines going from label 1 to label 2.\n\nBoth inputs and scenarios will erase data_per_point and data_per_streamline.\n\nFormerly: scil_cut_streamlines.py", - "help": "usage: scil_tractogram_cut_streamlines.py [-h] (--mask MASK | --label LABEL)\n [--label_ids LABEL_IDS LABEL_IDS]\n [--resample STEP_SIZE]\n [--biggest_blob]\n [--compress [COMPRESS_TH]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nFilters streamlines and only keeps the parts of streamlines within or\nbetween the ROIs. 
Two options are available.\n\nInput mask:\n\nThe mask has either 1 entity/blob or\n2 entities/blobs (does not support disconnected voxels).\nThe option --biggest_blob can help if you have such a scenario.\n\nThe 1 entity scenario will 'trim' the streamlines so their longest segment is\nwithin the bounding box or a binary mask.\n\nThe 2 entities scenario will cut streamlines so their segments are within the\nbounding box or going from binary mask #1 to binary mask #2.\n\nInput label:\n\nThe label MUST contain 2 labels different from zero.\nLabel values could be anything.\nThe script will cut streamlines going from label 1 to label 2.\n\nBoth inputs and scenarios will erase data_per_point and data_per_streamline.\n\nFormerly: scil_cut_streamlines.py\n\npositional arguments:\n in_tractogram Input tractogram file.\n out_tractogram Output tractogram file. Note: data_per_point will be discarded, if any!\n\noptions:\n -h, --help show this help message and exit\n --label_ids LABEL_IDS LABEL_IDS\n List of labels indices to use to cut streamlines (2 values).\n --resample STEP_SIZE Resample streamlines to a specific step-size in mm [None].\n --biggest_blob Use the biggest entity and force the 1 ROI scenario.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm. [0.1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMandatory mask options:\n Choose between mask or label input.\n\n --mask MASK Binary mask containing either 1 or 2 blobs.\n --label LABEL Label containing 2 blobs.\n", - "synonyms": [ - [ - "individual", - "specific", - "unique", - "variety", - "different" - ], - [ - "anatomy", - "anatomy" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "matter", - "true", - "anything" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "long", - "have" - ], - [ - "thinking", - "you" - ], - [ - "specific", - "specific" - ], - [ - "long", - "a" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "action", - "clear", - "considered", - "future", - "matter", - "possibility", - "potential", - "question", - "result", - "specific", - "any" - ], - [ - "clear", - "long", - "matter", - "result", - "work", - "so" - ], - [ - "action", - "process", - "step", - "step" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "streamlines", - "streamlines" - ], - [ - "considered", - "specific", - "variety", - "such" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "result", - "resulting" - ], - [ - "considered", - "is" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "larger", - "size", 
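A minimal sketch of the two-label scenario for scil_tractogram_cut_streamlines.py (hypothetical filenames and label values), keeping only the streamline segments running from label 8 to label 10:

    scil_tractogram_cut_streamlines.py tracking.trk tracking_cut.trk \
        --label labels.nii.gz --label_ids 8 10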
- "size" - ], - [ - "matter", - "question", - "does" - ], - [ - "streamline", - "streamline" - ], - [ - "long", - "two" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "areas", - "parts" - ], - [ - "step", - "thinking", - "going" - ], - [ - "clear", - "result", - "work", - "could" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_detect_loops", - "docstring": "This script can be used to remove loops in two types of streamline datasets:\n\n - Whole brain: For this type, the script removes streamlines if they\n make a loop with an angle of more than 360 degrees. It's possible to change\n this angle with the --angle option. Warning: Don't use --qb option for a\n whole brain tractography.\n\n - Bundle dataset: For this type, it is possible to remove loops and\n streamlines outside the bundle. For the sharp angle turn, use --qb option.\n\nSee also:\n scil_tractogram_filter_by_anatomy.py\n\nFormerly: scil_detect_streamlines_loops.py", - "help": "usage: scil_tractogram_detect_loops.py [-h]\n [--looping_tractogram out_filename]\n [--qb [threshold]] [--angle ANGLE]\n [--display_counts] [--no_empty]\n [--indent INDENT] [--sort_keys]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script can be used to remove loops in two types of streamline datasets:\n\n - Whole brain: For this type, the script removes streamlines if they\n make a loop with an angle of more than 360 degrees. It's possible to change\n this angle with the --angle option. Warning: Don't use --qb option for a\n whole brain tractography.\n\n - Bundle dataset: For this type, it is possible to remove loops and\n streamlines outside the bundle. For the sharp angle turn, use --qb option.\n\nSee also:\n scil_tractogram_filter_by_anatomy.py\n\nFormerly: scil_detect_streamlines_loops.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_tractogram Output tractogram without loops.\n\noptions:\n -h, --help show this help message and exit\n --looping_tractogram out_filename\n If set, saves detected looping streamlines.\n --qb [threshold] If set, uses QuickBundles to detect outliers (loops, sharp angle \n turns). Given threshold is the maximal streamline to bundle \n distance for a streamline to be considered as a tracking error.\n Default if set: [8.0]\n --angle ANGLE Maximum looping (or turning) angle of\n a streamline in degrees. [360]\n --display_counts Print streamline count before and after filtering\n --no_empty If set, will not save outputs if they are empty.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\nReferences:\n QuickBundles, based on [Garyfallidis12] Frontiers in Neuroscience, 2012.\n",
    "synonyms": [ ... ],
    "keywords": []
},
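The 360-degree criterion above is a cumulative turning angle: summing the angles between successive segments measures how much a streamline winds around on itself. A rough numpy illustration of that quantity (not the script's actual code; points is a hypothetical (N, 3) array):

    import numpy as np

    def total_turning_angle_deg(points):
        # Sum of angles (in degrees) between successive segments of a polyline.
        segs = np.diff(points, axis=0)
        segs = segs / np.linalg.norm(segs, axis=1, keepdims=True)
        cosines = np.clip(np.sum(segs[:-1] * segs[1:], axis=1), -1.0, 1.0)
        return np.degrees(np.arccos(cosines)).sum()

A streamline would then be flagged as looping when this total exceeds the --angle value (360 by default).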
{
    "name": "scil_tractogram_dpp_math",
    "docstring": "Performs an operation on data per point (dpp) from input streamlines.\n\nAlthough the input data always comes from the dpp, the output can be either\na dpp or a data_per_streamline (dps), depending on the chosen options.\nTwo modes of operation are supported: dpp and dps.\n - In dps mode, the operation is performed on dpp across the dimension of\n the streamlines, resulting in a single value (or array in the 4D case)\n per streamline, stored as dps.\n - In dpp mode, the operation is performed on each point separately,\n resulting in a new dpp.\n\nIf endpoints_only and dpp mode is set, the operation will only be calculated at\nthe streamline endpoints; the rest of the values along the streamline will be\nNaN.\n\nIf endpoints_only and dps mode is set, the operation will be calculated across the\ndata at the endpoints and stored as a single value (or array in the 4D case)\nper streamline.\n\nEndpoint-only operation:\ncorrelation: correlation calculated between arrays extracted from streamline\nendpoints (data must be multivalued per point); dps mode must be set.",
    "help": "usage: scil_tractogram_dpp_math.py [-h] --mode {dpp,dps} --in_dpp_name key\n [key ...] --out_keys key [key ...]\n [--endpoints_only] [--keep_all_dpp_dps]\n [--overwrite_dpp_dps]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--no_bbox_check]\n OPERATION INPUT_FILE OUTPUT_FILE\n\nPerforms an operation on data per point (dpp) from input streamlines.\n\nAlthough the input data always comes from the dpp, the output can be either\na dpp or a data_per_streamline (dps), depending on the chosen options.\nTwo modes of operation are supported: dpp and dps.\n - In dps mode, the operation is performed on dpp across the dimension of\n the streamlines, resulting in a single value (or array in the 4D case)\n per streamline, stored as dps.\n - In dpp mode, the operation is performed on each point separately,\n resulting in a new dpp.\n\nIf endpoints_only and dpp mode is set, the operation will only be calculated at\nthe streamline endpoints; the rest of the values along the streamline will be\nNaN.\n\nIf endpoints_only and dps mode is set, the operation will be calculated across the\ndata at the endpoints and stored as a single value (or array in the 4D case)\nper streamline.\n\nEndpoint-only operation:\ncorrelation: correlation calculated between arrays extracted from streamline\nendpoints (data must be multivalued per point); dps mode must be set.\n\npositional arguments:\n OPERATION The type of operation to be performed on the \n streamlines. Must be one of the following: \n [mean, sum, min, max, correlation.]\n INPUT_FILE Input tractogram containing streamlines and metadata.\n OUTPUT_FILE The file where the remaining streamlines \n are saved.\n\noptions:\n -h, --help show this help message and exit\n --mode {dpp,dps} Set to dps if the operation is to be performed \n across all dimensions resulting in a single value per \n streamline. Set to dpp if the operation is to be \n performed on each point separately resulting in a \n single value per point.\n --in_dpp_name key [key ...]\n Name or list of names of the data_per_point for \n operation to be performed on. If more than one dpp \n is selected, the same operation will be applied \n separately to each one.\n --out_keys key [key ...]\n Name of the resulting data_per_point or \n data_per_streamline to be saved in the output \n tractogram. If more than one --in_dpp_name was used, \n enter the same number of --out_keys values.\n --endpoints_only If set, will only perform operation on endpoints. \n If not set, will perform operation on all streamline \n points.\n --keep_all_dpp_dps If set, previous data_per_point will be preserved \n in the output tractogram. Else, only --out_dpp_name \n keys will be saved.\n --overwrite_dpp_dps If set, if --keep_all_dpp_dps is set and some \n --out_keys keys already existed in your \n data_per_point or data_per_streamline, allow \n overwriting old data_per_point.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n",
    "synonyms": [ ... ],
    "keywords": []
},
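In dps mode the reduction runs across the points of each streamline; in dpp mode it runs pointwise. A small numpy sketch of the two reductions and of the endpoint correlation (illustrative, with hypothetical inputs; not the script's implementation):

    import numpy as np

    dpp = np.random.rand(50, 3)   # hypothetical dpp for one streamline: (n_points, n_values)

    dps_mean = dpp.mean(axis=0)   # dps mode: one value (or array) per streamline
    dpp_mean = dpp.mean(axis=1)   # dpp mode: one value per point

    # Endpoint correlation (multivalued data per point, dps mode required):
    corr = np.corrcoef(dpp[0], dpp[-1])[0, 1]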
{
    "name": "scil_tractogram_extract_ushape",
    "docstring": "This script extracts streamlines depending on their U-shapeness.\nThis script is a replica of the Trackvis method.\n\nWhen ufactor is close to:\n* 0 it defines straight streamlines\n* 1 it defines U-fibers\n* -1 it defines S-fibers\n\nFormerly: scil_extract_ushape.py",
    "help": "usage: scil_tractogram_extract_ushape.py [-h] [--minU MINU] [--maxU MAXU]\n [--remaining_tractogram filename]\n [--no_empty] [--display_counts]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script extracts streamlines depending on their U-shapeness.\nThis script is a replica of the Trackvis method.\n\nWhen ufactor is close to:\n* 0 it defines straight streamlines\n* 1 it defines U-fibers\n* -1 it defines S-fibers\n\nFormerly: scil_extract_ushape.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_tractogram Output tractogram file name.\n\noptions:\n -h, --help show this help message and exit\n --minU MINU Min ufactor value. [0.5]\n --maxU MAXU Max ufactor value. [1.0]\n --remaining_tractogram filename\n If set, saves remaining streamlines.\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n",
    "synonyms": [ ... ],
    "keywords": []
},
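Selection then reduces to keeping streamlines whose ufactor lies in [minU, maxU]. A sketch of that selection (the ufactor scoring function is hypothetical here; the script replicates the Trackvis definition, which is not shown):

    def keep_by_ushape(streamlines, ufactor, min_u=0.5, max_u=1.0):
        # ufactor: hypothetical callable mapping an (N, 3) array to a float in [-1, 1]
        return [s for s in streamlines if min_u <= ufactor(s) <= max_u]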
{
    "name": "scil_tractogram_filter_by_anatomy",
    "docstring": "This script filters streamlines in a tractogram according to their geometrical\nproperties (i.e. limiting their length and looping angle) and their anatomical\nending properties (i.e. the anatomical tissue or region their endpoints lie\nin).\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nThe filtering is performed sequentially in four steps, each step processing the\ndata on the output of the previous step:\n\n Step 1 - Remove streamlines below the minimum length and above the\n maximum length. These thresholds must be set with the ``--minL``\n and ``--maxL`` options.\n Step 2 - Ensure that no streamlines end in the cerebrospinal fluid\n according to the provided parcellation. A binary mask can be used\n alternatively through the ``--csf_bin`` option.\n Step 3 - Ensure that no streamlines end in white matter by ensuring that\n they reach the cortical regions according to the provided\n parcellation. The cortical regions of the parcellation can be\n dilated using the ``--dilate_ctx`` option.\n Step 4 - Remove streamlines if they make a loop with an angle above a\n certain threshold. It's possible to change this angle with the\n ``--angle`` option.\n\nLength and loop-based filtering (steps 1 and 4) will not have practical effects\nif no specific thresholds are provided (but will still be executed), since\ndefault values are 0 for the minimum allowed length and infinite for the\nmaximum allowed length and angle.\n\nThe anatomical region endings filtering requires a parcellation or label image\nfile including the cerebrospinal fluid and gray matter (cortical) regions\naccording to the Desikan-Killiany atlas. Intermediate tractograms (results of\neach step and outliers) and volumes can be saved throughout the process.\n\nExample usages:\n\n# Filter length, looping angle and anatomical ending region\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --minL 20 --maxL 200 --angle 300\n# Filter only anatomical ending region, with WM dilation and provided csf mask\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --csf_bin csf_bin.nii.gz --dilate_ctx 2\n\nFormerly: scil_filter_streamlines_anatomically.py\n\nNOTE: As of version 2.0.0, the dilation of the cortical mask has changed from\nan in-house process to scipy's dilation. Results may differ from previous\nversions.",
    "help": "usage: scil_tractogram_filter_by_anatomy.py [-h] [--minL MINL] [--maxL MAXL]\n [--angle ANGLE]\n [--csf_bin CSF_BIN]\n [--dilate_ctx value]\n [--save_intermediate_tractograms]\n [--save_volumes] [--save_counts]\n [--save_rejected] [--no_empty]\n [--indent INDENT] [--sort_keys]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram in_wmparc out_path\n\nThis script filters streamlines in a tractogram according to their geometrical\nproperties (i.e. limiting their length and looping angle) and their anatomical\nending properties (i.e. the anatomical tissue or region their endpoints lie\nin).\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nThe filtering is performed sequentially in four steps, each step processing the\ndata on the output of the previous step:\n\n Step 1 - Remove streamlines below the minimum length and above the\n maximum length. These thresholds must be set with the ``--minL``\n and ``--maxL`` options.\n Step 2 - Ensure that no streamlines end in the cerebrospinal fluid\n according to the provided parcellation. A binary mask can be used\n alternatively through the ``--csf_bin`` option.\n Step 3 - Ensure that no streamlines end in white matter by ensuring that\n they reach the cortical regions according to the provided\n parcellation. The cortical regions of the parcellation can be\n dilated using the ``--dilate_ctx`` option.\n Step 4 - Remove streamlines if they make a loop with an angle above a\n certain threshold.
It's possible to change this angle with the\n ``--angle`` option.\n\nLength and loop-based filtering (steps 1 and 4) will not have practical effects\nif no specific thresholds are provided (but will still be executed), since\ndefault values are 0 for the minimum allowed length and infinite for the\nmaximum allowed length and angle.\n\nThe anatomical region endings filtering requires a parcellation or label image\nfile including the cerebrospinal fluid and gray matter (cortical) regions\naccording to the Desikan-Killiany atlas. Intermediate tractograms (results of\neach step and outliers) and volumes can be saved throughout the process.\n\nExample usages:\n\n# Filter length, looping angle and anatomical ending region\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --minL 20 --maxL 200 --angle 300\n# Filter only anatomical ending region, with WM dilation and provided csf mask\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --csf_bin csf_bin.nii.gz --dilate_ctx 2\n\nFormerly: scil_filter_streamlines_anatomically.py\n\nNOTE: As of version 2.0.0, the dilation of the cortical mask has changed from\nan in-house process to scipy's dilation. Results may differ from previous\nversions.\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n in_wmparc Path of the white matter parcellation atlas (.nii or .nii.gz)\n out_path Path to the output files.\n\noptions:\n -h, --help show this help message and exit\n --minL MINL Minimum length of streamlines, in mm. [0.0]\n --maxL MAXL Maximum length of streamlines, in mm. [inf]\n --angle ANGLE Maximum looping (or turning) angle of a streamline, \n in degrees. [inf]\n --csf_bin CSF_BIN Allow CSF endings filtering with this binary\n mask instead of using the atlas (.nii or .nii.gz)\n --dilate_ctx value If set, dilate the cortical labels. Value is the dilation \n radius, in voxels (an integer > 0)\n --save_intermediate_tractograms\n Save accepted and discarded streamlines after each step.\n --save_volumes Save volumetric images (e.g. binarised label \n images, etc) in the filtering process.\n --save_counts Save the streamline counts to a file (.json)\n --save_rejected Save rejected streamlines to output tractogram.\n --no_empty Do not write file if there is no streamline.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\n References:\n [1] J\u00f6rgens, D., Descoteaux, M., Moreno, R., 2021. Challenges for\n tractogram filtering. In: \u00d6zarslan, E., Schultz, T., Zhang, E., Fuster,\n A. (Eds.), Anisotropy Across Fields and Scales. Springer. Mathematics\n and Visualization.\n [2] Legarreta, J., Petit, L., Rheault, F., Theaud, G., Lemaire, C.,\n Descoteaux, M., Jodoin, P.M. Filtering in tractography using\n autoencoders (FINTA). Medical Image Analysis.
2021\n \n",
    "synonyms": [ ... ],
    "keywords": []
},
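Because the four steps run sequentially, each on the previous step's survivors, the whole filter composes as a chain of predicates. A compact sketch of that composition (illustrative only; the predicate functions named in the comment are hypothetical stand-ins for the script's length, CSF, cortex and loop checks):

    def run_pipeline(streamlines, steps):
        # steps: list of (name, keep) pairs, where keep(streamline) -> bool
        counts = []
        for name, keep in steps:
            streamlines = [s for s in streamlines if keep(s)]
            counts.append((name, len(streamlines)))  # survivors after each step
        return streamlines, counts

    # steps = [("length", length_ok), ("csf", not_in_csf),
    #          ("cortex", reaches_cortex), ("loop", not_looping)]

{
    "name": "scil_tractogram_filter_by_length",
    "docstring": "Script to filter streamlines based on their lengths.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Filtering by length is its step 1)\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_length.py",
    "help": "usage: scil_tractogram_filter_by_length.py [-h] [--minL MINL] [--maxL MAXL]\n [--no_empty] [--display_counts]\n [--save_rejected] [--indent INDENT]\n [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nScript to filter streamlines based on their lengths.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Filtering by length is its step 1)\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_length.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --minL MINL Minimum length of streamlines, in mm. [0.0]\n --maxL MAXL Maximum length of streamlines, in mm. [inf]\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering\n --save_rejected Save rejected streamlines to output tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.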
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n",
    "synonyms": [ ... ],
    "keywords": []
},
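Streamline length here is the summed Euclidean distance between consecutive points, compared against --minL/--maxL. A one-liner sketch (numpy; s is a hypothetical (N, 3) array of coordinates in mm):

    import numpy as np

    def length_mm(s):
        # Total Euclidean length of a polyline given as an (N, 3) array.
        return np.linalg.norm(np.diff(s, axis=0), axis=1).sum()

    # kept = [s for s in streamlines if min_l <= length_mm(s) <= max_l]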
{
    "name": "scil_tractogram_filter_by_orientation",
    "docstring": "Script to filter streamlines based on their distance traveled in a specific\ndimension (x, y, or z).\n\nUseful to help differentiate bundles.\n\nExamples: In a brain aligned with the x coordinates along the left-right axis\nand the y coordinates along the anterior-posterior axis, a streamline from the ...\n - corpus callosum will likely travel a very short distance in the y axis.\n - cingulum will likely travel a very short distance in the x axis.\n\nNote: we consider that x, y, z are the coordinates of the streamlines; we\ndo not verify if they are aligned with the brain's orientation.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_orientation.py",
    "help": "usage: scil_tractogram_filter_by_orientation.py [-h] [--min_x MIN_X]\n [--max_x MAX_X]\n [--min_y MIN_Y]\n [--max_y MAX_Y]\n [--min_z MIN_Z]\n [--max_z MAX_Z] [--use_abs]\n [--no_empty]\n [--display_counts]\n [--save_rejected filename]\n [--indent INDENT]\n [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractogram out_tractogram\n\nScript to filter streamlines based on their distance traveled in a specific\ndimension (x, y, or z).\n\nUseful to help differentiate bundles.\n\nExamples: In a brain aligned with the x coordinates along the left-right axis\nand the y coordinates along the anterior-posterior axis, a streamline from the ...\n - corpus callosum will likely travel a very short distance in the y axis.\n - cingulum will likely travel a very short distance in the x axis.\n\nNote: we consider that x, y, z are the coordinates of the streamlines; we\ndo not verify if they are aligned with the brain's orientation.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_orientation.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --min_x MIN_X Minimum distance in the first dimension, in mm. [0.0]\n --max_x MAX_X Maximum distance in the first dimension, in mm. [inf]\n --min_y MIN_Y Minimum distance in the second dimension, in mm. [0.0]\n --max_y MAX_Y Maximum distance in the second dimension, in mm. [inf]\n --min_z MIN_Z Minimum distance in the third dimension, in mm. [0.0]\n --max_z MAX_Z Maximum distance in the third dimension, in mm. [inf]\n --use_abs If set, will use the total distance in absolute value (e.g. coming back on yourself will contribute to the total distance instead of cancelling it).\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering.\n --save_rejected filename\n Save the SFT of rejected streamlines.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n",
    "synonyms": [ ... ],
    "keywords": []
},
{
    "name": "scil_tractogram_filter_by_roi",
    "docstring": "Filtering of a tractogram based on any combination of conditions involving a\nROI (ex: keep streamlines whose endpoints are inside the ROI, exclude\nstreamlines not entirely included in a ROI, etc.)\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Can reject streamlines with endpoints in the WM or the CSF based on\n labels)\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n\nCondition\n---------\nFor every type of ROI, two or three values are expected: MODE CRITERIA DISTANCE\n(DISTANCE is always optional)\n- MODE must be one of these values: ['any', 'all', 'either_end', 'both_ends']\n - any: any part of the streamline must be in the mask.\n - all: all parts of the streamline must be in the mask.\n - either_end: at least one end of the streamline must be in the mask.\n - both_ends: both ends of the streamline must be in the mask.\n- CRITERIA must be one of these values: ['include', 'exclude']\n - Include: If condition from MODE is respected, streamline is included.\n - Exclude: If condition from MODE is respected, streamline is excluded.\n- DISTANCE must be an int and is optional.\n\n\nType of ROI\n-----------\n- Drawn ROI: Directly loaded from a binary file.\n- Atlas ROI: Selected label from an atlas.\n - ID is one or multiple integer values in the atlas. If multiple values,\n ID needs to be between quotes.\n Example: \"1:6 9 10:15\" will use values between 1 and 6 and values\n between 10 and 15 (inclusive), as well as value 9.\n- BDO: The ROI is the interior of a bounding box.\n- Planes: The ROI is the equivalent of a one-voxel plane.\n * Using mode 'all' with x/y/z plane works but makes very little sense.\n\nNote: `--drawn_roi MASK.nii.gz all include` is equivalent to\n `--drawn_roi INVERSE_MASK.nii.gz any exclude`\n\nFor example, this allows finding all streamlines entirely in the WM in one\ncommand (without manually inverting the mask first) or removing any streamline\nstaying in the GM without getting out.\n\nSupports multiple filtering conditions\n--------------------------------------\nMultiple filtering conditions can be used, with varied ROI types if necessary.\nCombining two conditions is equivalent to a logical AND between the conditions.\nOrder of application does not matter for the final result, but may change the\nintermediate files, if any.\n\nDistance management\n-------------------\nDISTANCE is optional, and it should be used carefully with large voxel size\n(e.g. > 2.5 mm).
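Per-axis travel is the sum of per-step displacements along each coordinate; with --use_abs the absolute steps are summed, so back-and-forth motion accumulates instead of cancelling. A numpy sketch of one plausible reading of the two variants (illustrative; s is a hypothetical (N, 3) array):

    import numpy as np

    def axis_travel(s, use_abs=False):
        # Distance traveled along x, y and z.
        steps = np.diff(s, axis=0)
        if use_abs:
            return np.abs(steps).sum(axis=0)  # back-and-forth accumulates
        return np.abs(steps.sum(axis=0))      # net displacement per axis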
The value is in voxel for ROIs and in mm for bounding boxes.\nAnisotropic data will affect each direction differently.\n When using --overwrite_distance, any filtering option with given criteria\nwill have its DISTANCE value replaced.\n\nFormerly: scil_filter_tractogram.py",
    "help": "usage: scil_tractogram_filter_by_roi.py [-h]\n [--drawn_roi DRAWN_ROI [DRAWN_ROI ...]]\n [--atlas_roi ATLAS_ROI [ATLAS_ROI ...]]\n [--bdo BDO [BDO ...]]\n [--x_plane X_PLANE [X_PLANE ...]]\n [--y_plane Y_PLANE [Y_PLANE ...]]\n [--z_plane Z_PLANE [Z_PLANE ...]]\n [--filtering_list FILTERING_LIST]\n [--overwrite_distance OVERWRITE_DISTANCE [OVERWRITE_DISTANCE ...]]\n [--save_masks_atlas_roi SAVE_MASKS_ATLAS_ROI]\n [--no_empty] [--display_counts]\n [--save_rejected FILENAME]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nFiltering of a tractogram based on any combination of conditions involving a\nROI (ex: keep streamlines whose endpoints are inside the ROI, exclude\nstreamlines not entirely included in a ROI, etc.)\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Can reject streamlines with endpoints in the WM or the CSF based on\n labels)\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n\nCondition\n---------\nFor every type of ROI, two or three values are expected: MODE CRITERIA DISTANCE\n(DISTANCE is always optional)\n- MODE must be one of these values: ['any', 'all', 'either_end', 'both_ends']\n - any: any part of the streamline must be in the mask.\n - all: all parts of the streamline must be in the mask.\n - either_end: at least one end of the streamline must be in the mask.\n - both_ends: both ends of the streamline must be in the mask.\n- CRITERIA must be one of these values: ['include', 'exclude']\n - Include: If condition from MODE is respected, streamline is included.\n - Exclude: If condition from MODE is respected, streamline is excluded.\n- DISTANCE must be an int and is optional.\n\nType of ROI\n-----------\n- Drawn ROI: Directly loaded from a binary file.\n- Atlas ROI: Selected label from an atlas.\n - ID is one or multiple integer values in the atlas. If multiple values,\n ID needs to be between quotes.\n Example: \"1:6 9 10:15\" will use values between 1 and 6 and values\n between 10 and 15 (inclusive), as well as value 9.\n- BDO: The ROI is the interior of a bounding box.\n- Planes: The ROI is the equivalent of a one-voxel plane.\n * Using mode 'all' with x/y/z plane works but makes very little sense.\n\nNote: `--drawn_roi MASK.nii.gz all include` is equivalent to\n `--drawn_roi INVERSE_MASK.nii.gz any exclude`\n\nFor example, this allows finding all streamlines entirely in the WM in one\ncommand (without manually inverting the mask first) or removing any streamline\nstaying in the GM without getting out.\n\nSupports multiple filtering conditions\n--------------------------------------\nMultiple filtering conditions can be used, with varied ROI types if necessary.\nCombining two conditions is equivalent to a logical AND between the conditions.\nOrder of application does not matter for the final result, but may change the\nintermediate files, if any.\n\nDistance management\n-------------------\nDISTANCE is optional, and it should be used carefully with large voxel size\n(e.g. > 2.5 mm).
The value is in voxel for ROIs and in mm for bounding boxes.\nAnisotropic data will affect each direction differently.\n When using --overwrite_distance, any filtering option with given criteria\nwill have its DISTANCE value replaced.\n\nFormerly: scil_filter_tractogram.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n out_tractogram Path of the output tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --drawn_roi DRAWN_ROI [DRAWN_ROI ...]\n ROI_NAME MODE CRITERIA DISTANCE (distance in voxel is optional)\n Filename of a hand drawn ROI (.nii or .nii.gz).\n --atlas_roi ATLAS_ROI [ATLAS_ROI ...]\n ATLAS_NAME ID MODE CRITERIA DISTANCE (distance in voxel is optional)\n Filename of an atlas (.nii or .nii.gz).\n --bdo BDO [BDO ...] BDO_NAME MODE CRITERIA DISTANCE (distance in mm is optional)\n Filename of a bounding box (bdo) file from MI-Brain.\n --x_plane X_PLANE [X_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in X, in voxel space.\n --y_plane Y_PLANE [Y_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in Y, in voxel space.\n --z_plane Z_PLANE [Z_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in Z, in voxel space.\n --filtering_list FILTERING_LIST\n Text file containing one rule per line\n (i.e. drawn_roi mask.nii.gz both_ends include 1).\n --overwrite_distance OVERWRITE_DISTANCE [OVERWRITE_DISTANCE ...]\n MODE CRITERIA DISTANCE (distance in voxel for ROIs and in mm for bounding box).\n If set, it will overwrite the distance associated to a specific mode/criteria.\n --save_masks_atlas_roi SAVE_MASKS_ATLAS_ROI\n If set, will save the atlas roi masks. The value to provide is the \n prefix, ex: my_path/atlas_roi_. Whole filename will be \n my_path/atlas_roi_{id}.nii.gz\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering\n --save_rejected FILENAME\n Save rejected streamlines to output tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n",
    "synonyms": [ ... ],
    "keywords": []
},
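The four MODE values reduce to simple tests of which streamline points fall inside the ROI. A numpy sketch of those predicates (illustrative; inside is a hypothetical boolean array with one flag per point):

    def mode_ok(inside, mode):
        # inside: boolean array, True where a streamline point is in the ROI.
        if mode == "any":
            return inside.any()
        if mode == "all":
            return inside.all()
        if mode == "either_end":
            return bool(inside[0] or inside[-1])
        if mode == "both_ends":
            return bool(inside[0] and inside[-1])
        raise ValueError(mode)

    # CRITERIA 'include' keeps streamlines for which mode_ok(...) is True;
    # 'exclude' keeps those for which it is False.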
], - [ - "order", - "rule" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "planes", - "plane" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "larger", - "size", - "size" - ], - [ - "matter", - "question", - "does" - ], - [ - "streamline", - "streamline" - ], - [ - "long", - "two" - ], - [ - "clear", - "long", - "question", - "result", - "work", - "there" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "area", - "main", - "work", - "part" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "based", - "based" - ], - [ - "areas", - "parts" - ], - [ - "forms", - "specific", - "variety", - "types" - ], - [ - "long", - "little" - ], - [ - "considered", - "experience", - "large", - "long", - "result", - "variety", - "work", - "working", - "well" - ], - [ - "thinking", - "getting" - ], - [ - "assigned", - "command" - ], - [ - "order", - "necessary" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_fix_trk", - "docstring": "This script is made to fix DSI-Studio or Startrack TRK file\n(unknown space/convention) to make it compatible with TrackVis,\nMI-Brain, Dipy Horizon (Stateful Tractogram).\n\nDSI-Studio\n==========\n\nThe script either make it match with an anatomy from DSI-Studio (AC-PC aligned,\nsometimes flipped) or if --in_native_fa is provided it moves it back to native\nDWI space (this involved registration).\n\nSince DSI-Studio sometimes leaves some skull around the brain, the --auto_crop\naims to stabilize registration. If this option fails, manually BET both FA.\nRegistration is more robust at resolution above 2mm (iso), be careful.\n\nIf you are fixing bundles, use this script once with --save_transfo and verify\nresults. Once satisfied, call the scripts on bundles using a bash for loop with\n--load_transfo to save computation.\n\nWe recommand the --cut_invalid to remove invalid points of streamlines rather\nremoving entire streamlines.\n\nThis script was tested on various datasets and worked on all of them. However,\nalways verify the results and if a specific case does not work. 
open an issue\non the Scilpy GitHub repository.\n\nStartrack\n==========\n\nThe script will create a new stateful tractogram using the reference in\norder to fix the missing information in the header of the trk.\n\n\nWARNING: This script is still experimental, DSI-Studio and Startrack\nevolve quickly and results may vary depending on the data itself\nas well as the DSI-Studio/Startrack version.\n\nFormerly: scil_fix_dsi_studio_trk.py",
    "help": "usage: scil_tractogram_fix_trk.py [-h] [--software string]\n [--cut_invalid | --remove_invalid]\n [--in_dsi_fa IN_DSI_FA]\n [--in_native_fa IN_NATIVE_FA] [--auto_crop]\n [--save_transfo FILE | --load_transfo FILE]\n [--reference REFERENCE] [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script is made to fix a DSI-Studio or Startrack TRK file\n(unknown space/convention) to make it compatible with TrackVis,\nMI-Brain, Dipy Horizon (Stateful Tractogram).\n\nDSI-Studio\n==========\n\nThe script either makes it match with an anatomy from DSI-Studio (AC-PC aligned,\nsometimes flipped) or, if --in_native_fa is provided, moves it back to native\nDWI space (this involves registration).\n\nSince DSI-Studio sometimes leaves some skull around the brain, the --auto_crop\noption aims to stabilize registration. If this option fails, manually BET both FA.\nRegistration is more robust at resolution above 2mm (iso), be careful.\n\nIf you are fixing bundles, use this script once with --save_transfo and verify\nresults. Once satisfied, call the script on bundles using a bash for loop with\n--load_transfo to save computation.\n\nWe recommend --cut_invalid to remove invalid points of streamlines rather\nthan removing entire streamlines.\n\nThis script was tested on various datasets and worked on all of them. However,\nalways verify the results, and if a specific case does not work, open an issue\non the Scilpy GitHub repository.\n\nStartrack\n==========\n\nThe script will create a new stateful tractogram using the reference in\norder to fix the missing information in the header of the trk.\n\nWARNING: This script is still experimental, DSI-Studio and Startrack\nevolve quickly and results may vary depending on the data itself\nas well as the DSI-Studio/Startrack version.\n\nFormerly: scil_fix_dsi_studio_trk.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file from DSI studio (.trk).\n out_tractogram Path of the output tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --software string Software used to create in_tractogram.\n Choices: ['dsi_studio', 'startrack']\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level.
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nDSI options:\n --in_dsi_fa IN_DSI_FA\n Path of the input FA from DSI Studio (.nii.gz).\n --in_native_fa IN_NATIVE_FA\n Path of the input FA from Dipy/MRtrix (.nii.gz).\n Move the tractogram back to a \"proper\" space, including registration.\n --auto_crop If both FA are not already BET, perform registration \n using a centered-cube crop to ignore the skull.\n A good BET for both is more robust.\n --save_transfo FILE Save estimated transformation to avoid recomputing (.txt).\n --load_transfo FILE Load estimated transformation to apply to other files (.txt).\n\nStarTrack options:\n --reference REFERENCE\n Reference anatomy (.nii or .nii.gz).\n",
    "synonyms": [ ... ],
    "keywords": []
},
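The --save_transfo/--load_transfo pair exists so that the costly registration is estimated once and reused across bundles. A minimal sketch of that reuse with a 4x4 affine (illustrative; numpy only, the file name is hypothetical):

    import numpy as np

    # First run: estimate the transform once, then persist it.
    affine = np.eye(4)                 # stand-in for the estimated 4x4 transform
    np.savetxt("transfo.txt", affine)  # what --save_transfo would keep

    # Later runs: reload it and apply it to every streamline's points.
    affine = np.loadtxt("transfo.txt")
    points = np.random.rand(50, 3)     # hypothetical streamline
    moved = points @ affine[:3, :3].T + affine[:3, 3]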
"does" - ], - [ - "result", - "results" - ], - [ - "level", - "above" - ], - [ - "matter", - "question", - "subject", - "issue" - ], - [ - "clear", - "work", - "made" - ], - [ - "clear", - "long", - "work", - "still" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "matter", - "question", - "case" - ], - [ - "considered", - "experience", - "large", - "long", - "result", - "variety", - "work", - "working", - "well" - ], - [ - "result", - "since" - ], - [ - "clear", - "considered", - "result", - "however" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "large", - "work", - "some" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_flip", - "docstring": "Flip streamlines locally around specific axes.\n\nIMPORTANT: this script should only be used in case of absolute necessity.\nIt's better to fix the real tools than to force flipping streamlines to\nhave them fit in the tools.\n\nFormerly: scil_flip_streamlines.py", - "help": "usage: scil_tractogram_flip.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram {x,y,z}\n [{x,y,z} ...]\n\nFlip streamlines locally around specific axes.\n\nIMPORTANT: this script should only be used in case of absolute necessity.\nIt's better to fix the real tools than to force flipping streamlines to\nhave them fit in the tools.\n\nFormerly: scil_flip_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n out_tractogram Path of the output tractogram file.\n {x,y,z} The axes you want to flip. eg: to flip the x and y axes use: x y.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "anatomy", - "anatomy" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "long", - "have" - ], - [ - "specific", - "specific" - ], - [ - "thinking", - "you" - ], - [ - "experience", - "thinking", - "work", - "working", - "better" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "methods", - "using" - ], - [ - "long", - "than" - ], - [ - "streamlines", - "streamlines" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "methods", - "tool", - "tools" - ], - [ - "matter", - "question", - "case" - ], - [ - "considered", - "key", - "work", - "important" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_math", - "docstring": "Performs an operation on a list of streamline files. The supported\noperations are:\n\ndifference: Keep the streamlines from the first file that are not in\n any of the following files.\n\nintersection: Keep the streamlines that are present in all files.\n\nunion: Keep all streamlines while removing duplicates.\n\nconcatenate: Keep all streamlines with duplicates.\n\nlazy_concatenate: Keep all streamlines with duplicates, never loading the whole\n tractograms in memory. Only works with trk/tck files;\n metadata will be lost and invalid streamlines are kept.\n\nIf a file 'duplicate.trk' has identical streamlines, calling the script using\nthe difference/intersection/union with a single input will remove these\nduplicated streamlines.\n\nTo allow a soft match, use the --precision option to increase the allowed\nthreshold for similarity. A precision of 1 represents 10**(-1), so a\nmaximum distance of 0.1mm is allowed. If the streamlines are identical, the\ndefault value of 3 (or 0.001mm distance) should work.\n\nIf there is a 0.5mm shift, use a precision of 0 (or 1mm distance) and the\n--robust option. This should make it work, but slightly slower. It will merge all\nstreamlines that are similar when rounded to that precision level.\n\nThe metadata (data per point, data per streamline) of the streamlines that\nare kept in the output will be preserved. This requires that all input files\nshare the same type of metadata. If this is not the case, use the option\n--no_metadata to strip the metadata from the output. 
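For instance, with purely hypothetical filenames:\n>> scil_tractogram_math.py concatenate bundle1.trk bundle2.trk merged.trk --no_metadata\n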
Or --fake_metadata to\ninitialize dummy metadata in the files missing them.\n\nFormerly: scil_streamlines_math.py", - "help": "usage: scil_tractogram_math.py [-h] [--precision NBR_OF_DECIMALS] [--robust]\n [--no_metadata] [--fake_metadata]\n [--save_indices OUT_INDEX_FILE] [--save_empty]\n [--no_bbox_check] [--indent INDENT]\n [--sort_keys] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n OPERATION INPUT_FILES [INPUT_FILES ...]\n OUTPUT_FILE\n\nPerforms an operation on a list of streamline files. The supported\noperations are:\n\ndifference: Keep the streamlines from the first file that are not in\n any of the following files.\n\nintersection: Keep the streamlines that are present in all files.\n\nunion: Keep all streamlines while removing duplicates.\n\nconcatenate: Keep all streamlines with duplicates.\n\nlazy_concatenate: Keep all streamlines with duplicates, never loading the whole\n tractograms in memory. Only works with trk/tck files;\n metadata will be lost and invalid streamlines are kept.\n\nIf a file 'duplicate.trk' has identical streamlines, calling the script using\nthe difference/intersection/union with a single input will remove these\nduplicated streamlines.\n\nTo allow a soft match, use the --precision option to increase the allowed\nthreshold for similarity. A precision of 1 represents 10**(-1), so a\nmaximum distance of 0.1mm is allowed. If the streamlines are identical, the\ndefault value of 3 (or 0.001mm distance) should work.\n\nIf there is a 0.5mm shift, use a precision of 0 (or 1mm distance) and the\n--robust option. This should make it work, but slightly slower. It will merge all\nstreamlines that are similar when rounded to that precision level.\n\nThe metadata (data per point, data per streamline) of the streamlines that\nare kept in the output will be preserved. This requires that all input files\nshare the same type of metadata. If this is not the case, use the option\n--no_metadata to strip the metadata from the output. Or --fake_metadata to\ninitialize dummy metadata in the files missing them.\n\nFormerly: scil_streamlines_math.py\n\npositional arguments:\n OPERATION The type of operation to be performed on the streamlines. Must\n be one of the following: difference, intersection, union, concatenate, lazy_concatenate.\n INPUT_FILES The list of files that contain the streamlines to operate on.\n OUTPUT_FILE The file where the remaining streamlines are saved.\n\noptions:\n -h, --help show this help message and exit\n --precision NBR_OF_DECIMALS, -p NBR_OF_DECIMALS\n Precision used to compare streamlines [4].\n --robust, -r Use version robust to small translation/rotation.\n --no_metadata, -n Strip the streamline metadata from the output.\n --fake_metadata Skip the metadata verification, create fake metadata if missing, can lead to unexpected behavior.\n --save_indices OUT_INDEX_FILE, -s OUT_INDEX_FILE\n Save the streamline indices to the supplied json file.\n --save_empty If set, we will save all results, even if the tractogram is empty.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "anatomy", - "anatomy" - ], - [ - "memory", - "memory" - ], - [ - "average", - "per" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "large", - "larger", - "small" - ], - [ - "work", - "and" - ], - [ - "precision", - "precision" - ], - [ - "result", - "following" - ], - [ - "considered", - "are" - ], - [ - "create", - "experience", - "matter", - "thinking", - "true", - "sort" - ], - [ - "difference", - "point" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "work", - "working", - "work" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "invalid", - "valid", - "invalid" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "long", - "have" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "long", - "matter", - "question", - "result", - "work", - "even" - ], - [ - "clear", - "create", - "future", - "step", - "work", - "make" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "supported", - "support" - ], - [ - "future", - "will" - ], - [ - "loss", - "lost" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "create" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "action", - "clear", - "considered", - "future", - "matter", - "possibility", - "potential", - "question", - "result", - "specific", - "any" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "clear", - "long", - "matter", - "result", - "work", - "so" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "difference", - "difference" - ], - [ - "represent", - "represents" - ], - [ - "methods", - "using" - ], - [ - "contrast", - "form", - "forms", - "larger", - "result", - "specific", - "variety", - "similar" - ], - [ - "streamlines", - "streamlines" - ], - [ - "work", - "works" - ], - [ - "future", - "result", - "specific", - "variety", - "work", - "these" - ], - [ - "supported", - "supported" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "area", - "work", - "where" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "streamline", - "streamline" - ], - [ - "result", - "results" - ], - [ - "order", - "allowed" - ], - [ - "order", - "allow" - ], - [ - "similarity", - "similarity" - ], - [ - "clear", - "long", - "question", - "result", - "work", - "there" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - 
"result", - "work", - "not" - ], - [ - "matter", - "question", - "case" - ], - [ - "higher", - "increase", - "total", - "increase" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_pairwise_comparison", - "docstring": "This script is designed to compare and help visualize differences between two\ntractograms. This can be especially useful in studies where multiple\ntractograms from different algorithms or parameters need to be compared.\n\nA similar script (scil_bundle_pairwise_comparison.py) is available for bundles,\nwith metrics more adapted to bundles (and spatial agreement).\n\nThe difference is computed in terms of\n- A voxel-wise spatial distance between streamlines crossing each voxel.\n This can help to see if both tractography reconstructions at each voxel\n look similar (out_diff.nii.gz)\n- An angular correlation (ACC) between streamline orientation from TODI.\n This compares the local orientation of streamlines at each voxel\n (out_acc.nii.gz)\n- A patch-wise correlation between streamline density maps from both\n tractograms. This compares where the high/low density regions agree or not\n (out_corr.nii.gz)\n- A heatmap combining all the previous metrics using a harmonic means of the\n normalized metrics to summarize general agreement (out_heatmap.nii.gz)", - "help": "usage: scil_tractogram_pairwise_comparison.py [-h] [--out_dir OUT_DIR]\n [--out_prefix OUT_PREFIX]\n [--in_mask IN_FILE]\n [--skip_streamlines_distance]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram_1 in_tractogram_2\n\nThis script is designed to compare and help visualize differences between two\ntractograms. This can be especially useful in studies where multiple\ntractograms from different algorithms or parameters need to be compared.\n\nA similar script (scil_bundle_pairwise_comparison.py) is available for bundles,\nwith metrics more adapted to bundles (and spatial agreement).\n\nThe difference is computed in terms of\n- A voxel-wise spatial distance between streamlines crossing each voxel.\n This can help to see if both tractography reconstructions at each voxel\n look similar (out_diff.nii.gz)\n- An angular correlation (ACC) between streamline orientation from TODI.\n This compares the local orientation of streamlines at each voxel\n (out_acc.nii.gz)\n- A patch-wise correlation between streamline density maps from both\n tractograms. This compares where the high/low density regions agree or not\n (out_corr.nii.gz)\n- A heatmap combining all the previous metrics using a harmonic means of the\n normalized metrics to summarize general agreement (out_heatmap.nii.gz)\n\npositional arguments:\n in_tractogram_1 Input tractogram 1.\n in_tractogram_2 Input tractogram 2.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Directory where all output files will be saved.\n If not specified, outputs will be saved in the current directory.\n --out_prefix OUT_PREFIX\n Prefix for output files. Useful for distinguishing between different runs [out].\n --in_mask IN_FILE Optional input mask.\n --skip_streamlines_distance\n Skip computation of the spatial distance between streamlines. Slowest part of the computation.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "individual", - "specific", - "unique", - "variety", - "different" - ], - [ - "total", - "number" - ], - [ - "algorithm", - "algorithms" - ], - [ - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundles" - ], - [ - "shape", - "view", - "look" - ], - [ - "orientation", - "orientation" - ], - [ - "work", - "and" - ], - [ - "general", - "general" - ], - [ - "long", - "work", - "more" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "areas", - "region", - "regions", - "regions" - ], - [ - "long", - "a" - ], - [ - "clear", - "left", - "out" - ], - [ - "voxel", - "voxel" - ], - [ - "tool", - "useful" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "areas", - "considered", - "highly", - "especially" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "long", - "with" - ], - [ - "parameters", - "specified" - ], - [ - "clinical", - "literature", - "scientific", - "studies", - "study", - "studies" - ], - [ - "differences", - "differences" - ], - [ - "subject", - "terms" - ], - [ - "difference", - "difference" - ], - [ - "methods", - "using" - ], - [ - "contrast", - "form", - "forms", - "larger", - "result", - "specific", - "variety", - "similar" - ], - [ - "view", - "see" - ], - [ - "future", - "current" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "streamlines", - "streamlines" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "area", - "work", - "where" - ], - [ - "individual", - "each" - ], - [ - "high", - "higher", - "level", - "low" - ], - [ - "streamline", - "streamline" - ], - [ - "create", - "lack", - "step", - "work", - "working", - "need" - ], - [ - "average", - "compared" - ], - [ - "long", - "two" - ], - [ - "step", - "start" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "area", - "main", - "work", - "part" - ], - [ - "maps", - "maps" - ], - [ - "spatial", - "temporal", - "spatial" - ], - [ - "meaning", - "order", - "result", - "step", - "true", - "means" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_print_info", - "docstring": "Prints information on a loaded tractogram: number of streamlines, and\nmean / min / max / std of\n - length in number of points\n - length in mm\n - step size.\n\nFor trk files: also prints the data_per_point and data_per_streamline keys.\n\nSee also:\n - scil_header_print_info.py to see the header, affine, volume dimension.\n - scil_bundle_shape_measures.py to see bundle-specific information.", - "help": "usage: scil_tractogram_print_info.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [--indent INDENT] [--sort_keys]\n in_tractogram\n\nPrints information on a 
loaded tractogram: number of streamlines, and\nmean / min / max / std of\n - length in number of points\n - length in mm\n - step size.\n\nFor trk files: also prints the data_per_point and data_per_streamline keys.\n\nSee also:\n - scil_header_print_info.py to see the header, affine, volume dimension.\n - scil_bundle_shape_measures.py to see bundle-specific information.\n\npositional arguments:\n in_tractogram Tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "anatomy", - "anatomy" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "create", - "experience", - "matter", - "thinking", - "true", - "sort" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "specific", - "specific" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "action", - "process", - "step", - "step" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "streamlines", - "streamlines" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "larger", - "size", - "size" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "bundles", - "bundle" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_project_map_to_streamlines", - "docstring": "Projects values extracted from a map onto the points of streamlines.\n\nThe default options will take data from a nifti image (3D or 4D) and project it\nonto the points of streamlines. If the image is 4D, the data is stored as a\nlist of 1D arrays per streamline. If the image is 3D, the data is stored as a\nlist of values per streamline.\n\nSee also scil_tractogram_project_streamlines_to_map.py for the reverse action.\n\n* Note that the data from your maps will be projected only on the coordinates\nof the points of your streamlines. Data underlying the whole segments between\ntwo consecutive points is not used. If your streamlines are strongly\ncompressed, or if they have a very big step size, the result will possibly\nreflect your map poorly. You may use scil_tractogram_resample.py to upsample\nyour streamlines first.\n\n* Hint: The streamlines themselves are not modified here, only their dpp. 
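A minimal call might look like this (filenames here are hypothetical):\n>> scil_tractogram_project_map_to_streamlines.py tracto.trk tracto_fa.trk\n --in_maps fa.nii.gz --out_dpp_name fa\n\n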
To\navoid multiplying data on disk, you could use the following arguments to save\nthe new dpp in your current tractogram:\n>> scil_tractogram_project_map_to_streamlines.py $in_bundle $in_bundle\n --keep_all_dpp -f", - "help": "usage: scil_tractogram_project_map_to_streamlines.py [-h] --in_maps IN_MAPS\n [IN_MAPS ...]\n --out_dpp_name\n OUT_DPP_NAME\n [OUT_DPP_NAME ...]\n [--trilinear]\n [--endpoints_only]\n [--keep_all_dpp]\n [--overwrite_dpp]\n [--reference REFERENCE]\n [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n in_tractogram\n out_tractogram\n\nProjects values extracted from a map onto the points of streamlines.\n\nThe default options will take data from a nifti image (3D or 4D) and project it\nonto the points of streamlines. If the image is 4D, the data is stored as a\nlist of 1D arrays per streamline. If the image is 3D, the data is stored as a\nlist of values per streamline.\n\nSee also scil_tractogram_project_streamlines_to_map.py for the reverse action.\n\n* Note that the data from your maps will be projected only on the coordinates\nof the points of your streamlines. Data underlying the whole segments between\ntwo consecutive points is not used. If your streamlines are strongly\ncompressed, or if they have a very big step size, the result will possibly\nreflect your map poorly. You may use scil_tractogram_resample.py to upsample\nyour streamlines first.\n\n* Hint: The streamlines themselves are not modified here, only their dpp. To\navoid multiplying data on disk, you could use the following arguments to save\nthe new dpp in your current tractogram:\n>> scil_tractogram_project_map_to_streamlines.py $in_bundle $in_bundle\n --keep_all_dpp -f\n\npositional arguments:\n in_tractogram Fiber bundle file.\n out_tractogram Output file.\n\noptions:\n -h, --help show this help message and exit\n --in_maps IN_MAPS [IN_MAPS ...]\n Nifti map to project onto streamlines.\n --out_dpp_name OUT_DPP_NAME [OUT_DPP_NAME ...]\n Name of the data_per_point to be saved in the \n output tractogram.\n --trilinear If set, will use trilinear interpolation \n else will use nearest neighbor interpolation \n by default.\n --endpoints_only If set, will only project the map onto the \n endpoints of the streamlines (all other values along \n streamlines will be NaN). If not set, will project \n the map onto all points of the streamlines.\n --keep_all_dpp If set, previous data_per_point will be preserved \n in the output tractogram. Else, only --out_dpp_name \n keys will be saved.\n --overwrite_dpp If set, if --keep_all_dpp is set and some \n --out_dpp_name keys already existed in your \n data_per_point, allow overwriting old data_per_point.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "anatomy", - "anatomy" - ], - [ - "average", - "per" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "result", - "following" - ], - [ - "considered", - "are" - ], - [ - "clear", - "considered", - "highly", - "long", - "matter", - "true", - "work", - "very" - ], - [ - "fibre", - "fiber" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "clear", - "long", - "work", - "they" - ], - [ - "lack", - "loss", - "result", - "result" - ], - [ - "meaning", - "name" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "long", - "have" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "possibility", - "avoid" - ], - [ - "supported", - "support" - ], - [ - "future", - "will" - ], - [ - "project", - "project" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "project", - "projects" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "clear", - "order", - "step", - "work", - "take" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "action", - "process", - "step", - "step" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "variety", - "work", - "other" - ], - [ - "future", - "current" - ], - [ - "area", - "main", - "along" - ], - [ - "streamlines", - "streamlines" - ], - [ - "matter", - "thinking", - "else" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "result", - "may" - ], - [ - "considered", - "possibility", - "result", - "possibly" - ], - [ - "large", - "big" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "larger", - "size", - "size" - ], - [ - "streamline", - "streamline" - ], - [ - "order", - "allow" - ], - [ - "long", - "two" - ], - [ - "action", - "step", - "action" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "maps", - "maps" - ], - [ - "exist", - "existed" - ], - [ - "maps", - "map" - ], - [ - "fundamental", - "underlying" - ], - [ - "supported", - "strongly" - ], - [ - "bundles", - "bundle" - ], - [ - "clear", - "result", - "work", - "could" - ], - [ - "large", - "work", - "some" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_project_streamlines_to_map", - "docstring": "Projects metrics onto the underlying voxels of the streamlines. This script can\nproject data from data_per_point (dpp) or data_per_streamline (dps) to maps.\n\nYou choose to project data from all points of the streamlines, or from the\nendpoints only. 
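For example, to project an existing 'fa' data_per_point onto the endpoints (hypothetical filenames):\n>> scil_tractogram_project_streamlines_to_map.py bundle.trk out_prefix_\n --use_dpp fa --mean_endpoints --to_endpoints\n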
The idea then is to visualize the cortical areas affected by\nmetrics (assuming streamlines start/end in the cortex).\n\nSee also scil_tractogram_project_map_to_streamlines.py for the reverse action.\n\nHow the data is loaded:\n - From dps: uses the same value for each point of the streamline.\n - From dpp: one value per point.\n\nHow the data is used:\n 1. Average all points of the streamline to get a mean value, set this value\n to all points.\n 2. Average the two endpoints and get their mean value, set this value to\n all points.\n 3. Keep each point individually.\n\nHow the data is projected to a map:\n A. Using each point.\n B. Using the endpoints only.\n\nFor more complex operations than the average per streamline, see\nscil_tractogram_dpp_math.py.", - "help": "usage: scil_tractogram_project_streamlines_to_map.py [-h]\n (--use_dps key [key ...] | --use_dpp key [key ...] | --load_dps file [file ...] | --load_dpp file [file ...])\n (--mean_endpoints | --mean_streamline | --point_by_point)\n (--to_endpoints | --to_wm)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bundle out_prefix\n\nProjects metrics onto the underlying voxels of the streamlines. This script can\nproject data from data_per_point (dpp) or data_per_streamline (dps) to maps.\n\nYou choose to project data from all points of the streamlines, or from the\nendpoints only. The idea then is to visualize the cortical areas affected by\nmetrics (assuming streamlines start/end in the cortex).\n\nSee also scil_tractogram_project_map_to_streamlines.py for the reverse action.\n\nHow the data is loaded:\n - From dps: uses the same value for each point of the streamline.\n - From dpp: one value per point.\n\nHow the data is used:\n 1. Average all points of the streamline to get a mean value, set this value\n to all points.\n 2. Average the two endpoints and get their mean value, set this value to\n all points.\n 3. Keep each point individually.\n\nHow the data is projected to a map:\n A. Using each point.\n B. Using the endpoints only.\n\nFor more complex operations than the average per streamline, see\nscil_tractogram_dpp_math.py.\n\npositional arguments:\n in_bundle Fiber bundle file.\n out_prefix Folder + prefix to save endpoints metric(s). We will save \n one nifti file per dpp/dps key given.\n Ex: my_path/subjX_bundleY_ with --use_dpp key1 will output \n my_path/subjX_bundleY_key1.nii.gz\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nWhere to get the statistics from. (Choose one):\n --use_dps key [key ...]\n Use the data_per_streamline from the tractogram.\n It must be a .trk.\n --use_dpp key [key ...]\n Use the data_per_point from the tractogram. \n It must be a .trk.\n --load_dps file [file ...]\n Load data per streamline (scalar) .txt or .npy.\n Must load an array with the right shape.\n --load_dpp file [file ...]\n Load data per point (scalar) from .txt or .npy.\n Must load an array with the right shape.\n\nProcessing choices. 
(Choose one):\n --mean_endpoints Uses one single value per streamline: the mean of the two \n endpoints.\n --mean_streamline Use one single value per streamline: the mean of all \n points of the streamline.\n --point_by_point Directly project the streamlines values onto the map.\n\nWhere to send the statistics. (Choose one):\n --to_endpoints Project metrics onto a mask of the endpoints.\n --to_wm Project metrics into streamlines coverage.\n", - "synonyms": [ - [ - "long", - "end" - ], - [ - "anatomy", - "anatomy" - ], - [ - "average", - "per" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "left", - "into" - ], - [ - "long", - "work", - "more" - ], - [ - "difference", - "point" - ], - [ - "fibre", - "fiber" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "cortex", - "thalamus", - "cortex" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "area", - "areas", - "region", - "regions", - "areas" - ], - [ - "thinking", - "you" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "processing", - "processing" - ], - [ - "long", - "a" - ], - [ - "key", - "main", - "key" - ], - [ - "held", - "on" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "project", - "project" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "project", - "projects" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "matter", - "question", - "thinking", - "true", - "work", - "working", - "how" - ], - [ - "long", - "with" - ], - [ - "average", - "average" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "cortical", - "cortical" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "left", - "right" - ], - [ - "long", - "than" - ], - [ - "streamlines", - "streamlines" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "possibility", - "question", - "thinking", - "true", - "view", - "idea" - ], - [ - "area", - "work", - "where" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "streamline", - "streamline" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "long", - "two" - ], - [ - "complex", - "structure", - "structures", - "complex" - ], - [ - "step", - "start" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "action", - "step", - "action" - ], - [ - "work", - "all" - ], - [ - "maps", - "maps" - ], - [ - "maps", - "map" - ], - [ - "fundamental", - "underlying" - ], - [ - "bundles", - "bundle" - ], - [ - "shape", - "structure", - "shape" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_qbx", - "docstring": "Compute clusters using QuickBundlesX and save them separately.\nWe cannot know the number of clusters in advance.\n\nQuickbundles:\nGaryfallidis, E. 
et al. (2012). Quickbundles, a method for tractography\nsimplification. Frontiers in Neuroscience, 6, 175.\n\nQuickbundlesX:\nGaryfallidis, E. et al. (2016). QuickBundlesX: sequential clustering of millions\nof streamlines in multiple levels of detail at record execution time. 24th\nInternational Society for Magnetic Resonance in Medicine (ISMRM).\n\n\"QuickBundlesX shows a remarkable 20+X speedup over its predecessor\"\n\nFormerly: scil_compute_qbx.py", - "help": "usage: scil_tractogram_qbx.py [-h] [--nb_points NB_POINTS]\n [--out_centroids OUT_CENTROIDS]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram dist_thresh out_clusters_dir\n\nCompute clusters using QuickBundlesX and save them separately.\nWe cannot know the number of clusters in advance.\n\nQuickbundles:\nGaryfallidis, E. et al. (2012). Quickbundles, a method for tractography\nsimplification. Frontiers in Neuroscience, 6, 175.\n\nQuickbundlesX:\nGaryfallidis, E. et al. (2016). QuickBundlesX: sequential clustering of millions\nof streamlines in multiple levels of detail at record execution time. 24th\nInternational Society for Magnetic Resonance in Medicine (ISMRM).\n\n\"QuickBundlesX shows a remarkable 20+X speedup over its predecessor\"\n\nFormerly: scil_compute_qbx.py\n\npositional arguments:\n in_tractogram Tractogram filename.\n Path of the input tractogram or bundle.\n dist_thresh Last QuickBundlesX threshold in mm. Typically \n the value is between 10-20mm.\n out_clusters_dir Path where to save the clusters directory.\n\noptions:\n -h, --help show this help message and exit\n --nb_points NB_POINTS\n Streamlines will be resampled to have this number of points [20].\n --out_centroids OUT_CENTROIDS\n Output tractogram filename.\n Format must be readable by the Nibabel API.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "anatomy", - "anatomy" - ], - [ - "methods", - "method" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "matter", - "question", - "thinking", - "true", - "know" - ], - [ - "left", - "result", - "when" - ], - [ - "long", - "over" - ], - [ - "long", - "have" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "last" - ], - [ - "held", - "in" - ], - [ - "long", - "result", - "work", - "working", - "time" - ], - [ - "methods", - "using" - ], - [ - "higher", - "increase", - "level", - "levels" - ], - [ - "streamlines", - "streamlines" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "area", - "work", - "where" - ], - [ - "global", - "international" - ], - [ - "bundles", - "bundle" - ], - [ - "exist", - "cannot" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_register", - "docstring": "Generate a linear transformation matrix from the registration of 2 tractograms.\nTypically, this script is run before scil_tractogram_apply_transform.py.\n\nFor more information on how to use the various registration scripts, see the\ndoc at:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_register_tractogram.py", - "help": "usage: scil_tractogram_register.py [-h] [--out_name OUT_NAME] [--only_rigid]\n [--moving_tractogram_ref MOVING_TRACTOGRAM_REF]\n [--static_tractogram_ref STATIC_TRACTOGRAM_REF]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n moving_tractogram static_tractogram\n\nGenerate a linear transformation matrix from the registration of 2 tractograms.\nTypically, this script is run before scil_tractogram_apply_transform.py.\n\nFor more information on how to use the various registration scripts, see the\ndoc at:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_register_tractogram.py\n\npositional arguments:\n moving_tractogram Path of the moving tractogram.\n static_tractogram Path of the target tractogram.\n\noptions:\n -h, --help show this help message and exit\n --out_name OUT_NAME Filename of the transformation matrix. \n The registration type will be appended as a suffix,\n [_.txt]. \n Default: [transformation.txt]\n --only_rigid If set, will only use a rigid transformation (uses affine by default).\n --moving_tractogram_ref MOVING_TRACTOGRAM_REF\n Reference anatomy for moving_tractogram (if tck/vtk/fib/dpy) file\n support (.nii or .nii.gz).\n --static_tractogram_ref STATIC_TRACTOGRAM_REF\n Reference anatomy for static_tractogram (if tck/vtk/fib/dpy) file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] E. Garyfallidis, O. Ocegueda, D. Wassermann, M. Descoteaux\nRobust and efficient linear registration of white-matter fascicles in the\nspace of streamlines, NeuroImage, Volume 117, 15 August 2015, Pages 124-140\n(http://www.sciencedirect.com/science/article/pii/S1053811915003961)\n", - "synonyms": [ - [ - "anatomy", - "anatomy" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "create", - "generate" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "long", - "work", - "more" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "matter", - "question", - "subject", - "matter" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "specific", - "variety", - "various" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "space", - "space" - ], - [ - "held", - "in" - ], - [ - "matter", - "question", - "thinking", - "true", - "work", - "working", - "how" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "streamlines", - "streamlines" - ], - [ - "left", - "before" - ], - [ - "matrices", - "matrix" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "blue", - "green", - "red", - "white", - "white" - ], - [ - "literature", - "scientific", - "studies", - "study", - "science" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_remove_invalid", - "docstring": "Removal of streamlines that are out of the volume bounding box. In voxel space,\nno negative coordinates and no coordinates above the volume dimensions are\npossible. Any streamline that does not respect these two conditions is removed.\n\nThe --cut_invalid option will cut streamlines so that their longest segment is\nwithin the bounding box.\n\nFormerly: scil_remove_invalid_streamlines.py", - "help": "usage: scil_tractogram_remove_invalid.py [-h] [--cut_invalid]\n [--remove_single_point]\n [--remove_overlapping_points]\n [--threshold THRESHOLD] [--no_empty]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nRemoval of streamlines that are out of the volume bounding box. In voxel space,\nno negative coordinates and no coordinates above the volume dimensions are\npossible. Any streamline that does not respect these two conditions is removed.\n\nThe --cut_invalid option will cut streamlines so that their longest segment is\nwithin the bounding box.\n\nFormerly: scil_remove_invalid_streamlines.py\n\npositional arguments:\n in_tractogram Tractogram filename. Format must be one of \n trk, tck, vtk, fib, dpy.\n out_tractogram Output filename. 
Format must be one of \n trk, tck, vtk, fib, dpy.\n\noptions:\n -h, --help show this help message and exit\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_single_point\n Consider single point streamlines invalid.\n --remove_overlapping_points\n Consider streamlines with overlapping points invalid.\n --threshold THRESHOLD\n Maximum distance between two points to be considered overlapping [0.001 mm].\n --no_empty Do not save empty tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "positive", - "negative" - ], - [ - "anatomy", - "anatomy" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "difference", - "point" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "conditions", - "conditions" - ], - [ - "left", - "result", - "when" - ], - [ - "invalid", - "valid", - "invalid" - ], - [ - "clear", - "considered", - "create", - "form", - "manner", - "matter", - "result", - "subject", - "thinking", - "true", - "view", - "work", - "rather" - ], - [ - "clear", - "left", - "out" - ], - [ - "voxel", - "voxel" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "supported", - "support" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "space", - "space" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "action", - "clear", - "considered", - "future", - "matter", - "possibility", - "potential", - "question", - "result", - "specific", - "any" - ], - [ - "long", - "with" - ], - [ - "clear", - "long", - "matter", - "result", - "work", - "so" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "methods", - "using" - ], - [ - "clear", - "future", - "possibility", - "potential", - "question", - "result", - "specific", - "step", - "possible" - ], - [ - "long", - "than" - ], - [ - "streamlines", - "streamlines" - ], - [ - "future", - "result", - "specific", - "variety", - "work", - "these" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "streamline", - "streamline" - ], - [ - "level", - "above" - ], - [ - "long", - "two" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "considered", - "possibility", - "question", - "step", - "consider" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_resample", - "docstring": "Script to resample a tractogram to a set number of streamlines.\nDefault behavior:\n- IF number of requested streamlines is lower than streamline count: DOWNSAMPLE\n- IF number of requested streamlines is higher than streamline count: 
 UPSAMPLE\nTo prevent upsampling if not desired, use --never_upsample.\n\nCan be useful to build training sets for machine learning algorithms, to\nupsample under-represented bundles or downsample over-represented bundles.\n\nWorks by either selecting a subset of streamlines or by generating new\nstreamlines by adding Gaussian noise to existing ones.\n\nUpsampling:\n Includes smoothing to compensate for the noisiness of new streamlines\n generated by the process.\nDownsampling:\n Includes the possibility of choosing randomly *per Quickbundle cluster* to\n ensure that all clusters are represented in the final tractogram.\n\nExample usage:\n$ scil_tractogram_resample.py input.trk 1000 output.trk --point_wise_std 0.5 --gaussian 5 --keep_invalid_streamlines\n$ scil_visualize_bundles.py output.trk --local_coloring --width=0.1", - "help": "usage: scil_tractogram_resample.py [-h] [--never_upsample]\n [--point_wise_std POINT_WISE_STD]\n [--tube_radius TUBE_RADIUS]\n [--gaussian SIGMA] [-e ERROR_RATE]\n [--keep_invalid_streamlines]\n [--downsample_per_cluster]\n [--qbx_thresholds QBX_THRESHOLDS [QBX_THRESHOLDS ...]]\n [--seed SEED] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram nb_streamlines out_tractogram\n\nScript to resample a tractogram to a set number of streamlines.\nDefault behavior:\n- IF number of requested streamlines is lower than streamline count: DOWNSAMPLE\n- IF number of requested streamlines is higher than streamline count: UPSAMPLE\nTo prevent upsampling if not desired, use --never_upsample.\n\nCan be useful to build training sets for machine learning algorithms, to\nupsample under-represented bundles or downsample over-represented bundles.\n\nWorks by either selecting a subset of streamlines or by generating new\nstreamlines by adding Gaussian noise to existing ones.\n\nUpsampling:\n Includes smoothing to compensate for the noisiness of new streamlines\n generated by the process.\nDownsampling:\n Includes the possibility of choosing randomly *per Quickbundle cluster* to\n ensure that all clusters are represented in the final tractogram.\n\nExample usage:\n$ scil_tractogram_resample.py input.trk 1000 output.trk --point_wise_std 0.5 --gaussian 5 --keep_invalid_streamlines\n$ scil_visualize_bundles.py output.trk --local_coloring --width=0.1\n\npositional arguments:\n in_tractogram Input tractography file.\n nb_streamlines Number of streamlines to resample the tractogram to.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --never_upsample Make sure to never upsample a tractogram.\n Useful when downsampling batches of files using bash.\n --seed SEED Use a specific random seed for the resampling.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nUpsampling params:\n --point_wise_std POINT_WISE_STD\n Noise to add to existing streamlines points to generate new ones [1].\n --tube_radius TUBE_RADIUS\n Maximum distance to generate streamlines around the original ones [1].\n --gaussian SIGMA Sigma for smoothing. 
Use the value of surrounding X,Y,Z points on the streamline to blur the streamlines.\n A good sigma choice would be around 5.\n -e ERROR_RATE Maximum compression distance in mm [0.1].\n --keep_invalid_streamlines\n Keep invalid newly generated streamlines that may go out of the \n bounding box.\n\nDownsampling params:\n --downsample_per_cluster\n If set, downsampling will be done per cluster (computed with \n Quickbundles) to ensure that at least some streamlines are \n kept per bundle. Else, random downsampling is performed (default).\n --qbx_thresholds QBX_THRESHOLDS [QBX_THRESHOLDS ...]\n If you chose option '--downsample_per_cluster', you may set \n the QBx threshold value(s) here. Default: [40, 30, 20]\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "algorithm", - "algorithms" - ], - [ - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundles" - ], - [ - "create", - "generate" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "average", - "per" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "represent", - "represented" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "contrast", - "difference", - "form", - "result", - "specific", - "subject", - "true", - "unique", - "work", - "example" - ], - [ - "invalid", - "valid", - "invalid" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "long", - "over" - ], - [ - "specific", - "specific" - ], - [ - "thinking", - "you" - ], - [ - "high", - "higher", - "increase", - "level", - "higher" - ], - [ - "long", - "a" - ], - [ - "clear", - "left", - "out" - ], - [ - "total", - "40" - ], - [ - "tool", - "useful" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "create", - "future", - "step", - "work", - "make" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "supported", - "support" - ], - [ - "future", - "will" - ], - [ - "experience", - "knowledge", - "learning", - "learning" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "random", - "random" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "step", - "go" - ], - [ - "clear", - "adding" - ], - [ - "clear", - "experience", - "thinking", - "true", - "good" - ], - [ - "methods", - "using" - ], - [ - "create", - "build" - ], - [ - "area", - "areas", - "surrounding" - ], - [ - "process", - "processes", - "step", - "process" - ], - [ - "long", - "than" - ], - [ - "streamlines", - "streamlines" - ], - [ - "work", - "works" - ], - [ - "matter", - "thinking", - "else" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "result", - "may" - ], - [ - "higher", - "lower" - ], - [ - "streamline", - "streamline" - ], - [ - "work", - "working", - "done" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "matter", - "question", - "thinking", - "sure" - ], - [ - "bundles", - "bundle" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "large", - "work", - "some" - 
], - [ - "future", - "possibility", - "potential", - "question", - "possibility" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_resample_nb_points", - "docstring": "Script to resample a set of streamlines to either a new number of points per\nstreamline or to a fixed step size. WARNING: data_per_point is not carried.\n\nFormerly: scil_resample_streamlines.py", - "help": "usage: scil_tractogram_resample_nb_points.py [-h]\n (--nb_pts_per_streamline NB_PTS_PER_STREAMLINE | --step_size STEP_SIZE)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nScript to resample a set of streamlines to either a new number of points per\nstreamline or to a fixed step size. WARNING: data_per_point is not carried.\n\nFormerly: scil_resample_streamlines.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --nb_pts_per_streamline NB_PTS_PER_STREAMLINE\n Number of points per streamline in the output.\n --step_size STEP_SIZE\n Step size in the output (in mm).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "larger", - "size", - "size" - ], - [ - "anatomy", - "anatomy" - ], - [ - "streamline", - "streamline" - ], - [ - "action", - "process", - "step", - "step" - ], - [ - "long", - "a" - ], - [ - "average", - "per" - ], - [ - "work", - "and" - ], - [ - "held", - "on" - ], - [ - "methods", - "using" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "streamlines", - "streamlines" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "is" - ], - [ - "held", - "in" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "meaning", - "name" - ], - [ - "order", - "set" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_seed_density_map", - "docstring": "Compute a density map of seeds saved in .trk file.\n\nFormerly: scil_compute_seed_density_map.py", - "help": "usage: scil_tractogram_seed_density_map.py [-h] [--binary [FIXED_VALUE]]\n [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n tractogram_filename\n seed_density_filename\n\nCompute a density map of seeds saved in .trk file.\n\nFormerly: scil_compute_seed_density_map.py\n\npositional arguments:\n tractogram_filename Tracts filename. Format must be .trk. \n File should contain a \"seeds\" value in the data_per_streamline.\n These seeds must be in space: voxel, origin: corner.\n seed_density_filename\n Output seed density filename. 
Format must be Nifti.\n\noptions:\n  -h, --help            show this help message and exit\n  --binary [FIXED_VALUE]\n                        If set, will store the same value for all intersected voxels, creating a binary map.\n                        When set without a value, 1 is used (and dtype uint8).\n                        If a value is given, will be used as the stored value.\n  --no_bbox_check       Activate to ignore validity of the bounding box during loading / saving of \n                        tractograms (ignores the presence of invalid streamlines).\n  -v [{DEBUG,INFO,WARNING}]\n                        Produces verbose output depending on the provided level. \n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n", - "synonyms": [ - [ - "work", - "and" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "create", - "creating" - ], - [ - "invalid", - "valid", - "invalid" - ], - [ - "order", - "set" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "lack", - "result", - "without" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "space", - "space" - ], - [ - "held", - "in" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "streamlines", - "streamlines" - ], - [ - "future", - "result", - "specific", - "variety", - "work", - "these" - ], - [ - "intersected", - "intersected" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "tract", - "tracts", - "tracts" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "maps", - "map" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_segment_and_score", - "docstring": "Scores input tractogram overall and bundlewise.\n\nOutputs\n-------\n\n - results.json: Contains a full tractometry report.\n - processing_stats.json: Contains information on the segmentation of\n bundles (ex: the number of wpc per criteria).\n - Splits the input tractogram into\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (if args.compute_ic)\n segmented_WPC/*_wpc.trk (if args.save_wpc_separately)\n IS.trk OR NC.trk (if args.compute_ic)\n\nBy default, if a streamline fits in many bundles, it will be included in every\none. This means a streamline may be a VS for a bundle and an IS for\n(potentially many) others. If you want to assign each streamline to at most one\nbundle, use the `--unique` flag.\n\nConfig file\n-----------\n\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\nMandatory:\n - endpoints OR [head AND tail]: filename for the endpoints ROI.\n If 'endpoints' is used, we will automatically separate the mask into two\n ROIs, acting as head and tail. 
Quality check is strongly recommended.\n\nOptional:\n Concerning metrics:\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n Concerning inclusion criteria (other streamlines will be WPC):\n - all_mask: ROI serving as \"all\" criteria: to be included in the bundle,\n ALL points of a streamline must be inside the mask.*\n - any_mask: ROI serving as \"any\" criteria: streamlines\n must touch that mask in at least one point (\"any\" point) to be included\n in the bundle.\n - angle: angle criteria. Streamlines containing loops and sharp turns above\n given angle will be rejected from the bundle.\n - length: maximum and minimum lengths per bundle.\n - length_x / length_x_abs: maximum and minimum total distance in the x\n direction (i.e. first coordinate).**\n - length_y / length_y_abs: maximum and minimum total distance in the y\n direction (i.e. second coordinate).**\n - length_z / length_z_abs: maximum and minimum total distance in the z\n direction (i.e. third coordinate).**\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n** With absolute values: coming back on yourself will contribute to the total\ndistance instead of cancelling it.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n \"angle\": 300,\n \"length\": [140, 150],\n \"endpoints\": \"PATH/file1.nii.gz\"\n }\n}", - "help": "usage: scil_tractogram_segment_and_score.py [-h] [--json_prefix p]\n                                            [--gt_dir DIR]\n                                            [--use_gt_masks_as_all_masks]\n                                            [--dilate_endpoints NB_PASS]\n                                            [--remove_invalid]\n                                            [--save_wpc_separately]\n                                            [--compute_ic] [--unique]\n                                            [--remove_wpc_belonging_to_another_bundle]\n                                            [--no_empty] [--indent INDENT]\n                                            [--sort_keys] [--no_bbox_check]\n                                            [--reference REFERENCE]\n                                            [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                            in_tractogram gt_config out_dir\n\nScores input tractogram overall and bundlewise.\n\nOutputs\n-------\n\n - results.json: Contains a full tractometry report.\n - processing_stats.json: Contains information on the segmentation of\n bundles (ex: the number of wpc per criteria).\n - Splits the input tractogram into\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (if args.compute_ic)\n segmented_WPC/*_wpc.trk (if args.save_wpc_separately)\n IS.trk OR NC.trk (if args.compute_ic)\n\nBy default, if a streamline fits in many bundles, it will be included in every\none. This means a streamline may be a VS for a bundle and an IS for\n(potentially many) others. If you want to assign each streamline to at most one\nbundle, use the `--unique` flag.\n\nConfig file\n-----------\n\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\nMandatory:\n - endpoints OR [head AND tail]: filename for the endpoints ROI.\n If 'endpoints' is used, we will automatically separate the mask into two\n ROIs, acting as head and tail. Quality check is strongly recommended.\n\nOptional:\n Concerning metrics:\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n Concerning inclusion criteria (other streamlines will be WPC):\n - all_mask: ROI serving as \"all\" criteria: to be included in the bundle,\n ALL points of a streamline must be inside the mask.*\n - any_mask: ROI serving as \"any\" criteria: streamlines\n must touch that mask in at least one point (\"any\" point) to be included\n in the bundle.\n - angle: angle criteria. 
Streamlines containing loops and sharp turns above\n given angle will be rejected from the bundle.\n - length: maximum and minimum lengths per bundle.\n - length_x / length_x_abs: maximum and minimum total distance in the x\n direction (i.e. first coordinate).**\n - length_y / length_y_abs: maximum and minimum total distance in the y\n direction (i.e. second coordinate).**\n - length_z / length_z_abs: maximum and minimum total distance in the z\n direction (i.e. third coordinate).**\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n** With absolute values: coming back on yourself will contribute to the total\ndistance instead of cancelling it.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n \"angle\": 300,\n \"length\": [140, 150],\n \"endpoints\": \"PATH/file1.nii.gz\"\n }\n}\n\nTractometry\n-----------\nGlobal connectivity metrics:\n\n- Computed by default:\n - VS: valid streamlines, belonging to a bundle (i.e. respecting all the\n criteria for that bundle; endpoints, limit_mask, gt_mask.).\n - IS: invalid streamlines. All other streamlines. IS = IC + NC.\n\n- Optional:\n - WPC: wrong path connections, streamlines connecting correct ROIs but not\n respecting the other criteria for that bundle. Such streamlines always\n exist but they are only saved separately if specified in the options.\n Else, they are merged back with the IS.\n By definition, WPC are only computed if \"limits masks\" are provided.\n - IC: invalid connections, streamlines joining an incorrect combination of\n ROIs. Use carefully, quality depends on the quality of your ROIs and no\n analysis is done on the shape of the streamlines.\n - NC: no connections. Invalid streamlines minus invalid connections.\n\n- Fidelity metrics:\n - OL: Overlap. Percentage of ground truth voxels containing streamline(s)\n for a given bundle.\n - OR: Overreach. Amount of voxels containing streamline(s) when they\n shouldn't, for a given bundle. We compute two versions:\n OR_pct_vs = divided by the total number of voxel covered by the bundle.\n (percentage of the voxels touched by VS).\n Values range between 0 and 100%. Values are not defined when we\n recovered no streamline for a bundle, but we set the OR_pct_vs to 0\n in that case.\n OR_pct_gt = divided by the total size of the ground truth bundle mask.\n Values could be higher than 100%.\n - f1 score: which is the same as the Dice score.\n\npositional arguments:\n in_tractogram Input tractogram to score\n gt_config .json dict configured as specified above.\n out_dir Output directory for the resulting segmented bundles.\n\noptions:\n -h, --help show this help message and exit\n --json_prefix p Prefix of the two output json files. Ex: 'study_x_'. Files will be saved inside out_dir.\n Suffixes will be 'processing_stats.json' and 'results.json'.\n --no_empty Do not write file if there is no streamline.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nAdditions to gt_config:\n --gt_dir DIR Root path of the ground truth files listed in the gt_config. 
\n If not set, filenames in the config file are considered \n as absolute paths.\n --use_gt_masks_as_all_masks\n If set, the gt_config's 'gt_mask' will also be used as\n 'all_mask' for each bundle. Note that this means the\n OR will necessarily be 0.\n\nPreprocessing:\n --dilate_endpoints NB_PASS\n Dilate endpoint masks n-times. Default: 0.\n --remove_invalid Remove invalid streamlines before scoring.\n\nTractometry choices:\n --save_wpc_separately\n If set, streamlines rejected from VC based on the config\n file criteria will be saved separately from IS (and IC)\n in one file *_wpc.tck per bundle.\n --compute_ic If set, IS are split into NC + IC, where IC are computed as one bundle per\n pair of ROI not belonging to a true connection, named\n *_*_IC.tck.\n --unique If set, streamlines are assigned to the first bundle they fit in and not to all.\n --remove_wpc_belonging_to_another_bundle\n If set, WPC actually belonging to any VB (in the \n case of overlapping ROIs) will be removed\n from the WPC classification.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "limiting", - "limits" - ], - [ - "invalid", - "valid", - "invalid" - ], - [ - "clear", - "long", - "work", - "they" - ], - [ - "order", - "set" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "left", - "back" - ], - [ - "long", - "with" - ], - [ - "reported", - "report" - ], - [ - "order", - "work", - "instead" - ], - [ - "long", - "than" - ], - [ - "thinking", - "wrong" - ], - [ - "unique", - "variety", - "unique" - ], - [ - "large", - "work", - "many" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "assigned", - "assigned" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "connect", - "connected", - "connecting", - "connects", - "connecting" - ], - [ - "create", - "created" - ], - [ - "bundles", - "bundle" - ], - [ - "supported", - "strongly" - ], - [ - "clear", - "result", - "work", - "could" - ], - [ - "meaning", - "order", - "result", - "step", - "true", - "means" - ], - [ - "long", - "full" - ], - [ - "true", - "always" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "average", - "per" - ], - [ - "work", - "also" - ], - [ - "difference", - "point" - ], - [ - "left", - "from" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "defined", - "definition" - ], - [ - "voxel", - "voxel" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "future", - "will" - ], - [ - "parameters", - "specified" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "increase", - "expected" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "meaning", - "true", - "true" - ], - [ - "result", - "resulting" - ], - [ - "result", - "may" - ], - [ - "connect", - "connected", - "connection", - "connections", - "connections" - ], - [ - "streamline", - "streamline" - ], - [ - "connectivity", - "connectivity" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "connection", - "connections", - "connection" - ], - [ - "lack", - "quality" - ], - [ - "clear", - "question", - "result", - 
"true", - "no" - ], - [ - "shape", - "structure", - "shape" - ], - [ - "considered", - "specific", - "variety", - "such" - ], - [ - "considered", - "highly", - "long", - "work", - "most" - ], - [ - "considered", - "are" - ], - [ - "left", - "into" - ], - [ - "proposed", - "rejected" - ], - [ - "lack", - "loss", - "result", - "result" - ], - [ - "exist", - "exist" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "analysis", - "data", - "methods", - "study", - "analysis" - ], - [ - "supported", - "support" - ], - [ - "thinking", - "i" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "acted", - "role", - "acting" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "involved", - "others" - ], - [ - "methods", - "using" - ], - [ - "variety", - "work", - "other" - ], - [ - "increase", - "total", - "amount" - ], - [ - "defined", - "function", - "defined" - ], - [ - "left", - "before" - ], - [ - "matter", - "thinking", - "else" - ], - [ - "potential", - "potentially" - ], - [ - "exist", - "necessarily" - ], - [ - "area", - "work", - "where" - ], - [ - "individual", - "each" - ], - [ - "long", - "two" - ], - [ - "matter", - "question", - "case" - ], - [ - "based", - "based" - ], - [ - "total", - "number" - ], - [ - "anatomy", - "anatomy" - ], - [ - "global", - "global" - ], - [ - "bundles", - "bundles" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "form", - "combination" - ], - [ - "create", - "experience", - "matter", - "thinking", - "true", - "sort" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "direction", - "direction" - ], - [ - "invalid", - "valid", - "valid" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "high", - "higher", - "increase", - "level", - "higher" - ], - [ - "average", - "percentage" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "true", - "truth" - ], - [ - "action", - "clear", - "considered", - "future", - "matter", - "possibility", - "potential", - "question", - "result", - "specific", - "any" - ], - [ - "streamlines", - "streamlines" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "larger", - "size", - "size" - ], - [ - "result", - "results" - ], - [ - "level", - "above" - ], - [ - "work", - "working", - "done" - ], - [ - "clear", - "long", - "question", - "result", - "work", - "there" - ], - [ - "total", - "100" - ], - [ - "increase", - "total", - "total" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_segment_bundles", - "docstring": "Compute BundleSeg & supports multi-atlas & multi-parameters (RBx-like).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are not sure about the transformation 'direction' you can try\nscil_tractogram_segment_bundles.py (with the -v option), a warning will popup\nif the provided transformation is not used correctly.\n\nThe number of folders inside 'models_directories' will increase the number of\nruns. 
Each folder is considered like an atlas and bundles inside will initiate\nmore BundleSeg executions. The more atlases you have, the more robust the\nrecognition will be.\n\n--minimal_vote_ratio is a value between 0 and 1. If you have 5 input model\ndirectories and a minimal_vote_ratio of 0.5, you will need at least 3 votes.\n\nExample data and usage available at: https://zenodo.org/record/7950602\n\nFor RAM usage, it is recommended to use this heuristic:\n (size of inputs tractogram (GB) * number of processes) < RAM (GB)\nThis is important because many instances of data structures are initialized\nin parallel and can lead to a RAM overflow.\n\nFormerly: scil_recognize_multi_bundles.py", - "help": "usage: scil_tractogram_segment_bundles.py [-h] [--out_dir OUT_DIR]\n                                          [--minimal_vote_ratio MINIMAL_VOTE_RATIO]\n                                          [--seed SEED] [--inverse]\n                                          [--reference REFERENCE]\n                                          [--processes NBR]\n                                          [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                          in_tractograms [in_tractograms ...]\n                                          in_config_file in_directory\n                                          in_transfo\n\nCompute BundleSeg & supports multi-atlas & multi-parameters (RBx-like).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are not sure about the transformation 'direction' you can try\nscil_tractogram_segment_bundles.py (with the -v option), a warning will pop up\nif the provided transformation is not used correctly.\n\nThe number of folders inside 'models_directories' will increase the number of\nruns. Each folder is considered like an atlas and bundles inside will initiate\nmore BundleSeg executions. The more atlases you have, the more robust the\nrecognition will be.\n\n--minimal_vote_ratio is a value between 0 and 1. If you have 5 input model\ndirectories and a minimal_vote_ratio of 0.5, you will need at least 3 votes.\n\nExample data and usage available at: https://zenodo.org/record/7950602\n\nFor RAM usage, it is recommended to use this heuristic:\n (size of inputs tractogram (GB) * number of processes) < RAM (GB)\nThis is important because many instances of data structures are initialized\nin parallel and can lead to a RAM overflow.\n\nFormerly: scil_recognize_multi_bundles.py\n\npositional arguments:\n in_tractograms Input tractogram filename (.trk or .tck).\n in_config_file Path of the config file (.json)\n in_directory Path of parent folder of models directories.\n Each folder inside will be considered as a different atlas.\n in_transfo Path for the transformation to model space (.txt, .npy or .mat).\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Path for the output directory [voting_results].\n --minimal_vote_ratio MINIMAL_VOTE_RATIO\n Streamlines will only be considered for saving if\n recognized often enough [0.5].\n --seed SEED Random number generator seed 0.\n --inverse Use the inverse transformation.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] St-Onge, Etienne, Kurt G. Schilling, and Francois Rheault.\n\"BundleSeg: A versatile, reliable and reproducible approach to white\nmatter bundle segmentation.\" International Workshop on Computational\nDiffusion MRI. 
Cham: Springer Nature Switzerland (2023)\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundles" - ], - [ - "thinking", - "view", - "approach" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "long", - "work", - "more" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "matter", - "question", - "subject", - "matter" - ], - [ - "considered", - "contrast", - "difference", - "form", - "result", - "specific", - "subject", - "true", - "unique", - "work", - "example" - ], - [ - "considered", - "recognized" - ], - [ - "direction", - "direction" - ], - [ - "methods", - "use" - ], - [ - "long", - "have" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "step", - "work", - "come" - ], - [ - "clear", - "create", - "enough" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "diffusion", - "diffusion" - ], - [ - "imaging", - "mri" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "random", - "random" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "space", - "space" - ], - [ - "traditionally", - "often" - ], - [ - "held", - "in" - ], - [ - "true", - "nature" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "complex", - "structure", - "structures", - "structures" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "atlas", - "atlas" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "supported", - "supports" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "clear", - "considered", - "future", - "lack", - "long", - "matter", - "question", - "result", - "work", - "because" - ], - [ - "methods", - "using" - ], - [ - "reliable", - "reliable" - ], - [ - "streamlines", - "streamlines" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "large", - "work", - "many" - ], - [ - "step", - "try" - ], - [ - "blue", - "green", - "red", - "white", - "white" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "larger", - "size", - "size" - ], - [ - "create", - "lack", - "step", - "work", - "working", - "need" - ], - [ - "global", - "international" - ], - [ - "step", - "start" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "matter", - "question", - "thinking", - "sure" - ], - [ - "bundles", - "bundle" - ], - [ - "higher", - "increase", - "total", - "increase" - ], - [ - "considered", - "key", - "work", - "important" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_segment_bundles_for_connectivity", - "docstring": "Compute a connectivity matrix from a tractogram and a parcellation.\n\nCurrent strategy is to keep the longest streamline segment connecting 
2\nregions. If the streamline crosses other gray matter regions before reaching\nits final connected region, the kept connection is still the longest. This is\nrobust to compressed streamlines.\n\nThe output file is a hdf5 (.h5) where the keys are 'LABEL1_LABEL2' and each\ngroup is composed of 'data', 'offsets' and 'lengths' from the array_sequence.\nThe 'data' is stored in VOX/CORNER for simplicity and efficiency. See script\nscil_tractogram_convert_hdf5_to_trk.py to convert to a list of .trk bundles.\n\nFor the --outlier_threshold option the default is a recommended good trade-off\nfor a freesurfer parcellation. With smaller parcels (brainnetome, glasser) the\nthreshold should most likely be reduced.\n\nGood candidate connections to QC are the brainstem to precentral gyrus\nconnection and precentral left to precentral right connection, or equivalent\nin your parcellation.\n\nNOTE: this script can take a while to run. Please be patient.\nExample: on a tractogram with 1.8M streamlines, running on a SSD:\n- 15 minutes without post-processing, only saving final bundles.\n- 30 minutes with full post-processing, only saving final bundles.\n- 60 minutes with full post-processing, saving all possible files.\n\nFormerly: scil_decompose_connectivity.py", - "help": "usage: scil_tractogram_segment_bundles_for_connectivity.py [-h] [--no_pruning]\n [--no_remove_loops]\n [--no_remove_outliers]\n [--no_remove_curv_dev]\n [--min_length MIN_LENGTH]\n [--max_length MAX_LENGTH]\n [--outlier_threshold OUTLIER_THRESHOLD]\n [--loop_max_angle LOOP_MAX_ANGLE]\n [--curv_qb_distance CURV_QB_DISTANCE]\n [--out_dir OUT_DIR]\n [--save_raw_connections]\n [--save_intermediate]\n [--save_discarded]\n [--out_labels_list OUT_FILE]\n [--reference REFERENCE]\n [--no_bbox_check]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractograms\n [in_tractograms ...]\n in_labels out_hdf5\n\nCompute a connectivity matrix from a tractogram and a parcellation.\n\nCurrent strategy is to keep the longest streamline segment connecting 2\nregions. If the streamline crosses other gray matter regions before reaching\nits final connected region, the kept connection is still the longest. This is\nrobust to compressed streamlines.\n\nThe output file is a hdf5 (.h5) where the keys are 'LABEL1_LABEL2' and each\ngroup is composed of 'data', 'offsets' and 'lengths' from the array_sequence.\nThe 'data' is stored in VOX/CORNER for simplicity and efficiency. See script\nscil_tractogram_convert_hdf5_to_trk.py to convert to a list of .trk bundles.\n\nFor the --outlier_threshold option the default is a recommended good trade-off\nfor a freesurfer parcellation. With smaller parcels (brainnetome, glasser) the\nthreshold should most likely be reduced.\n\nGood candidate connections to QC are the brainstem to precentral gyrus\nconnection and precentral left to precentral right connection, or equivalent\nin your parcellation.\n\nNOTE: this script can take a while to run. Please be patient.\nExample: on a tractogram with 1.8M streamlines, running on a SSD:\n- 15 minutes without post-processing, only saving final bundles.\n- 30 minutes with full post-processing, only saving final bundles.\n- 60 minutes with full post-processing, saving all possible files.\n\nFormerly: scil_decompose_connectivity.py\n\npositional arguments:\n in_tractograms Tractogram filenames. Format must be one of \n trk, tck, vtk, fib, dpy.\n in_labels Labels file name (nifti). 
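For example, a basic decomposition with default pruning could look like this (hypothetical filenames, following the usage above):

>>> scil_tractogram_segment_bundles_for_connectivity.py tracking.trk labels.nii.gz decomposed.h5 --out_labels_list labels_list.txt --processes 4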
Labels must have 0 as background.\n out_hdf5 Output hdf5 file (.h5).\n\noptions:\n -h, --help show this help message and exit\n --out_labels_list OUT_FILE\n Save the labels list as text file.\n Needed for scil_connectivity_compute_matrices.py and others.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nPost-processing options:\n --no_pruning If set, will NOT prune on length.\n Length criteria in --min_length, --max_length.\n --no_remove_loops If set, will NOT remove streamlines making loops.\n Angle criteria based on --loop_max_angle.\n --no_remove_outliers If set, will NOT remove outliers using QB.\n Criteria based on --outlier_threshold.\n --no_remove_curv_dev If set, will NOT remove streamlines that deviate from the mean curvature.\n Threshold based on --curv_qb_distance.\n\nPruning options:\n --min_length MIN_LENGTH\n Pruning minimal segment length. [20.0]\n --max_length MAX_LENGTH\n Pruning maximal segment length. [200.0]\n\nOutliers and loops options:\n --outlier_threshold OUTLIER_THRESHOLD\n Outlier removal threshold when using hierarchical QB. [0.6]\n --loop_max_angle LOOP_MAX_ANGLE\n Maximal winding angle over which a streamline is considered as looping. [330.0]\n --curv_qb_distance CURV_QB_DISTANCE\n Clustering threshold for centroids curvature filtering with QB. [10.0]\n\nSaving options:\n --out_dir OUT_DIR Output directory for each connection as separate file (.trk).\n --save_raw_connections\n If set, will save all raw cut connections in a subdirectory.\n --save_intermediate If set, will save the intermediate results of filtering.\n --save_discarded If set, will save discarded streamlines in subdirectories.\n Includes loops, outliers and qb_loops.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "considered", - "potential", - "result", - "likely" - ], - [ - "anatomy", - "anatomy" - ], - [ - "considered", - "highly", - "long", - "work", - "most" - ], - [ - "bundles", - "bundles" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "left", - "off" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "higher", - "increase", - "result", - "reduced" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "matter", - "question", - "subject", - "matter" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "contrast", - "difference", - "form", - "result", - "specific", - "subject", - "true", - "unique", - "work", - "example" - ], - [ - "invalid", - "valid", - "invalid" - ], - [ - "areas", - "region", - "regions", - "regions" - ], - [ - "meaning", - "name" - ], - [ - "order", - "set" - ], - [ - "left", - "left" - ], - [ - "thalamus", - "brainstem" - ], - [ - "long", - "have" - ], - [ - "long", - "over" - ], - [ - "processing", - "processing" - ], - [ - "long", - "a" - ], - [ - "gyrus", - "gyrus" - ], - [ - "held", - "on" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - 
"considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "lack", - "result", - "without" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "work", - "making" - ], - [ - "held", - "in" - ], - [ - "clear", - "order", - "step", - "work", - "take" - ], - [ - "parcels", - "parcels" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "involved", - "others" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "connect", - "connected", - "connecting", - "connections", - "connected" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "clear", - "experience", - "thinking", - "true", - "good" - ], - [ - "methods", - "using" - ], - [ - "variety", - "work", - "other" - ], - [ - "view", - "see" - ], - [ - "future", - "current" - ], - [ - "clear", - "future", - "possibility", - "potential", - "question", - "result", - "specific", - "step", - "possible" - ], - [ - "left", - "right" - ], - [ - "precentral", - "precentral" - ], - [ - "streamlines", - "streamlines" - ], - [ - "methods", - "process", - "processes", - "processes" - ], - [ - "left", - "before" - ], - [ - "matrices", - "matrix" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "area", - "work", - "where" - ], - [ - "based", - "group" - ], - [ - "individual", - "each" - ], - [ - "connect", - "connected", - "connection", - "connections", - "connections" - ], - [ - "blue", - "dark", - "green", - "grey", - "white", - "gray" - ], - [ - "lack", - "minimal" - ], - [ - "streamline", - "streamline" - ], - [ - "result", - "results" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "connectivity", - "connectivity" - ], - [ - "step", - "start" - ], - [ - "total", - "60" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "post", - "post" - ], - [ - "clear", - "long", - "work", - "still" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "connection", - "connections", - "connection" - ], - [ - "connect", - "connected", - "connecting", - "connects", - "connecting" - ], - [ - "based", - "based" - ], - [ - "area", - "areas", - "region", - "regions", - "region" - ], - [ - "large", - "larger", - "smaller" - ], - [ - "long", - "full" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_segment_one_bundle", - "docstring": "Compute a simple Recobundles (single-atlas & single-parameters).\nThe model need to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are unsure about the transformation 'direction' use the verbose\noption (-v) and try with and without the --inverse flag. If you are not using\nthe right transformation 'direction' a warning will popup. 
If there is no\nwarning in both cases it means the transformation is very close to identity and\nboth 'direction' will work.\n\nFormerly: scil_recognize_single_bundles.py", - "help": "usage: scil_tractogram_segment_one_bundle.py [-h]\n                                             [--tractogram_clustering_thr TRACTOGRAM_CLUSTERING_THR]\n                                             [--model_clustering_thr MODEL_CLUSTERING_THR]\n                                             [--pruning_thr PRUNING_THR]\n                                             [--slr_threads SLR_THREADS]\n                                             [--seed SEED] [--inverse]\n                                             [--no_empty]\n                                             [--in_pickle IN_PICKLE | --out_pickle OUT_PICKLE]\n                                             [--reference REFERENCE]\n                                             [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                             in_tractogram in_model in_transfo\n                                             out_tractogram\n\nCompute a simple Recobundles (single-atlas & single-parameters).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are unsure about the transformation 'direction' use the verbose\noption (-v) and try with and without the --inverse flag. If you are not using\nthe right transformation 'direction' a warning will pop up. If there is no\nwarning in both cases it means the transformation is very close to identity and\nboth 'direction' will work.\n\nFormerly: scil_recognize_single_bundles.py\n\npositional arguments:\n in_tractogram Input tractogram filename.\n in_model Model to use for recognition.\n in_transfo Path for the transformation to model space (.txt, .npy or .mat).\n out_tractogram Output tractogram filename.\n\noptions:\n -h, --help show this help message and exit\n --tractogram_clustering_thr TRACTOGRAM_CLUSTERING_THR\n Clustering threshold used for the whole brain [8mm].\n --model_clustering_thr MODEL_CLUSTERING_THR\n Clustering threshold used for the model [4mm].\n --pruning_thr PRUNING_THR\n MDF threshold used for final streamlines selection [6mm].\n --slr_threads SLR_THREADS\n Number of threads for SLR [1].\n --seed SEED Random number generator seed [None].\n --inverse Use the inverse transformation.\n --no_empty Do not write file if there is no streamline.\n --in_pickle IN_PICKLE\n Input pickle clusters map file.\n Will override the tractogram_clustering_thr parameter.\n --out_pickle OUT_PICKLE\n Output pickle clusters map file.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nGaryfallidis, E., Cote, M. A., Rheault, F., ... &\nDescoteaux, M. (2018). Recognition of white matter\nbundles using local and global streamline-based registration and\nclustering. 
NeuroImage, 170, 283-295.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "anatomy", - "anatomy" - ], - [ - "global", - "global" - ], - [ - "bundles", - "bundles" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "clear", - "considered", - "highly", - "long", - "matter", - "true", - "work", - "very" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "work", - "working", - "work" - ], - [ - "left", - "from" - ], - [ - "matter", - "question", - "subject", - "matter" - ], - [ - "left", - "result", - "when" - ], - [ - "parameter", - "parameters", - "parameter" - ], - [ - "methods", - "use" - ], - [ - "direction", - "direction" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "step", - "work", - "come" - ], - [ - "clear", - "lack", - "result", - "without" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "random", - "random" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "space", - "space" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "atlas", - "atlas" - ], - [ - "methods", - "using" - ], - [ - "left", - "right" - ], - [ - "streamlines", - "streamlines" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "step", - "try" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "blue", - "green", - "red", - "white", - "white" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "streamline", - "streamline" - ], - [ - "create", - "lack", - "step", - "work", - "working", - "need" - ], - [ - "clear", - "long", - "question", - "result", - "work", - "there" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "matter", - "question", - "case" - ], - [ - "based", - "based" - ], - [ - "maps", - "map" - ], - [ - "meaning", - "order", - "result", - "step", - "true", - "means" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_shuffle", - "docstring": "Shuffle the ordering of streamlines.\n\nFormerly: scil_shuffle_streamlines.py", - "help": "usage: scil_tractogram_shuffle.py [-h] [--seed SEED] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nShuffle the ordering of streamlines.\n\nFormerly: scil_shuffle_streamlines.py\n\npositional arguments:\n in_tractogram Input tractography file.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --seed SEED Random number generator seed [None].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
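A minimal sketch with hypothetical filenames:

>>> scil_tractogram_shuffle.py bundle.trk bundle_shuffled.trk --seed 1234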
\n                        Default level is warning, default when using -v is info.\n  -f                    Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "anatomy", - "anatomy" - ], - [ - "work", - "and" - ], - [ - "held", - "on" - ], - [ - "methods", - "using" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "supported", - "support" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "streamlines", - "streamlines" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "random", - "random" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "is" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_smooth", - "docstring": "This script will smooth the streamlines, usually to remove the 'wiggles' in\nprobabilistic tracking.\n\nTwo choices of methods are available:\n- Gaussian will use the surrounding coordinates for smoothing. Streamlines are\nresampled to 1mm step-size and the smoothing is performed on the coordinate\narray. The sigma will be indicative of the number of points surrounding the\ncenter points to be used for blurring.\n- Spline will fit a spline curve to every streamline using a sigma and the\nnumber of control points. The sigma represents the allowed distance from the\ncontrol points. The final streamlines are obtained by evaluating the spline at\nconstant intervals so that it will have the same number of points as initially.\n\nThis script enforces endpoints to remain the same.\n\nWARNING:\n- too low of a sigma (e.g. 1) with a lot of control points (e.g. 15)\nwill create crazy streamlines that could end up out of the bounding box.\n- data_per_point will be lost.\n\nFormerly: scil_smooth_streamlines.py", - "help": "usage: scil_tractogram_smooth.py [-h]\n                                 (--gaussian SIGMA | --spline SIGMA NB_CTRL_POINT)\n                                 [--compress [COMPRESS_TH]]\n                                 [--reference REFERENCE]\n                                 [-v [{DEBUG,INFO,WARNING}]] [-f]\n                                 in_tractogram out_tractogram\n\nThis script will smooth the streamlines, usually to remove the 'wiggles' in\nprobabilistic tracking.\n\nTwo choices of methods are available:\n- Gaussian will use the surrounding coordinates for smoothing. Streamlines are\nresampled to 1mm step-size and the smoothing is performed on the coordinate\narray. The sigma will be indicative of the number of points surrounding the\ncenter points to be used for blurring.\n- Spline will fit a spline curve to every streamline using a sigma and the\nnumber of control points. The sigma represents the allowed distance from the\ncontrol points. The final streamlines are obtained by evaluating the spline at\nconstant intervals so that it will have the same number of points as initially.\n\nThis script enforces endpoints to remain the same.\n\nWARNING:\n- too low of a sigma (e.g. 1) with a lot of control points (e.g. 15)\nwill create crazy streamlines that could end up out of the bounding box.\n- data_per_point will be lost.\n\nFormerly: scil_smooth_streamlines.py\n\npositional arguments:\n in_tractogram Input tractography file.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --gaussian SIGMA Sigma for smoothing. Use the value of surrounding\n X,Y,Z points on the streamline to blur the streamlines.\n A good sigma choice would be around 5.\n --spline SIGMA NB_CTRL_POINT\n Sigma for smoothing. 
Model each streamline as a spline.\n A good sigma choice would be around 5 and control point around 10.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm.[0.1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "long", - "end" - ], - [ - "total", - "number" - ], - [ - "anatomy", - "anatomy" - ], - [ - "clear", - "future", - "order", - "step", - "work", - "would" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "difference", - "point" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "clear", - "long", - "too" - ], - [ - "experience", - "thinking", - "lot" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "long", - "have" - ], - [ - "examining", - "evaluating" - ], - [ - "long", - "a" - ], - [ - "clear", - "left", - "out" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "supported", - "support" - ], - [ - "future", - "will" - ], - [ - "loss", - "lost" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "create" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "subsequently", - "initially" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "clear", - "long", - "matter", - "result", - "work", - "so" - ], - [ - "action", - "process", - "step", - "step" - ], - [ - "clear", - "experience", - "thinking", - "true", - "good" - ], - [ - "represent", - "represents" - ], - [ - "methods", - "using" - ], - [ - "area", - "areas", - "surrounding" - ], - [ - "traditionally", - "usually" - ], - [ - "streamlines", - "streamlines" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "result", - "resulting" - ], - [ - "considered", - "is" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "analysis", - "methodology", - "methods", - "processes", - "methods" - ], - [ - "high", - "higher", - "level", - "low" - ], - [ - "individual", - "each" - ], - [ - "larger", - "size", - "size" - ], - [ - "streamline", - "streamline" - ], - [ - "order", - "allowed" - ], - [ - "long", - "two" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "result", - "work", - "could" - ], - [ - "data", - "tracking", - "tracking" - ], - [ - "probabilistic", - "probabilistic" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_split", - "docstring": "Split a tractogram into multiple files, 2 options available :\nSplit into X files, or split into files of Y streamlines.\n\nBy default, streamlines to add to each chunk will be chosen randomly.\nOptionally, you can split streamlines...\n - sequentially (the first n/nb_chunks streamlines in the first chunk and so\n on).\n - randomly, but per Quickbundles clusters.\n\nFormerly: 
scil_split_tractogram.py", - "help": "usage: scil_tractogram_split.py [-h] [--out_dir OUT_DIR]\n (--chunk_size CHUNK_SIZE | --nb_chunks NB_CHUNKS)\n [--split_per_cluster | --do_not_randomize]\n [--qbx_thresholds t [t ...]] [--seed SEED]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_prefix\n\nSplit a tractogram into multiple files, 2 options available :\nSplit into X files, or split into files of Y streamlines.\n\nBy default, streamlines to add to each chunk will be chosen randomly.\nOptionally, you can split streamlines...\n - sequentially (the first n/nb_chunks streamlines in the first chunk and so\n on).\n - randomly, but per Quickbundles clusters.\n\nFormerly: scil_split_tractogram.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_prefix Prefix for the output tractogram, index will be appended \n automatically (ex, _0.trk), based on input type.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Put all output tractogram in a specific directory.\n --chunk_size CHUNK_SIZE\n The maximum number of streamlines per file.\n --nb_chunks NB_CHUNKS\n Divide the file in equal parts.\n --split_per_cluster If set, splitting will be done per cluster (computed with \n Quickbundles) to ensure that at least some streamlines are \n kept from each bundle in each chunk. Else, random splitting is\n performed (default).\n --do_not_randomize If set, splitting is done sequentially through the original \n sft instead of using random indices.\n --qbx_thresholds t [t ...]\n If you chose option '--split_per_cluster', you may set the \n QBx threshold value(s) here. Default: [40, 30, 20]\n --seed SEED Use a specific random seed for the subsampling.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "anatomy", - "anatomy" - ], - [ - "average", - "per" - ], - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "left", - "into" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "meaning", - "name" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "thinking", - "you" - ], - [ - "specific", - "specific" - ], - [ - "long", - "a" - ], - [ - "total", - "40" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "supported", - "support" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "random", - "random" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "clear", - "long", - "matter", - "result", - "work", - "so" - ], - [ - "clear", - "left", - "work", - "put" - ], - [ - "methods", - "using" - ], - [ - "order", - "work", - "instead" - ], - [ - "streamlines", - "streamlines" - ], - [ - "matter", - "thinking", - "else" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "result", - "may" - ], - [ - "represent", - "chosen" - ], - [ - "individual", - "each" - ], - [ - "work", - "working", - "done" - ], - [ - "work", - "all" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "based", - "based" - ], - [ - "areas", - "parts" - ], - [ - "bundles", - "bundle" - ], - [ - "large", - "work", - "some" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_bingham_fit", - "docstring": "Visualize 2-dimensional Bingham volume slice loaded from disk. The volume is\nassumed to be saved from scil_fodf_to_bingham.py.\n\nGiven an image of Bingham coefficients, this script displays a slice in a\ngiven orientation.", - "help": "usage: scil_viz_bingham_fit.py [-h] [--slice_index SLICE_INDEX]\n [--win_dims WIDTH HEIGHT]\n [--interactor {image,trackball}]\n [--axis_name {sagittal,coronal,axial}]\n [--silent] [--output OUTPUT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--color_per_lobe]\n in_bingham\n\nVisualize 2-dimensional Bingham volume slice loaded from disk. The volume is\nassumed to be saved from scil_fodf_to_bingham.py.\n\nGiven an image of Bingham coefficients, this script displays a slice in a\ngiven orientation.\n\npositional arguments:\n in_bingham Input SH image file.\n\noptions:\n -h, --help show this help message and exit\n --slice_index SLICE_INDEX\n Index of the slice to visualize along a given axis. Defaults to middle of volume.\n --win_dims WIDTH HEIGHT\n The dimensions for the vtk window. [(768, 768)]\n --interactor {image,trackball}\n Specify interactor mode for vtk window. [trackball]\n --axis_name {sagittal,coronal,axial}\n Name of the axis to visualize. 
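A quick sketch (hypothetical filename) for saving a non-interactive screenshot of one slice:

>>> scil_viz_bingham_fit.py in_bingham.nii.gz --slice_index 30 --silent --output bingham_slice.png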
[axial]\n --silent Disable interactive visualization.\n --output OUTPUT Path to output file.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Name of the sphere used to reconstruct SF. [symmetric362]\n --color_per_lobe Color each bingham distribution with a different color. [False]\n", - "synonyms": [ - [ - "individual", - "specific", - "unique", - "variety", - "different" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "orientation", - "orientation" - ], - [ - "work", - "and" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "meaning", - "name" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "axial", - "axial" - ], - [ - "methods", - "using" - ], - [ - "area", - "main", - "along" - ], - [ - "sagittal", - "sagittal" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "middle", - "middle" - ], - [ - "individual", - "each" - ], - [ - "coronal", - "coronal" - ], - [ - "false", - "false" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_bundle", - "docstring": "Visualize bundles.\n\nExample usages:\n\n# Visualize streamlines as tubes, each bundle with a different color\n>>> scil_viz_bundle.py path_to_bundles/ --shape tube --random_coloring 1337\n\n# Visualize a tractogram with each streamlines drawn as lines, colored with\n# their local orientation, but only load 1 in 10 streamlines\n>>> scil_viz_bundle.py tractogram.trk --shape line --subsample 10\n\n# Visualize CSTs as large tubes and color them from a list of colors in a file\n>>> scil_viz_bundle.py path_to_bundles/CST_* --width 0.5\n --color_dict colors.json", - "help": "usage: scil_viz_bundle.py [-h]\n [--random_coloring SEED | --uniform_coloring R G B | --local_coloring | --color_dict JSON | --color_from_streamlines KEY | --color_from_points KEY]\n [--shape {line,tube}] [--width WIDTH]\n [--subsample SUBSAMPLE] [--downsample DOWNSAMPLE]\n [--background R G B] [-v [{DEBUG,INFO,WARNING}]]\n in_bundles [in_bundles ...]\n\nVisualize bundles.\n\nExample usages:\n\n# Visualize streamlines as tubes, each bundle with a different color\n>>> scil_viz_bundle.py path_to_bundles/ --shape tube --random_coloring 1337\n\n# Visualize a tractogram with each streamlines drawn as lines, colored with\n# their local orientation, but only load 1 in 10 streamlines\n>>> scil_viz_bundle.py tractogram.trk --shape line --subsample 10\n\n# Visualize CSTs as large tubes and color them from a list of colors in a file\n>>> scil_viz_bundle.py path_to_bundles/CST_* --width 0.5\n --color_dict colors.json\n\npositional arguments:\n in_bundles List of tractography files supported by nibabel.\n\noptions:\n -h, --help show this help message and exit\n --shape {line,tube} Display streamlines either as lines or 
tubes.\n [Default: tube]\n --width WIDTH Width of tubes or lines representing streamlines\n [Default: 0.25]\n --subsample SUBSAMPLE\n Only load 1 in N streamlines.\n [Default: 1]\n --downsample DOWNSAMPLE\n Downsample streamlines to N points.\n [Default: None]\n --background R G B RGB values [0, 255] of the color of the background.\n [Default: [0, 0, 0]]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nColouring options:\n --random_coloring SEED\n Assign a random color to bundles.\n --uniform_coloring R G B\n Assign a uniform color to streamlines.\n --local_coloring Assign coloring to streamlines depending on their local orientations.\n --color_dict JSON JSON file containing colors for each bundle.\n Bundle filenames are indicated as keys and colors as values.\n A 'default' key and value can be included.\n --color_from_streamlines KEY\n Extract a color per streamline from the data_per_streamline property of the tractogram at the specified key.\n --color_from_points KEY\n Extract a color per point from the data_per_point property of the tractogram at the specified key.\n", - "synonyms": [["individual", "specific", "unique", "variety", "different"], ["bundles", "bundles"], ["average", "per"], ["clear", "considered", "long", "result", "work", "only"], ["orientation", "orientation"], ["clear", "left", "long", "result", "work", "but"], ["work", "and"], ["considered", "are"], ["difference", "point"], ["high", "higher", "highest", "level", "level"], ["left", "from"], ["left", "result", "when"], ["considered", "contrast", "difference", "form", "result", "specific", "subject", "true", "unique", "work", "example"], ["long", "a"], ["key", "main", "key"], ["represent", "representing"], ["held", "on"], ["clear", "long", "main", "result", "the"], ["clear", "considered", "form", "long", "result", "true", "work", "this"], ["future", "work", "working", "for"], ["clear", "considered", "result", "work", "be"], ["random", "random"], ["considered", "role", "work", "as"], ["create", "work", "working", "help"], ["held", "in"], ["blue", "colored"], ["large", "larger", "large"], ["long", "with"], ["parameters", "specified"], ["methods", "using"], ["streamlines", "streamlines"], ["supported", "supported"], ["create", "order", "work", "to"], ["considered", "is"], ["individual", "each"], ["streamline", "streamline"], ["indicating", "indicated"], ["bundles", "bundle"], ["shape", "structure", "shape"], ["clear", "considered", "form", "result", "either"]], - "keywords": [] - }, - { - "name": "scil_viz_bundle_screenshot_mni", - "docstring": "Register bundle to a template for screenshots using a reference.\nThe template can be any MNI152 (any resolution, cropped or not).\nIf your in_anat has a skull, select a MNI152 template with a skull and\nvice-versa.\n\nIf the bundle is already in MNI152 space, do not use --target_template.\n\nAxial, coronal and sagittal slices are captured.\nSagittal can be captured from the left (default) or 
the right.\n\nFor the --roi argument: If 1 value is provided, the ROI will be white,\nif 4 values are provided, the ROI will be colored with the RGB values\nprovided, if 5 values are provided, it is RGBA (values from 0-255).", - "help": "usage: scil_viz_bundle_screenshot_mni.py [-h]\n [--target_template TARGET_TEMPLATE]\n [--local_coloring | --uniform_coloring R G B | --reference_coloring COLORBAR]\n [--roi ROI [ROI ...]] [--right]\n [--anat_opacity ANAT_OPACITY]\n [--output_suffix OUTPUT_SUFFIX]\n [--out_dir OUT_DIR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle in_anat\n\nRegister bundle to a template for screenshots using a reference.\nThe template can be any MNI152 (any resolution, cropped or not).\nIf your in_anat has a skull, select a MNI152 template with a skull and\nvice-versa.\n\nIf the bundle is already in MNI152 space, do not use --target_template.\n\nAxial, coronal and sagittal slices are captured.\nSagittal can be captured from the left (default) or the right.\n\nFor the --roi argument: If 1 value is provided, the ROI will be white,\nif 4 values are provided, the ROI will be colored with the RGB values\nprovided, if 5 values are provided, it is RGBA (values from 0-255).\n\npositional arguments:\n in_bundle Path of the input bundle.\n in_anat Path of the reference file (.nii or nii.gz).\n\noptions:\n -h, --help show this help message and exit\n --target_template TARGET_TEMPLATE\n Path to the target MNI152 template for registration. If in_anat has a skull, select a MNI152 template with a skull and vice-versa.\n --local_coloring Color streamlines using local segments orientation.\n --uniform_coloring R G B\n Color streamlines with uniform coloring.\n --reference_coloring COLORBAR\n Color streamlines with reference coloring (0-255).\n --roi ROI [ROI ...] Path to a ROI file (.nii or nii.gz).\n --right Take screenshot from the right instead of the left for the sagittal plane.\n --anat_opacity ANAT_OPACITY\n Set the opacity for the anatomy, use 0 for complete transparency, 1 for opaque. [0.3]\n --output_suffix OUTPUT_SUFFIX\n Add a suffix to the output, else the axis name is used.\n --out_dir OUT_DIR Put all images in a specific directory.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [["anatomy", "anatomy"], ["orientation", "orientation"], ["work", "and"], ["considered", "are"], ["high", "higher", "highest", "level", "level"], ["left", "from"], ["left", "result", "when"], ["image", "images"], ["meaning", "name"], ["methods", "use"], ["question", "argument"], ["order", "set"], ["left", "left"], ["specific", "specific"], ["long", "a"], ["held", "on"], ["clear", "long", "main", "result", "the"], ["clear", "considered", "form", "long", "result", "true", "work", "this"], ["future", "work", "working", "for"], ["future", "will"], ["clear", "considered", "result", "work", "be"], ["create", "work", "working", "help"], ["space", "space"], ["held", "in"], ["blue", "colored"], ["clear", "order", "step", "work", "take"], ["action", "clear", "considered", "future", "matter", "possibility", "potential", "question", "result", "specific", "any"], ["long", "with"], ["axial", "axial"], ["clear", "left", "work", "put"], ["methods", "using"], ["order", "work", "instead"], ["left", "right"], ["sagittal", "sagittal"], ["streamlines", "streamlines"], ["matter", "thinking", "else"], ["create", "order", "work", "to"], ["considered", "is"], ["blue", "green", "red", "white", "white"], ["planes", "plane"], ["clear", "considered", "result", "work", "it"], ["work", "all"], ["clear", "matter", "question", "result", "if"], ["clear", "considered", "long", "matter", "order", "question", "result", "work", "not"], ["coronal", "coronal"], ["bundles", "bundle"]], - "keywords": [] - }, - { - "name": "scil_viz_bundle_screenshot_mosaic", - "docstring": "Visualize bundles from a list. The script will output a mosaic (image) with\nscreenshots, 6 views per bundle in the list.", - "help": "usage: scil_viz_bundle_screenshot_mosaic.py [-h] [--uniform_coloring R G B]\n [--random_coloring SEED]\n [--zoom ZOOM] [--ttf TTF]\n [--ttf_size TTF_SIZE]\n [--opacity_background OPACITY_BACKGROUND]\n [--resolution_of_thumbnails RESOLUTION_OF_THUMBNAILS]\n [--light_screenshot]\n [--no_information]\n [--no_bundle_name]\n [--no_streamline_number]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_volume in_bundles\n [in_bundles ...] out_image\n\nVisualize bundles from a list. The script will output a mosaic (image) with\nscreenshots, 6 views per bundle in the list.\n\npositional arguments:\n in_volume Volume used as background (e.g. T1, FA, b0).\n in_bundles List of tractography files supported by nibabel or binary mask files.\n out_image Name of the output image mosaic (e.g. mosaic.jpg, mosaic.png).\n\noptions:\n -h, --help show this help message and exit\n --uniform_coloring R G B\n Assign a uniform color to streamlines (or ROIs).\n --random_coloring SEED\n Assign a random color to streamlines (or ROIs).\n --zoom ZOOM Rendering zoom. 
A value greater than 1 is a zoom-in,\n a value less than 1 is a zoom-out [1.0].\n --ttf TTF Path of the true type font to use for legends.\n --ttf_size TTF_SIZE Font size (int) to use for the legends [35].\n --opacity_background OPACITY_BACKGROUND\n Opacity of background image, between 0 and 1.0 [0.4].\n --resolution_of_thumbnails RESOLUTION_OF_THUMBNAILS\n Resolution of thumbnails used in mosaic [300].\n --light_screenshot Keep only 3 views instead of 6 [False].\n --no_information Don't display axis and bundle information [False].\n --no_bundle_name Don't display bundle name [False].\n --no_streamline_number\n Don't display bundle streamlines number [False].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [["total", "number"], ["anatomy", "anatomy"], ["volume", "volumes", "volume"], ["bundles", "bundles"], ["average", "per"], ["clear", "considered", "long", "result", "work", "only"], ["work", "and"], ["high", "higher", "highest", "level", "level"], ["left", "from"], ["left", "result", "when"], ["rendered", "rendering", "rendering"], ["meaning", "name"], ["methods", "use"], ["long", "a"], ["clear", "left", "out"], ["held", "on"], ["clear", "long", "main", "result", "the"], ["clear", "considered", "form", "long", "result", "true", "work", "this"], ["supported", "support"], ["future", "work", "working", "for"], ["data", "knowledge", "information"], ["future", "will"], ["random", "random"], ["considered", "role", "work", "as"], ["create", "work", "working", "help"], ["held", "in"], ["long", "with"], ["binary", "binary"], ["methods", "using"], ["order", "work", "instead"], ["long", "than"], ["streamlines", "streamlines"], ["highly", "less"], ["meaning", "true", "true"], ["supported", "supported"], ["create", "order", "work", "to"], ["image", "image"], ["considered", "is"], ["view", "views", "views"], ["larger", "size", "size"], ["false", "false"], ["bundles", "bundle"], ["greater", "greater"]], - "keywords": [] - }, - { - "name": "scil_viz_connectivity", - "docstring": "Script to display a connectivity matrix and adjust the desired visualization.\nMade to work with scil_tractogram_segment_bundles_for_connectivity.py and\nscil_connectivity_reorder_rois.py.\n\nThis script can either display the axis labels as:\n- Coordinates (0..N)\n- Labels (using --labels_list)\n- Names (using --labels_list and --lookup_table)\nExamples of labels_list.txt and lookup_table.json can be found in the\nfreesurfer_flow output (https://github.com/scilus/freesurfer_flow)\n\nIf the matrix was made from a bigger matrix using\nscil_connectivity_reorder_rois.py, provide the text file(s), using\n--labels_list and/or --reorder_txt.\n\nThe chord chart always displays the regions in the order they are defined\n(clockwise); the color is attributed in that order following a 
colormap. The\nthickness of the line represents the 'size/intensity'; the greater the value is,\nthe thicker the line will be. In order to hide the low values, two options are\navailable:\n- Angle threshold + alpha, any connections with a small angle on the chord\n chart will be slightly transparent to increase the focus on bigger\n connections.\n- Percentile, hide any connections with a value below that percentile.", - "help": "", - "synonyms": [["true", "always"], ["large", "larger", "small"], ["work", "and"], ["result", "following"], ["considered", "are"], ["order", "order"], ["work", "working", "work"], ["left", "from"], ["clear", "long", "work", "they"], ["long", "a"], ["unique", "variety", "examples"], ["held", "on"], ["represent", "represent"], ["clear", "long", "main", "result", "the"], ["clear", "considered", "form", "long", "result", "true", "work", "this"], ["clear", "result", "work", "that"], ["future", "will"], ["clear", "considered", "result", "work", "be"], ["considered", "role", "work", "as"], ["threshold", "threshold"], ["held", "in"], ["action", "clear", "considered", "future", "matter", "possibility", "potential", "question", "result", "specific", "any"], ["long", "with"], ["create", "provide"], ["methods", "using"], ["larger", "bigger"], ["defined", "function", "defined"], ["matrices", "matrix"], ["create", "order", "work", "to"], ["considered", "is"], ["considered", "subsequently", "was"], ["high", "higher", "level", "low"], ["connect", "connected", "connection", "connections", "connections"], ["larger", "size", "size"], ["long", "two"], ["clear", "work", "made"], ["connectivity", "connectivity"], ["clear", "matter", "question", "result", "if"], ["higher", "increase", "total", "increase"], ["attention", "future", "work", "focus"], ["clear", "considered", "form", "result", "either"], ["greater", "greater"]], - "keywords": [] - }, - { - "name": "scil_viz_dti_screenshot", - "docstring": "Register DWI to a template for screenshots.\nThe templates are on http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009\n\nFor quick quality control, the MNI template can be downsampled to 2mm iso.\nAxial, coronal and sagittal slices are captured.", - "help": "usage: scil_viz_dti_screenshot.py [-h] [--shells SHELLS [SHELLS ...]]\n [--out_suffix OUT_SUFFIX]\n [--out_dir OUT_DIR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_template\n\nRegister DWI to a template for screenshots.\nThe templates are on http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009\n\nFor quick quality control, the MNI template can be downsampled to 2mm iso.\nAxial, coronal and sagittal slices are captured.\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bval file, in FSL format.\n in_bvec Path of the bvec file, in FSL format.\n in_template Path to the target MNI152 template for \n registration, use the one provided online.\n\noptions:\n -h, --help show this help message and exit\n --shells SHELLS [SHELLS ...]\n Shells to use for 
DTI fit (usually below 1200), b0 must be listed.\n --out_suffix OUT_SUFFIX\n Add a suffix to the output, else the axis name is used.\n --out_dir OUT_DIR Put all images in a specific directory.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [["volume", "volumes", "volume"], ["work", "and"], ["considered", "are"], ["high", "higher", "highest", "level", "level"], ["left", "result", "when"], ["image", "images"], ["meaning", "name"], ["methods", "use"], ["specific", "specific"], ["long", "a"], ["held", "on"], ["clear", "order", "step", "must"], ["clear", "long", "main", "result", "the"], ["clear", "considered", "form", "long", "result", "true", "work", "this"], ["future", "work", "working", "for"], ["diffusion", "diffusion"], ["clear", "considered", "result", "work", "be"], ["create", "work", "working", "help"], ["held", "in"], ["axial", "axial"], ["clear", "left", "work", "put"], ["methods", "using"], ["sagittal", "sagittal"], ["traditionally", "usually"], ["matter", "thinking", "else"], ["create", "order", "work", "to"], ["considered", "is"], ["considered", "long", "work", "one"], ["work", "all"], ["coronal", "coronal"], ["lack", "quality"]], - "keywords": [] - }, - { - "name": "scil_viz_fodf", - "docstring": "Visualize 2-dimensional fODF slice loaded from disk.\n\nGiven an image of SH coefficients, this script displays a slice in a\ngiven orientation. The user can also add a background on top of which the\nfODF are to be displayed. Using a full SH basis, the script can be used to\nvisualize asymmetric fODF. The user can supply a peaks image to visualize\npeaks on top of fODF.\n\nIf a transparency_mask is given (e.g. a brain mask), all values outside the\nmask's non-zero region are set to full transparency in the saved scene.\n\n!!! CAUTION !!! 
The script is memory intensive (about 9kB of allocated RAM per\nvoxel, or 9GB for a 1M voxel volume) with a sphere interpolated to 362 points.", - "help": "usage: scil_viz_fodf.py [-h] [--slice_index SLICE_INDEX]\n [--win_dims WIDTH HEIGHT]\n [--interactor {image,trackball}]\n [--axis_name {sagittal,coronal,axial}] [--silent]\n [--in_transparency_mask IN_TRANSPARENCY_MASK]\n [--output OUTPUT] [-f]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--sphere {repulsion200,repulsion100,symmetric724,symmetric642,symmetric362,repulsion724}]\n [--sph_subdivide SPH_SUBDIVIDE] [--mask MASK]\n [--colormap COLORMAP | --color_rgb COLOR_RGB COLOR_RGB COLOR_RGB]\n [--scale SCALE] [--radial_scale_off] [--norm_off]\n [-v [{DEBUG,INFO,WARNING}]] [--background BACKGROUND]\n [--bg_range MIN MAX] [--bg_opacity BG_OPACITY]\n [--bg_offset BG_OFFSET]\n [--bg_interpolation {nearest,linear}]\n [--bg_color BG_COLOR BG_COLOR BG_COLOR]\n [--peaks PEAKS]\n [--peaks_color PEAKS_COLOR PEAKS_COLOR PEAKS_COLOR]\n [--peaks_width PEAKS_WIDTH]\n [--peaks_values PEAKS_VALUES | --peaks_length PEAKS_LENGTH]\n [--variance VARIANCE] [--variance_k VARIANCE_K]\n [--var_color VAR_COLOR VAR_COLOR VAR_COLOR]\n in_fodf\n\nVisualize 2-dimensional fODF slice loaded from disk.\n\nGiven an image of SH coefficients, this script displays a slice in a\ngiven orientation. The user can also add a background on top of which the\nfODF are to be displayed. Using a full SH basis, the script can be used to\nvisualize asymmetric fODF. The user can supply a peaks image to visualize\npeaks on top of fODF.\n\nIf a transparency_mask is given (e.g. a brain mask), all values outside the\nmask's non-zero region are set to full transparency in the saved scene.\n\n!!! CAUTION !!! The script is memory intensive (about 9kB of allocated RAM per\nvoxel, or 9GB for a 1M voxel volume) with a sphere interpolated to 362 points.\n\npositional arguments:\n in_fodf Input SH image file.\n\noptions:\n -h, --help show this help message and exit\n --slice_index SLICE_INDEX\n Index of the slice to visualize along a given axis. Defaults to middle of volume.\n --win_dims WIDTH HEIGHT\n The dimensions for the vtk window. [(768, 768)]\n --interactor {image,trackball}\n Specify interactor mode for vtk window. [trackball]\n --axis_name {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n --silent Disable interactive visualization.\n --in_transparency_mask IN_TRANSPARENCY_MASK\n Input mask image file.\n --output OUTPUT Path to output file.\n -f Force overwriting of the output files.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sphere {repulsion200,repulsion100,symmetric724,symmetric642,symmetric362,repulsion724}\n Name of the sphere used to reconstruct SF. [symmetric362]\n --sph_subdivide SPH_SUBDIVIDE\n Number of subdivisions for given sphere. If not supplied, use the given sphere as is.\n --mask MASK Optional mask file. 
Only fODF inside the mask are displayed.\n --colormap COLORMAP Colormap for the ODF slicer. If None, then an RGB colormap will be used. [None]\n --color_rgb COLOR_RGB COLOR_RGB COLOR_RGB\n Uniform color for the ODF slicer given as RGB, scaled between 0 and 1. [None]\n --scale SCALE Scaling factor for FODF. [0.5]\n --radial_scale_off Disable radial scale for ODF slicer.\n --norm_off Disable normalization of ODF slicer.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nBackground arguments:\n --background BACKGROUND\n Background image file. If RGB, values must be between 0 and 255.\n --bg_range MIN MAX The range of values mapped to range [0, 1] for background image. [(bg.min(), bg.max())]\n --bg_opacity BG_OPACITY\n The opacity of the background image. Opacity of 0.0 means transparent and 1.0 is completely visible. [1.0]\n --bg_offset BG_OFFSET\n The offset of the background image. [0.5]\n --bg_interpolation {nearest,linear}\n Interpolation mode for the background image. [nearest]\n --bg_color BG_COLOR BG_COLOR BG_COLOR\n The color of the overall background, behind everything. Must be RGB values scaled between 0 and 1. [(0, 0, 0)]\n\nPeaks arguments:\n --peaks PEAKS Peaks image file.\n --peaks_color PEAKS_COLOR PEAKS_COLOR PEAKS_COLOR\n Color used for peaks, as RGB values scaled between 0 and 1. If None, then an RGB colormap is used. [None]\n --peaks_width PEAKS_WIDTH\n Width of peaks segments. [1.0]\n\nPeaks scaling arguments:\n Choose between peaks values and arbitrary length.\n\n --peaks_values PEAKS_VALUES\n Peaks values file.\n --peaks_length PEAKS_LENGTH\n Length of the peaks segments. [0.65]\n\nVariance arguments:\n For the visualization of fodf uncertainty, the variance is used as follows: mean + k * sqrt(variance), where mean is the input fodf (in_fodf) and k is the scaling factor (variance_k).\n\n --variance VARIANCE FODF variance file.\n --variance_k VARIANCE_K\n Scaling factor (k) for the computation of the fodf uncertainty. [1]\n --var_color VAR_COLOR VAR_COLOR VAR_COLOR\n Color of variance outline. Must be RGB values scaled between 0 and 1. 
[(1, 1, 1)]\n", - "synonyms": [["application", "database", "user"], ["total", "number"], ["memory", "memory"], ["volume", "volumes", "volume"], ["based", "form", "result", "which"], ["average", "per"], ["clear", "considered", "long", "result", "work", "only"], ["orientation", "orientation"], ["work", "and"], ["work", "also"], ["considered", "are"], ["brain", "brain"], ["high", "higher", "highest", "level", "level"], ["papers", "paper"], ["left", "from"], ["left", "result", "when"], ["meaning", "name"], ["methods", "use"], ["order", "set"], ["long", "a"], ["voxel", "voxel"], ["held", "on"], ["clear", "order", "step", "must"], ["clear", "long", "main", "result", "the"], ["process", "implementation"], ["clear", "considered", "form", "long", "result", "true", "work", "this"], ["future", "work", "working", "for"], ["future", "will"], ["clear", "considered", "result", "work", "be"], ["considered", "role", "work", "as"], ["create", "work", "working", "help"], ["held", "in"], ["long", "with"], ["left", "subsequently", "then"], ["axial", "axial"], ["methods", "using"], ["thinking", "everything"], ["area", "main", "along"], ["sagittal", "sagittal"], ["considered", "order", "result", "subject", "given"], ["variance", "variance"], ["step", "follow"], ["create", "order", "work", "to"], ["image", "image"], ["considered", "is"], ["middle", "middle"], ["area", "work", "where"], ["left", "behind"], ["difference", "meaning", "result", "mean"], ["work", "all"], ["clear", "matter", "question", "result", "if"], ["coronal", "coronal"], ["clear", "considered", "long", "matter", "order", "question", "result", "work", "not"], ["meaning", "order", "result", "step", "true", "means"], ["clear", "considered", "form", "result", "either"], ["long", "full"], ["increase", "offset"]], - "keywords": [] - }, - { - "name": "scil_viz_gradients_screenshot", - "docstring": "Visualization for directions on a sphere, either from a gradient sampling (i.e.\na list of b-vectors) or from a Dipy sphere.", - "help": "usage: scil_viz_gradients_screenshot.py [-h]\n (--in_gradient_scheme IN_GRADIENT_SCHEME [IN_GRADIENT_SCHEME ...] | --dipy_sphere {symmetric362,symmetric642,symmetric724,repulsion724,repulsion100,repulsion200})\n [--dis-sym]\n [--out_basename OUT_BASENAME]\n [--res RES] [--dis-sphere]\n [--dis-proj] [--plot_shells]\n [--same-color] [--opacity OPACITY]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n\nVisualization for directions on a sphere, either from a gradient sampling (i.e.\na list of b-vectors) or from a Dipy sphere.\n\noptions:\n -h, --help show this help message and exit\n --in_gradient_scheme IN_GRADIENT_SCHEME [IN_GRADIENT_SCHEME ...]\n Gradient sampling filename. 
(only accepts .bvec and\n .bval together or only .b).\n --dipy_sphere {symmetric362,symmetric642,symmetric724,repulsion724,repulsion100,repulsion200}\n Dipy sphere choice.\n --dis-sym Disable antipodal symmetry.\n --out_basename OUT_BASENAME\n Output filename for the picture, without extension (will be\n png file(s)).\n --res RES Resolution of the output picture(s).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n\nEnable/Disable renderings:\n --dis-sphere Disable the rendering of the sphere.\n --dis-proj Disable rendering of the projection supershell.\n --plot_shells Enable rendering each shell individually.\n\nRendering options:\n --same-color Use the same color for all shells.\n --opacity OPACITY Opacity for the shells.\n", - "synonyms": [["clear", "considered", "long", "result", "work", "only"], ["work", "and"], ["working", "together"], ["high", "higher", "highest", "level", "level"], ["left", "from"], ["left", "result", "when"], ["rendered", "rendering", "rendering"], ["meaning", "name"], ["methods", "use"], ["direction", "directions"], ["long", "a"], ["held", "on"], ["clear", "long", "main", "result", "the"], ["clear", "considered", "form", "long", "meaning", "result", "true", "work", "same"], ["clear", "considered", "form", "long", "result", "true", "work", "this"], ["clear", "lack", "result", "without"], ["future", "work", "working", "for"], ["thinking", "i"], ["future", "will"], ["clear", "considered", "result", "work", "be"], ["create", "work", "working", "help"], ["methods", "using"], ["projection", "projection"], ["considered", "is"], ["image", "picture"], ["individual", "each"], ["work", "all"], ["clear", "considered", "form", "result", "either"]], - "keywords": [] - }, - { - "name": "scil_viz_tractogram_seeds", - "docstring": "Visualize seeds used to generate the tractogram or bundle.\nWhen tractography was run, each streamline produced by the tracking algorithm\nsaved its seeding point (its origin).\n\nThe tractogram must have been generated from scil_tracking_local.py or\nscil_tracking_pft.py with the --save_seeds option.", - "help": "usage: scil_viz_tractogram_seeds.py [-h] [--save SAVE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n tractogram\n\nVisualize seeds used to generate the tractogram or bundle.\nWhen tractography was run, each streamline produced by the tracking algorithm\nsaved its seeding point (its origin).\n\nThe tractogram must have been generated from scil_tracking_local.py or\nscil_tracking_pft.py with the --save_seeds option.\n\npositional arguments:\n tractogram Tractogram file (must be trk)\n\noptions:\n -h, --help show this help message and exit\n --save SAVE If set, save a screenshot of the result in the specified filename\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [["create", "generate"], ["work", "and"], ["difference", "point"], ["high", "higher", "highest", "level", "level"], ["left", "from"], ["left", "result", "when"], ["lack", "loss", "result", "result"], ["seeding", "seeding"], ["order", "set"], ["long", "have"], ["long", "a"], ["clear", "order", "step", "must"], ["held", "on"], ["clear", "long", "main", "result", "the"], ["clear", "considered", "form", "long", "result", "true", "work", "this"], ["clear", "considered", "result", "work", "be"], ["create", "work", "working", "help"], ["held", "in"], ["long", "with"], ["parameters", "specified"], ["considered", "involved", "work", "been"], ["algorithm", "algorithm"], ["methods", "using"], ["create", "order", "work", "to"], ["considered", "is"], ["considered", "subsequently", "was"], ["individual", "each"], ["streamline", "streamline"], ["clear", "matter", "question", "result", "if"], ["bundles", "bundle"], ["data", "tracking", "tracking"]], - "keywords": [] - }, - { - "name": "scil_viz_tractogram_seeds_3d", - "docstring": "Visualize seeds as 3D points, with heatmaps corresponding to seed density\n\nExample usages:\n\n>>> scil_viz_tractogram_seeds_3d.py seeds.nii.gz --tractogram tractogram.trk", - "help": "usage: scil_viz_tractogram_seeds_3d.py [-h] [--tractogram TRACTOGRAM]\n [--colormap COLORMAP]\n [--seed_opacity SEED_OPACITY]\n [--tractogram_shape {line,tube}]\n [--tractogram_opacity TRACTOGRAM_OPACITY]\n [--tractogram_width TRACTOGRAM_WIDTH]\n [--tractogram_color R G B [R G B ...]]\n [--background R G B [R G B ...]]\n [-v [{DEBUG,INFO,WARNING}]]\n in_seed_map\n\nVisualize seeds as 3D points, with heatmaps corresponding to seed density\n\nExample usages:\n\n>>> scil_viz_tractogram_seeds_3d.py seeds.nii.gz --tractogram tractogram.trk\n\npositional arguments:\n in_seed_map Seed density map.\n\noptions:\n -h, --help show this help message and exit\n --tractogram TRACTOGRAM\n Tractogram corresponding to the seeds.\n --colormap COLORMAP Name of the map for the density coloring. Can be any colormap that matplotlib offers.\n [Default: bone]\n --seed_opacity SEED_OPACITY\n Opacity of the contour generated.\n [Default: 0.5]\n --tractogram_shape {line,tube}\n Display streamlines either as lines or tubes.\n [Default: tube]\n --tractogram_opacity TRACTOGRAM_OPACITY\n Opacity of the streamlines.\n [Default: 0.5]\n --tractogram_width TRACTOGRAM_WIDTH\n Width of tubes or lines representing streamlines.\n [Default: 0.05]\n --tractogram_color R G B [R G B ...]\n Color for the tractogram.\n --background R G B [R G B ...]\n RGB values [0, 255] of the color of the background.\n [Default: [0, 0, 0]]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n", - "synonyms": [["work", "and"], ["high", "higher", "highest", "level", "level"], ["left", "result", "when"], ["considered", "contrast", "difference", "form", "result", "specific", "subject", "true", "unique", "work", "example"], ["meaning", "name"], ["represent", "representing"], ["held", "on"], ["clear", "long", "main", "result", "the"], ["clear", "considered", "form", "long", "result", "true", "work", "this"], ["clear", "result", "work", "that"], ["future", "work", "working", "for"], ["clear", "considered", "result", "work", "be"], ["considered", "role", "work", "as"], ["create", "work", "working", "help"], ["action", "clear", "considered", "future", "matter", "possibility", "potential", "question", "result", "specific", "any"], ["long", "with"], ["methods", "using"], ["streamlines", "streamlines"], ["create", "order", "work", "to"], ["considered", "is"], ["maps", "map"], ["clear", "considered", "form", "result", "either"]], - "keywords": [] - }, - { - "name": "scil_viz_volume_histogram", - "docstring": "Script to display a histogram of a metric (FA, MD, etc.) from a binary mask\n(wm mask, vascular mask, etc.).\nThese two images must be coregistered with each other.\n\n>>> scil_viz_volume_histogram.py metric.nii.gz mask_bin.nii.gz 8\n out_filename_image.png", - "help": "usage: scil_viz_volume_histogram.py [-h] [--title TITLE] [--x_label X_LABEL]\n [--colors COLORS] [--show_only]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_metric in_mask n_bins out_png\n\nScript to display a histogram of a metric (FA, MD, etc.) from a binary mask\n(wm mask, vascular mask, etc.).\nThese two images must be coregistered with each other.\n\n>>> scil_viz_volume_histogram.py metric.nii.gz mask_bin.nii.gz 8\n out_filename_image.png\n\npositional arguments:\n in_metric Metric map, ex: FA, MD, ...\n in_mask Binary mask data to extract value.\n n_bins Number of bins to use for the histogram.\n out_png Output filename for the figure.\n\noptions:\n -h, --help show this help message and exit\n --show_only Do not save the figure, only display.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nHistogram options:\n --title TITLE Use the provided info for the histogram title. [Histogram]\n --x_label X_LABEL Use the provided info for the x axis name.\n --colors COLORS Use the provided info for the bars color. 
[#0504aa]\n", - "synonyms": [["total", "number"], ["clear", "considered", "long", "result", "work", "only"], ["work", "and"], ["high", "higher", "highest", "level", "level"], ["left", "from"], ["left", "result", "when"], ["image", "images"], ["meaning", "name"], ["methods", "use"], ["long", "a"], ["clear", "order", "step", "must"], ["held", "on"], ["clear", "long", "main", "result", "the"], ["clear", "considered", "form", "long", "result", "true", "work", "this"], ["future", "work", "working", "for"], ["clear", "considered", "result", "work", "be"], ["create", "work", "working", "help"], ["long", "with"], ["analysis", "data", "database", "tracking", "data"], ["binary", "binary"], ["methods", "using"], ["variety", "work", "other"], ["future", "result", "specific", "variety", "work", "these"], ["create", "order", "work", "to"], ["considered", "is"], ["individual", "each"], ["long", "two"], ["clear", "considered", "long", "matter", "order", "question", "result", "work", "not"], ["maps", "map"]], - "keywords": [] - }, - { - "name": "scil_viz_volume_scatterplot", - "docstring": "Script to display a scatter plot between two maps (ex. FA and MD, ihMT and MT).\nBy default, no mask is applied to the data.\nDifferent options are available to mask or threshold data:\n - a binary mask\n - two probability maps, which can be used to threshold maps with\n --in_prob_maps. The same threshold is applied on these two maps (--thr).\n - parcellation, which can be used to plot values for each region of\n an atlas (--in_atlas) or a subset of regions (--specific_label).\n The atlas option requires a json file (--atlas_lut) with the indices and\n names of each label corresponding to the atlas, as follows:\n \"1\": \"lh_A8m\",\n \"2\": \"rh_A8m\",\n The numbers must correspond to the label indices in the json file.\n\nBe careful, you cannot use all of them at the same time.\n\nFor a general scatter plot without a mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n\nFor a scatter plot with a mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_bin_mask mask_wm.nii.gz\n\nFor a tissue probability scatter plot:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_prob_maps wm_map.nii.gz gm_map.nii.gz\n\nFor a scatter plot using an atlas:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n --specific_label 34 67 87", - "help": "usage: scil_viz_volume_scatterplot.py [-h] [--out_dir OUT_DIR] [--thr THR]\n [--not_exclude_zero]\n [--in_bin_mask IN_BIN_MASK | --in_prob_maps IN_PROB_MAPS IN_PROB_MAPS | --in_atlas IN_ATLAS]\n [--atlas_lut ATLAS_LUT]\n [--specific_label SPECIFIC_LABEL [SPECIFIC_LABEL ...]]\n [--in_folder] [--title TITLE]\n [--x_label X_LABEL] [--y_label Y_LABEL]\n [--label LABEL]\n [--label_prob LABEL_PROB]\n [--marker MARKER]\n [--marker_size MARKER_SIZE]\n [--transparency TRANSPARENCY]\n [--dpi DPI] [--colors color1 color2]\n [--show_only]\n [-v 
[{DEBUG,INFO,WARNING}]] [-f]\n in_x_map in_y_map out_name\n\nScript to display a scatter plot between two maps (ex. FA and MD, ihMT and MT).\nBy default, no mask is applied to the data.\nDifferent options are available to mask or threshold data:\n - a binary mask\n - two probability maps, which can be used to threshold maps with\n --in_prob_maps. The same threshold is applied on these two maps (--thr).\n - parcellation, which can be used to plot values for each region of\n an atlas (--in_atlas) or a subset of regions (--specific_label).\n The atlas option requires a json file (--atlas_lut) with the indices and\n names of each label corresponding to the atlas, as follows:\n \"1\": \"lh_A8m\",\n \"2\": \"rh_A8m\",\n The numbers must correspond to the label indices in the json file.\n\nBe careful, you cannot use all of them at the same time.\n\nFor a general scatter plot without a mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n\nFor a scatter plot with a mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_bin_mask mask_wm.nii.gz\n\nFor a tissue probability scatter plot:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_prob_maps wm_map.nii.gz gm_map.nii.gz\n\nFor a scatter plot using an atlas:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n --specific_label 34 67 87\n\npositional arguments:\n in_x_map Map on the x axis, FA for example.\n in_y_map Map on the y axis, MD for example.\n out_name Output filename for the figure without extension.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Output directory to save scatter plot.\n --thr THR Use to apply threshold only on probability maps (same for both maps) with --in_prob_maps option. [0.9]\n --not_exclude_zero Keep zero value in data.\n --in_bin_mask IN_BIN_MASK\n Binary mask. Use this option to extract x and y map values from a specific mask or region: wm_mask or roi_mask for example.\n --in_prob_maps IN_PROB_MAPS IN_PROB_MAPS\n Probability maps, WM and GM for example.\n --in_atlas IN_ATLAS Path to the input atlas image.\n --show_only Do not save the figure, only display. Not available with the --in_atlas option.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nAtlas options:\n --atlas_lut ATLAS_LUT\n Path of the LUT file corresponding to the atlas, used to name the regions of interest.\n --specific_label SPECIFIC_LABEL [SPECIFIC_LABEL ...]\n Label list to use for the scatter plot. Labels must correspond to the atlas LUT file. [None]\n --in_folder Save label plots in subfolder \"Label_plots\".\n\nScatter plot options:\n --title TITLE Use the provided info for the title name. [Scatter Plot]\n --x_label X_LABEL Use the provided info for the x axis name. [x]\n --y_label Y_LABEL Use the provided info for the y axis name. [y]\n --label LABEL Use the provided info for the legend box corresponding to mask or first probability map. [None]\n --label_prob LABEL_PROB\n Use the provided info for the legend box corresponding to the second probability map. [Threshold prob_map 2]\n --marker MARKER Use the provided info for the marker shape. 
[.]\n --marker_size MARKER_SIZE\n Use the provided info for the marker size. [15]\n --transparency TRANSPARENCY\n Use the provided info for the point transparency. [0.4]\n --dpi DPI Use the provided info for the dpi resolution. [300]\n --colors color1 color2\n", - "synonyms": [["individual", "specific", "unique", "variety", "different"], ["based", "form", "result", "which"], ["order", "required"], ["clear", "considered", "long", "result", "work", "only"], ["work", "and"], ["general", "general"], ["brain", "tissue"], ["applied", "applied"], ["result", "following"], ["considered", "are"], ["probability", "probability"], ["difference", "point"], ["high", "higher", "highest", "level", "level"], ["left", "from"], ["left", "result", "when"], ["considered", "contrast", "difference", "form", "result", "specific", "subject", "true", "unique", "work", "example"], ["areas", "region", "regions", "regions"], ["meaning", "name"], ["methods", "use"], ["thinking", "you"], ["specific", "specific"], ["long", "a"], ["held", "on"], ["clear", "order", "step", "must"], ["applied", "apply"], ["clear", "long", "main", "result", "the"], ["higher", "interest"], ["clear", "considered", "form", "long", "meaning", "result", "true", "work", "same"], ["clear", "considered", "form", "long", "result", "true", "work", "this"], ["clear", "lack", "result", "without"], ["future", "work", "working", "for"], ["clear", "considered", "result", "work", "be"], ["considered", "role", "work", "as"], ["create", "work", "working", "help"], ["threshold", "threshold"], ["held", "in"], ["long", "with"], ["long", "result", "work", "working", "time"], ["atlas", "atlas"], ["analysis", "data", "database", "tracking", "data"], ["binary", "binary"], ["methods", "using"], ["future", "result", "specific", "variety", "work", "these"], ["create", "order", "work", "to"], ["image", "image"], ["considered", "is"], ["long", "result", "work", "both"], ["individual", "each"], ["larger", "size", "size"], ["long", "two"], ["work", "all"], ["clear", "considered", "long", "matter", "order", "question", "result", "work", "not"], ["maps", "maps"], ["clear", "question", "result", "true", "no"], ["area", "areas", "region", "regions", "region"], ["maps", "map"], ["shape", "structure", "shape"]], - "keywords": [] - }, - { - "name": "scil_viz_volume_screenshot", - "docstring": "Take screenshot(s) of one or more slices in a given image volume along the\nrequested axis. If slice indices are not provided, all slices in the volume\nare used. The names of the output images are suffixed with _slice_{id}, with\nid being the slice number in the volume. If a labelmap image is provided (e.g.\na tissue segmentation map), it is overlaid on the volume slices. 
Same goes if\na mask is provided, with the difference that it can be rendered as a\ntransparency overlay as well as a contour.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plotted.\n\nExample:\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_annotated.png\n --display_slice_number --display_lr\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_masked.png\n --transparency brainmask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial.png\n --slices 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_sagittal.png --axis sagittal\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_plasma_cmap.png\n --slices 30 40 50 60 70 80 90 100 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_overlay.png\n --slices 30 40 50 60 70 80 90 100 --overlays brain_mask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_contour.png\n --slices 30 40 50 60 70 80 90 100\n --overlays brain_mask.nii.gz --overlays_as_contours\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_map.png\n --slices 30 40 50 60 70 80 90 100 --labelmap tissue_map.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_viridis_cmap.png\n --slices 30 40 50 60 70 80 90 100\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_peaks.png\n --slices 30 40 50 60 70 80 90 100 --peaks peaks.nii.gz --volume_opacity 0.5", - "help": "usage: scil_viz_volume_screenshot.py [-h]\n [--volume_cmap_name VOLUME_CMAP_NAME]\n [--volume_opacity VOLUME_OPACITY]\n [--transparency TRANSPARENCY]\n [--slices SID [SID ...]]\n [--axis {sagittal,coronal,axial}]\n [--size WIDTH HEIGHT]\n [--display_slice_number] [--display_lr]\n [--labelmap LABELMAP]\n [--labelmap_cmap_name LABELMAP_CMAP_NAME]\n [--labelmap_opacity LABELMAP_OPACITY]\n [--overlays OVERLAYS [OVERLAYS ...]]\n [--overlays_as_contours]\n [--overlays_colors R G B [R G B ...]]\n [--overlays_opacity OVERLAYS_OPACITY]\n [--peaks PEAKS [PEAKS ...]]\n [--peaks_width PEAKS_WIDTH]\n [--peaks_opacity PEAKS_OPACITY]\n [-v [{DEBUG,INFO,WARNING}]]\n volume out_fname\n\nTake screenshot(s) of one or more slices in a given image volume along the\nrequested axis. If slice indices are not provided, all slices in the volume\nare used. The names of the output images are suffixed with _slice_{id}, with\nid being the slice number in the volume. If a labelmap image is provided (e.g.\na tissue segmentation map), it is overlaid on the volume slices. 
Same goes if\na mask is provided, with the difference that it can be rendered as a\ntransparency overlay as well as a contour.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plotted.\n\nExample:\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_annotated.png\n --display_slice_number --display_lr\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_masked.png\n --transparency brainmask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial.png\n --slices 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_sagittal.png --axis sagittal\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_plasma_cmap.png\n --slices 30 40 50 60 70 80 90 100 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_overlay.png\n --slices 30 40 50 60 70 80 90 100 --overlays brain_mask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_contour.png\n --slices 30 40 50 60 70 80 90 100\n --overlays brain_mask.nii.gz --overlays_as_contours\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_map.png\n --slices 30 40 50 60 70 80 90 100 --labelmap tissue_map.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_viridis_cmap.png\n --slices 30 40 50 60 70 80 90 100\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_peaks.png\n --slices 30 40 50 60 70 80 90 100 --peaks peaks.nii.gz --volume_opacity 0.5\n\npositional arguments:\n volume Input 3D Nifti file (.nii/.nii.gz).\n out_fname Name of the output image(s). If multiple slices are provided (or none), their index will be appended to the name (e.g. volume.jpg, volume.png becomes volume_slice_0.jpg, volume_slice_0.png).\n\noptions:\n -h, --help show this help message and exit\n --transparency TRANSPARENCY\n Transparency Nifti image (.nii/.nii.gz). Can either be a binary mask or a scalar image in the range [0, 1].\n --size WIDTH HEIGHT Size of the output image. [(768, 768)]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nSlicing:\n --slices SID [SID ...]\n Slice indices to screenshot. If None are supplied, all slices inside the transparency mask are selected.\n --axis {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n\nInput overlays:\n --labelmap LABELMAP Input labelmap file (.nii/.nii.gz).\n --overlays OVERLAYS [OVERLAYS ...]\n 3D Nifti image(s) to overlay (.nii/.nii.gz).\n --peaks PEAKS [PEAKS ...]\n Peaks Nifti image (.nii/.nii.gz).\n\nVolume rendering:\n --volume_cmap_name VOLUME_CMAP_NAME\n Colormap name for the 3D Nifti image data. [None]\n --volume_opacity VOLUME_OPACITY\n Opacity value for the 3D Nifti image data. [1.0]\n --labelmap_cmap_name LABELMAP_CMAP_NAME\n Colormap name for the labelmap image data. [viridis]\n --labelmap_opacity LABELMAP_OPACITY\n Opacity value for the labelmap image data. [0.5]\n\nPeaks rendering:\n --peaks_width PEAKS_WIDTH\n Width of the peaks lines. [3.0]\n --peaks_opacity PEAKS_OPACITY\n Opacity value for the peaks overlay. [1.0]\n\nOverlay rendering:\n --overlays_as_contours\n Display overlay contours and reduce the opacity of their inner region (see the `--overlays_opacity` argument).\n --overlays_colors R G B [R G B ...]\n Colors for the overlays or contours. You may provide a single color, for all overlays/contours, or one color for each. 
Each color is given as three values: R G B\n --overlays_opacity OVERLAYS_OPACITY\n Opacity value for the masks overlays. When combined with `--overlays_as_contours`, this will be the opacity of the region inside the computed contours. [0.5]\n\nAnnotations:\n --display_slice_number\n If true, displays the slice number in the upper left corner.\n --display_lr If true, add left and right annotations to the images.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "total", - "90" - ], - [ - "work", - "and" - ], - [ - "brain", - "tissue" - ], - [ - "considered", - "are" - ], - [ - "considered", - "involved", - "being" - ], - [ - "long", - "work", - "more" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "contrast", - "difference", - "form", - "result", - "specific", - "subject", - "true", - "unique", - "work", - "example" - ], - [ - "image", - "images" - ], - [ - "meaning", - "name" - ], - [ - "rendered", - "rendering", - "rendering" - ], - [ - "rendered", - "rendering", - "rendered" - ], - [ - "question", - "argument" - ], - [ - "left", - "left" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "total", - "40" - ], - [ - "total", - "70" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "lack", - "result", - "without" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "clear", - "order", - "step", - "work", - "take" - ], - [ - "long", - "with" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "total", - "50" - ], - [ - "axial", - "axial" - ], - [ - "create", - "provide" - ], - [ - "difference", - "difference" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "left", - "right" - ], - [ - "area", - "main", - "along" - ], - [ - "sagittal", - "sagittal" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "meaning", - "true", - "true" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "result", - "may" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "larger", - "size", - "size" - ], - [ - "total", - "combined" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "total", - "60" - ], - [ - "work", - "all" - ], - [ - "total", - "100" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "coronal", - "coronal" - ], - [ - "total", - "80" - ], - [ - "area", - "areas", - "region", - "regions", - "region" - ], - [ - "maps", - "map" - ], - [ - "increase", - "reduce" - ], - [ - "considered", - "experience", - "large", - "long", - "result", - "variety", - 
"work", - "working", - "well" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_volume_screenshot_mosaic", - "docstring": "Compose a mosaic of screenshots of the given image volume slices along the\nrequested axis. The provided transparency mask (e.g. a brain mask volume) is\nused to set the screenshot values outside the mask non-zero values to full\ntransparency. Additionally, if a labelmap image is provided (e.g. a tissue\nsegmentation map), it is overlaid on the volume slices. Also, a series of\nmasks can be provided and will be used to generate contours overlaid on each\nvolume slice.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plot.\n\nThe screenshots are overlapped according to the given factors.\n\nThe mosaic supports either horizontal, vertical or matrix arrangements.\n\nExample:\n>>> scil_viz_volume_screenshot_mosaic.py 1 8 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial.png 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --overlap_factor 0.6 0.5 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 tissues.nii.gz brain_mask.nii.gz\n mosaic_overlap_tissues_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_viridis_cmap.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_contours.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --overlays wm_mask.nii.gz gm_mask.nii.gz csf_mask.nii.gz", - "help": "usage: scil_viz_volume_screenshot_mosaic.py [-h]\n [--volume_cmap_name VOLUME_CMAP_NAME]\n [--volume_opacity VOLUME_OPACITY]\n [--axis {sagittal,coronal,axial}]\n [--size WIDTH HEIGHT]\n [--labelmap LABELMAP]\n [--labelmap_cmap_name LABELMAP_CMAP_NAME]\n [--labelmap_opacity LABELMAP_OPACITY]\n [--overlays OVERLAYS [OVERLAYS ...]]\n [--overlays_as_contours]\n [--overlays_colors R G B [R G B ...]]\n [--overlays_opacity OVERLAYS_OPACITY]\n [--overlap rWIDTH rHEIGHT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n rows cols volume transparency\n out_fname SID [SID ...]\n\nCompose a mosaic of screenshots of the given image volume slices along the\nrequested axis. The provided transparency mask (e.g. a brain mask volume) is\nused to set the screenshot values outside the mask non-zero values to full\ntransparency. Additionally, if a labelmap image is provided (e.g. a tissue\nsegmentation map), it is overlaid on the volume slices. 
Also, a series of\nmasks can be provided and will be used to generate contours overlaid on each\nvolume slice.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plot.\n\nThe screenshots are overlapped according to the given factors.\n\nThe mosaic supports either horizontal, vertical or matrix arrangements.\n\nExample:\n>>> scil_viz_volume_screenshot_mosaic.py 1 8 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial.png 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --overlap_factor 0.6 0.5 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 tissues.nii.gz brain_mask.nii.gz\n mosaic_overlap_tissues_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_viridis_cmap.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_contours.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --overlays wm_mask.nii.gz gm_mask.nii.gz csf_mask.nii.gz\n\npositional arguments:\n rows The mosaic row count.\n cols The mosaic column count.\n volume Input 3D Nifti file (.nii/.nii.gz).\n transparency Transparency Nifti image (.nii/.nii.gz). Can either be a binary mask or a scalar image in the range [0, 1].\n out_fname Name of the output image (e.g. img.jpg, img.png).\n SID Slice indices to screenshot.\n\noptions:\n -h, --help show this help message and exit\n --axis {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n --size WIDTH HEIGHT Size of the output image. [(768, 768)]\n --overlap rWIDTH rHEIGHT\n The overlap factor as a ratio of each image dimension. [(0.6, 0.0)]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nInput overlays:\n --labelmap LABELMAP Input labelmap file (.nii/.nii.gz).\n --overlays OVERLAYS [OVERLAYS ...]\n 3D Nifti image(s) to overlay (.nii/.nii.gz).\n\nVolume rendering:\n --volume_cmap_name VOLUME_CMAP_NAME\n Colormap name for the 3D Nifti image data. [None]\n --volume_opacity VOLUME_OPACITY\n Opacity value for the 3D Nifti image data. [1.0]\n --labelmap_cmap_name LABELMAP_CMAP_NAME\n Colormap name for the labelmap image data. [viridis]\n --labelmap_opacity LABELMAP_OPACITY\n Opacity value for the labelmap image data. [0.5]\n\nOverlay rendering:\n --overlays_as_contours\n Display overlays contours and reduce the opacity of their inner region (see the `--overlays_opacity` argument).\n --overlays_colors R G B [R G B ...]\n Colors for the overlays or contours. You may provide a single color, for all overlays/contours, or one color for each. Each color is given as three values: R G B\n --overlays_opacity OVERLAYS_OPACITY\n Opacity value for the masks overlays. When combined with `--overlays_as_contours`, this will be the opacity of the region inside the computed contours. 
[0.5]\n", - "synonyms": [ - [ - "volume", - "volumes", - "volume" - ], - [ - "create", - "generate" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "total", - "90" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "brain", - "tissue" - ], - [ - "considered", - "are" - ], - [ - "brain", - "brain" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "contrast", - "difference", - "form", - "result", - "specific", - "subject", - "true", - "unique", - "work", - "example" - ], - [ - "meaning", - "name" - ], - [ - "rendered", - "rendering", - "rendering" - ], - [ - "question", - "argument" - ], - [ - "order", - "set" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "total", - "40" - ], - [ - "total", - "70" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "lack", - "result", - "without" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "intersected", - "overlapped" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "total", - "50" - ], - [ - "supported", - "supports" - ], - [ - "axial", - "axial" - ], - [ - "create", - "provide" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "view", - "see" - ], - [ - "area", - "main", - "along" - ], - [ - "sagittal", - "sagittal" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "matrices", - "matrix" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "result", - "may" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "larger", - "size", - "size" - ], - [ - "total", - "combined" - ], - [ - "total", - "60" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "total", - "100" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "coronal", - "coronal" - ], - [ - "based", - "reported", - "according" - ], - [ - "total", - "80" - ], - [ - "area", - "areas", - "region", - "regions", - "region" - ], - [ - "maps", - "map" - ], - [ - "increase", - "reduce" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "long", - "full" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_apply_transform", - "docstring": "Transform Nifti (.nii.gz) using an affine/rigid transformation.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_apply_transform_to_image.py.", - "help": "usage: scil_volume_apply_transform.py [-h] [--inverse] [--keep_dtype]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file in_target_file in_transfo\n out_name\n\nTransform Nifti (.nii.gz) using an affine/rigid transformation.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: 
scil_apply_transform_to_image.py.\n\npositional arguments:\n in_file Path of the file to be transformed (nii or nii.gz)\n in_target_file Path of the reference target file (.nii.gz).\n in_transfo Path of the file containing the 4x4 \n transformation matrix (.txt, .npy or .mat).\n out_name Output filename of the transformed data.\n\noptions:\n -h, --help show this help message and exit\n --inverse Apply the inverse transformation.\n --keep_dtype If True, keeps the data_type of the input image (in_file) when saving the output image (out_name).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "work", - "and" - ], - [ - "long", - "work", - "more" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "methods", - "use" - ], - [ - "held", - "on" - ], - [ - "applied", - "apply" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "matter", - "question", - "thinking", - "true", - "work", - "working", - "how" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "methods", - "using" - ], - [ - "meaning", - "true", - "true" - ], - [ - "matrices", - "matrix" - ], - [ - "step", - "follow" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "connection", - "connections", - "link" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_b0_synthesis", - "docstring": "Wrapper for SyNb0 available in Dipy, to run it on a single subject.\nRequires skull-stripped b0 and T1w images as input; the script will normalize the\nT1w's WM to 110, co-register both images, then register them to the appropriate\ntemplate, run SyNb0 and then transform the result back to the original space.\n\nSyNb0 is a deep learning model that predicts a synthetic, distortion-free\nb0 image from a distorted b0 and T1w.\n\nThis script must be used carefully, as it is meant to be used in an\nenvironment with the following dependencies already installed (not installed by\ndefault in Scilpy):\n- tensorflow-addons\n- tensorrt\n- tensorflow", - "help": "usage: scil_volume_b0_synthesis.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_b0 in_b0_mask in_t1 in_t1_mask out_b0\n\nWrapper for SyNb0 available in Dipy, to run it on a single subject.\nRequires skull-stripped b0 and T1w images as input; the script will normalize the\nT1w's WM to 110, co-register both images, then register them to the appropriate\ntemplate, run SyNb0 and then transform the result back to the original space.\n\nSyNb0 is a deep learning model that predicts a synthetic, distortion-free\nb0 image from a distorted b0 and T1w.\n\nThis script must be used carefully, as it is meant to be used in an\nenvironment with the following dependencies already installed (not installed by\ndefault in Scilpy):\n- tensorflow-addons\n- tensorrt\n- tensorflow\n\npositional arguments:\n in_b0 Input b0 image.\n in_b0_mask Input b0 mask.\n in_t1 Input t1w image.\n in_t1_mask Input t1w mask.\n
out_b0 Output b0 image without distortion.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Schilling, Kurt G., et al. \"Synthesized b0 for diffusion distortion\n correction (Synb0-DisCo).\" Magnetic resonance imaging 64 (2019): 62-70.\n", - "synonyms": [ - [ - "work", - "and" - ], - [ - "result", - "following" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "image", - "images" - ], - [ - "lack", - "loss", - "result", - "result" - ], - [ - "long", - "a" - ], - [ - "total", - "70" - ], - [ - "held", - "on" - ], - [ - "clear", - "order", - "step", - "must" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "lack", - "result", - "without" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "diffusion", - "diffusion" - ], - [ - "future", - "will" - ], - [ - "experience", - "knowledge", - "learning", - "learning" - ], - [ - "left", - "back" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "space", - "space" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "clear", - "order", - "result", - "meant" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "methods", - "using" - ], - [ - "imaging", - "imaging" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "manner", - "specific", - "appropriate" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "matter", - "question", - "subject", - "subjects", - "subject" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_count_non_zero_voxels", - "docstring": "Count the number of non-zero voxels in an image file.\n\nIf you give it an image with more than 3 dimensions, it will summarize the 4th\n(or more) dimension to one voxel, and then find non-zero voxels over this.\nThis means that if there is at least one non-zero voxel in the 4th dimension,\nthis voxel of the 3D volume will be considered as non-zero.\n\nFormerly: scil_count_non_zero_voxels.py", - "help": "usage: scil_volume_count_non_zero_voxels.py [-h] [--out OUT_FILE] [--stats]\n [--id VALUE_ID]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n IN_FILE\n\nCount the number of non-zero voxels in an image file.\n\nIf you give it an image with more than 3 dimensions, it will summarize the 4th\n(or more) dimension to one voxel, and then find non-zero voxels over this.\nThis means that if there is at least one non-zero voxel in the 4th dimension,\nthis voxel of the 3D volume will be considered as non-zero.\n\nFormerly: scil_count_non_zero_voxels.py\n\npositional arguments:\n IN_FILE Input file name, in nifti format.\n\noptions:\n -h, --help show this help message and exit\n --out OUT_FILE Name of the output file, which will be saved as a text file.\n --stats If set, output the value using a stats format. 
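For intuition, the 4th-dimension collapse that scil_volume_count_non_zero_voxels.py describes above amounts to the following numpy sketch (an illustration only, not the script's actual implementation; the input file name is a placeholder):

    import nibabel as nib
    import numpy as np

    img = nib.load('metric.nii.gz')  # placeholder input
    data = np.asanyarray(img.dataobj)
    if data.ndim > 3:
        # A voxel of the 3D volume counts as non-zero if any value along
        # the 4th (or higher) dimension is non-zero.
        data = data.reshape(data.shape[:3] + (-1,)).any(axis=-1)
    print(np.count_nonzero(data))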
Using this syntax will append\n a line to the output file, instead of creating a file with only one line.\n This is useful to create a file to be used as the source of data for a graph.\n Can be combined with --id\n --id VALUE_ID Id of the current count. If used, the value of this argument will be\n output (followed by a \":\") before the count value.\n Mostly useful with --stats.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "long", - "work", - "more" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "create", - "creating" - ], - [ - "meaning", - "name" - ], - [ - "work", - "find" - ], - [ - "order", - "set" - ], - [ - "question", - "argument" - ], - [ - "long", - "over" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "clear", - "left", - "out" - ], - [ - "voxel", - "voxel" - ], - [ - "tool", - "useful" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "result", - "followed" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "create" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "left", - "subsequently", - "then" - ], - [ - "methods", - "using" - ], - [ - "future", - "current" - ], - [ - "order", - "work", - "instead" - ], - [ - "long", - "than" - ], - [ - "left", - "before" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "clear", - "give" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "total", - "combined" - ], - [ - "clear", - "long", - "question", - "result", - "work", - "there" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "meaning", - "order", - "result", - "step", - "true", - "means" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_crop", - "docstring": "Crop a volume using a given or an automatically computed bounding box. If a\npreviously computed bounding box file is given, the cropping will be applied\nand the affine fixed accordingly.\n\nWarning: This works well on masked images (like with FSL-Bet) volumes since\nit's looking for non-zero data. Therefore, you should validate the results on\nother types of images that haven't been masked.\n\nFormerly: scil_crop_volume.py", - "help": "usage: scil_volume_crop.py [-h] [--ignore_voxel_size]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--input_bbox INPUT_BBOX | --output_bbox OUTPUT_BBOX]\n in_image out_image\n\nCrop a volume using a given or an automatically computed bounding box. 
If a\npreviously computed bounding box file is given, the cropping will be applied\nand the affine fixed accordingly.\n\nWarning: This works well on masked images (like with FSL-Bet) volumes since\nit's looking for non-zero data. Therefore, you should validate the results on\nother types of images that haven't been masked.\n\nFormerly: scil_crop_volume.py\n\npositional arguments:\n in_image Path of the nifti file to crop.\n out_image Path of the cropped nifti file to write.\n\noptions:\n -h, --help show this help message and exit\n --ignore_voxel_size Ignore voxel size compatibility test between input bounding box and data. Warning, use only if you know what you are doing.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --input_bbox INPUT_BBOX\n Path of the pickle file from which to take the bounding box to crop input file.\n --output_bbox OUTPUT_BBOX\n Path of the pickle file where to write the computed bounding box. (.pickle extension)\n", - "synonyms": [ - [ - "volume", - "volumes", - "volume" - ], - [ - "subsequently", - "previously" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "applied", - "applied" - ], - [ - "considered", - "are" - ], - [ - "considered", - "result", - "therefore" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "matter", - "question", - "thinking", - "true", - "know" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "image", - "images" - ], - [ - "methods", - "use" - ], - [ - "thinking", - "you" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "clear", - "order", - "step", - "work", - "take" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "clear", - "matter", - "question", - "thinking", - "true", - "view", - "work", - "what" - ], - [ - "considered", - "involved", - "work", - "been" - ], - [ - "methods", - "using" - ], - [ - "variety", - "work", - "other" - ], - [ - "thinking", - "work", - "working", - "doing" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "work", - "works" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "thinking", - "working", - "looking" - ], - [ - "considered", - "is" - ], - [ - "area", - "work", - "where" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "larger", - "size", - "size" - ], - [ - "result", - "results" - ], - [ - "volume", - "volumes", - "volumes" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "forms", - "specific", - "variety", - "types" - ], - [ - "considered", - "experience", - "large", - "long", - "result", - "variety", - "work", - "working", - "well" - ], - [ - "result", - "since" - ] - ], - "keywords": [] - }, - { - 
"name": "scil_volume_flip", - "docstring": "Flip the volume according to the specified axis.\n\nFormerly: scil_flip_volume.py", - "help": "usage: scil_volume_flip.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image dimension [dimension ...]\n\nFlip the volume according to the specified axis.\n\nFormerly: scil_flip_volume.py\n\npositional arguments:\n in_image Path of the input volume (nifti).\n out_image Path of the output volume (nifti).\n dimension The axes you want to flip. eg: to flip the x and y axes use: x y.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "thinking", - "you" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "work", - "and" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "methods", - "using" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "based", - "reported", - "according" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "is" - ], - [ - "methods", - "use" - ], - [ - "parameters", - "specified" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_math", - "docstring": "Performs an operation on a list of images. The supported operations are\nlisted below.\n\nThis script is loading all images in memory, will often crash after a few\nhundred images.\n\nSome operations such as multiplication or addition accept float value as\nparameters instead of images.\n> scil_volume_math.py multiplication img.nii.gz 10 mult_10.nii.gz\n\nFormerly: scil_image_math.py", - "help": "usage: scil_volume_math.py [-h] [--data_type DATA_TYPE] [--exclude_background]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference,concatenate,dilation,erosion,closing,opening,blur}\n in_args [in_args ...] out_image\n\nPerforms an operation on a list of images. 
The supported operations are\nlisted below.\n\nThis script is loading all images in memory, will often crash after a few\nhundred images.\n\nSome operations such as multiplication or addition accept float value as\nparameters instead of images.\n> scil_volume_math.py multiplication img.nii.gz 10 mult_10.nii.gz\n\nFormerly: scil_image_math.py\n\n lower_threshold: IMG THRESHOLD\n All values below the threshold will be set to zero.\n All values above the threshold will be set to one.\n \n upper_threshold: IMG THRESHOLD\n All values below the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_eq: IMG THRESHOLD\n All values below the threshold will be set to zero.\n All values above or equal the threshold will be set to one.\n \n upper_threshold_eq: IMG THRESHOLD\n All values below or equal the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_otsu: IMG\n All values below or equal to the Otsu threshold will be set to zero.\n All values above the Otsu threshold will be set to one.\n (Otsu's method is an algorithm to perform automatic image thresholding\n of the background.)\n \n upper_threshold_otsu: IMG\n All values below the Otsu threshold will be set to one.\n All values above or equal to the Otsu threshold will be set to zero.\n Equivalent to lower_threshold_otsu followed by an inversion.\n \n lower_clip: IMG THRESHOLD\n All values below the threshold will be set to threshold.\n \n upper_clip: IMG THRESHOLD\n All values above the threshold will be set to threshold.\n \n absolute_value: IMG\n All negative values will become positive.\n \n round: IMG\n Round all decimal values to the closest integer.\n \n ceil: IMG\n Ceil all decimal values to the next integer.\n \n floor: IMG\n Floor all decimal values to the previous integer.\n \n normalize_sum: IMG\n Normalize the image so the sum of all values is one.\n \n normalize_max: IMG\n Normalize the image so the maximum value is one.\n \n log_10: IMG\n Apply a log (base 10) to all non zeros values of an image.\n \n log_e: IMG\n Apply a natural log to all non zeros values of an image.\n \n convert: IMG\n Perform no operation, but simply change the data type.\n \n invert: IMG\n Operation on binary image to interchange 0s and 1s in a binary mask.\n \n addition: IMGs\n Add multiple images together.\n \n subtraction: IMG_1 IMG_2\n Subtract first image by the second (IMG_1 - IMG_2).\n \n multiplication: IMGs\n Multiply multiple images together (danger of underflow and overflow)\n \n division: IMG_1 IMG_2\n Divide first image by the second (danger of underflow and overflow)\n Ignore zeros values, excluded from the operation.\n \n mean: IMGs\n Compute the mean of images.\n If a single 4D image is provided, average along the last dimension.\n \n std: IMGs\n Compute the standard deviation average of multiple images.\n If a single 4D image is provided, compute the STD along the last\n dimension.\n \n correlation: IMGs\n Computes the correlation of the 3x3x3 neighborhood of each voxel, for\n all pair of input images. The final image is the average correlation\n (through all pairs).\n For a given pair of images\n - Background is considered as 0. May lead to very high correlations\n close to the border of the background regions, or very poor ones if the\n background in both images differ.\n - Images are zero-padded. 
For the same reason as higher, may lead to\n very high correlations if you have data close to the border of the\n image.\n - NaN values (if a voxel's neighborhood is entirely uniform; std 0) are\n replaced by\n - 0 if at least one neighborhood was entirely containing background.\n - 1 if the voxel's neighborhoods are uniform in both images\n - 0 if the voxel's neighborhoods is uniform in one image, but not\n the other.\n\n UPDATE AS OF VERSION 2.0: Random noise was previously added in the\n process to help avoid NaN values. Now replaced by either 0 or 1 as\n explained above.\n \n union: IMGs\n Operation on binary image to keep voxels, that are non-zero, in at\n least one file.\n \n intersection: IMGs\n Operation on binary image to keep the voxels, that are non-zero,\n are present in all files.\n \n difference: IMG_1 IMG_2\n Operation on binary image to keep voxels from the first file that are\n not in the second file (non-zeros).\n \n concatenate: IMGs\n Concatenate a list of 3D and 4D images into a single 4D image.\n \n dilation: IMG, VALUE\n Binary morphological operation to spatially extend the values of an\n image to their neighbors. VALUE is in voxels: an integer > 0.\n \n erosion: IMG, VALUE\n Binary morphological operation to spatially shrink the volume contained\n in a binary image. VALUE is in voxels: an integer > 0.\n \n closing: IMG, VALUE\n Binary morphological operation, dilation followed by an erosion.\n \n opening: IMG, VALUE\n Binary morphological operation, erosion followed by a dilation.\n \n blur: IMG, VALUE\n Apply a gaussian blur to a single image. VALUE is sigma, the standard\n deviation of the Gaussian kernel.\n \n\npositional arguments:\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference,concatenate,dilation,erosion,closing,opening,blur}\n The type of operation to be performed on the images.\n in_args The list of image files or parameters. Refer to each operation's documentation of the expected arguments.\n out_image Output image path.\n\noptions:\n -h, --help show this help message and exit\n --data_type DATA_TYPE\n Data type of the output image. Use the format: \n uint8, int16, int/float32, int/float64.\n --exclude_background Does not affect the background of the original images.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
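To make two of the operations above concrete, here is a rough numpy/scipy equivalent of lower_threshold followed by closing (an illustrative sketch under stated assumptions; the script's internals may differ, and the array is synthetic stand-in data):

    import numpy as np
    from scipy import ndimage

    data = np.random.rand(16, 16, 16)  # stand-in for a loaded volume

    # lower_threshold IMG 0.5: values below the threshold -> 0, above -> 1
    binary = (data > 0.5).astype(np.uint8)

    # closing IMG 2: a dilation followed by an erosion (here, 2 iterations)
    closed = ndimage.binary_closing(binary, iterations=2).astype(np.uint8)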
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "positive", - "negative" - ], - [ - "memory", - "memory" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "subsequently", - "previously" - ], - [ - "methods", - "method" - ], - [ - "clear", - "left", - "long", - "result", - "work", - "but" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "clear", - "considered", - "highly", - "long", - "matter", - "true", - "work", - "very" - ], - [ - "left", - "into" - ], - [ - "working", - "together" - ], - [ - "long", - "work", - "working", - "now" - ], - [ - "possibility", - "danger" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "image", - "images" - ], - [ - "areas", - "region", - "regions", - "regions" - ], - [ - "positive", - "positive" - ], - [ - "methods", - "use" - ], - [ - "order", - "set" - ], - [ - "long", - "have" - ], - [ - "thinking", - "you" - ], - [ - "areas", - "neighborhoods" - ], - [ - "high", - "higher", - "increase", - "level", - "higher" - ], - [ - "meaning", - "refer" - ], - [ - "long", - "a" - ], - [ - "action", - "clear", - "future", - "result", - "step", - "change" - ], - [ - "voxel", - "voxel" - ], - [ - "held", - "on" - ], - [ - "applied", - "apply" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "methods", - "using" - ], - [ - "clear", - "considered", - "form", - "long", - "meaning", - "result", - "true", - "work", - "same" - ], - [ - "clear", - "lack", - "matter", - "possibility", - "question", - "result", - "thinking", - "true", - "reason" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "possibility", - "avoid" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "clear", - "result", - "work", - "that" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "result", - "followed" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "random", - "random" - ], - [ - "traditionally", - "often" - ], - [ - "threshold", - "threshold" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "last" - ], - [ - "held", - "in" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "average", - "average" - ], - [ - "clear", - "long", - "matter", - "result", - "work", - "so" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "considered", - "highly", - "considered" - ], - [ - "difference", - "difference" - ], - [ - "algorithm", - "algorithm" - ], - [ - "binary", - "binary" - ], - [ - "thinking", - "simply" - ], - [ - "variety", - "work", - "other" - ], - [ - "order", - "work", - "instead" - ], - [ - "left", - "after" - ], - [ - "process", - "processes", - "step", - "process" - ], - [ - "area", - "main", - "along" - ], - [ - "increase", - "expected" - ], - [ - "high", - "higher", - "level", - "high" - ], - [ - "considered", - "order", - "result", - "subject", - "given" - ], - [ - "supported", - "supported" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "result", - "may" - ], - [ - "considered", - "subsequently", - "was" - ], - [ - "long", - "result", - "work", - "both" - ], - [ - "individual", - "each" - ], - [ - "matter", - "question", - "does" - ], - [ - "level", - "above" - ], - [ - "difference", - 
"meaning", - "result", - "mean" - ], - [ - "considered", - "long", - "work", - "one" - ], - [ - "work", - "all" - ], - [ - "large", - "larger", - "variety", - "work", - "addition" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "clear", - "question", - "result", - "true", - "no" - ], - [ - "anatomical", - "similarity", - "morphological" - ], - [ - "future", - "held", - "step", - "next" - ], - [ - "area", - "neighborhood" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "large", - "work", - "some" - ], - [ - "large", - "long", - "few" - ], - [ - "considered", - "become" - ], - [ - "considered", - "specific", - "variety", - "such" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_remove_outliers_ransac", - "docstring": "Remove outliers from image using the RANSAC algorithm.\nThe RANSAC algorithm parameters are sensitive to the input data.\n\nNOTE: Current default parameters are tuned for ad/md/rd images only.\n\nFormerly: scil_remove_outliers_ransac.py", - "help": "usage: scil_volume_remove_outliers_ransac.py [-h] [--min_fit MIN_FIT]\n [--max_iter MAX_ITER]\n [--fit_thr FIT_THR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image\n\nRemove outliers from image using the RANSAC algorithm.\nThe RANSAC algorithm parameters are sensitive to the input data.\n\nNOTE: Current default parameters are tuned for ad/md/rd images only.\n\nFormerly: scil_remove_outliers_ransac.py\n\npositional arguments:\n in_image Nifti image.\n out_image Corrected Nifti image.\n\noptions:\n -h, --help show this help message and exit\n --min_fit MIN_FIT The minimum number of data values required to fit the model. [50]\n --max_iter MAX_ITER The maximum number of iterations allowed in the algorithm. [1000]\n --fit_thr FIT_THR Threshold value for determining when a data point fits a model. [0.01]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "total", - "number" - ], - [ - "order", - "required" - ], - [ - "clear", - "considered", - "long", - "result", - "work", - "only" - ], - [ - "work", - "and" - ], - [ - "considered", - "are" - ], - [ - "difference", - "point" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "image", - "images" - ], - [ - "long", - "a" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "threshold", - "threshold" - ], - [ - "held", - "in" - ], - [ - "parameter", - "parameters", - "parameters" - ], - [ - "analysis", - "data", - "database", - "tracking", - "data" - ], - [ - "total", - "50" - ], - [ - "algorithm", - "algorithm" - ], - [ - "methods", - "using" - ], - [ - "future", - "current" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "considered", - "is" - ], - [ - "order", - "allowed" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_resample", - "docstring": "Script to resample a dataset to match the resolution of another\nreference dataset or to the resolution specified as an argument.\n\nFormerly: scil_resample_volume.py", - "help": "usage: scil_volume_resample.py [-h]\n (--ref REF | --volume_size VOLUME_SIZE [VOLUME_SIZE ...] | --voxel_size VOXEL_SIZE [VOXEL_SIZE ...] | --iso_min)\n [--interp {nn,lin,quad,cubic}]\n [--enforce_dimensions]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image\n\nScript to resample a dataset to match the resolution of another\nreference dataset or to the resolution specified as an argument.\n\nFormerly: scil_resample_volume.py\n\npositional arguments:\n in_image Path of the input volume.\n out_image Path of the resampled volume.\n\noptions:\n -h, --help show this help message and exit\n --ref REF Reference volume to resample to.\n --volume_size VOLUME_SIZE [VOLUME_SIZE ...]\n Sets the size for the volume. If the value is set to Y, it will resample to a shape of Y x Y x Y.\n --voxel_size VOXEL_SIZE [VOXEL_SIZE ...]\n Sets the voxel size. If the value is set to Y, it will set a voxel size of Y x Y x Y.\n --iso_min Resample the volume to R x R x R with R being the smallest current voxel dimension.\n --interp {nn,lin,quad,cubic}\n Interpolation mode.\n nn: nearest neighbour\n lin: linear\n quad: quadratic\n cubic: cubic\n Defaults to linear\n --enforce_dimensions Enforce the reference volume dimension.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
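For reference, this kind of resampling can be sketched with Dipy's reslice (scilpy builds on Dipy); the file names and the 2 mm target below are placeholders, and order=1 mirrors the linear default described above:

    import nibabel as nib
    from dipy.align.reslice import reslice

    img = nib.load('in.nii.gz')  # placeholder path
    data, affine = img.get_fdata(), img.affine
    zooms = img.header.get_zooms()[:3]
    new_zooms = (2.0, 2.0, 2.0)  # e.g. the effect of --voxel_size 2
    data2, affine2 = reslice(data, affine, zooms, new_zooms, order=1)
    nib.save(nib.Nifti1Image(data2, affine2), 'out.nii.gz')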
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "volume", - "volumes", - "volume" - ], - [ - "work", - "and" - ], - [ - "considered", - "involved", - "being" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "order", - "set" - ], - [ - "question", - "argument" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "future", - "will" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "long", - "with" - ], - [ - "parameters", - "specified" - ], - [ - "methods", - "using" - ], - [ - "future", - "current" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "larger", - "size", - "size" - ], - [ - "left", - "another" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "shape", - "structure", - "shape" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_reshape_to_reference", - "docstring": "Reshape / reslice / resample *.nii or *.nii.gz using a reference.\nThis script can be used to align freesurfer/civet output, as .mgz,\nto the original input image.\n\n>>> scil_volume_reshape_to_reference.py wmparc.mgz t1.nii.gz wmparc_t1.nii.gz\\\n --interpolation nearest\n\nFormerly: scil_reshape_to_reference.py", - "help": "usage: scil_volume_reshape_to_reference.py [-h]\n [--interpolation {linear,nearest}]\n [--keep_dtype]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file in_ref_file out_file\n\nReshape / reslice / resample *.nii or *.nii.gz using a reference.\nThis script can be used to align freesurfer/civet output, as .mgz,\nto the original input image.\n\n>>> scil_volume_reshape_to_reference.py wmparc.mgz t1.nii.gz wmparc_t1.nii.gz\\\n --interpolation nearest\n\nFormerly: scil_reshape_to_reference.py\n\npositional arguments:\n in_file Path of the image (.nii or .mgz) to be reshaped.\n in_ref_file Path of the reference image (.nii).\n out_file Output filename of the reshaped image (.nii).\n\noptions:\n -h, --help show this help message and exit\n --interpolation {linear,nearest}\n Interpolation: \"linear\" or \"nearest\". [linear]\n --keep_dtype If True, keeps the data_type of the input image (in_file) when saving the output image (out_file).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "long", - "a" - ], - [ - "work", - "and" - ], - [ - "methods", - "using" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "meaning", - "true", - "true" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "considered", - "role", - "work", - "as" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "image", - "image" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "left", - "result", - "when" - ], - [ - "considered", - "is" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_stats_in_ROI", - "docstring": "Compute the statistics (mean, std) of scalar maps, which can represent\ndiffusion metrics, in a ROI. Prints the results.\n\nThe mask can either be a binary mask, or a weighting mask. If the mask is\na weighting mask, it should either contain floats between 0 and 1 or should be\nnormalized with --normalize_weights. IMPORTANT: if the mask contains weights\n(and not 0 and 1 exclusively), the standard deviation will also be weighted.", - "help": "usage: scil_volume_stats_in_ROI.py [-h]\n (--metrics_dir dir | --metrics file [file ...])\n [--bin] [--normalize_weights]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_mask\n\nCompute the statistics (mean, std) of scalar maps, which can represent\ndiffusion metrics, in a ROI. Prints the results.\n\nThe mask can either be a binary mask, or a weighting mask. If the mask is\na weighting mask, it should either contain floats between 0 and 1 or should be\nnormalized with --normalize_weights. IMPORTANT: if the mask contains weights\n(and not 0 and 1 exclusively), the standard deviation will also be weighted.\n\npositional arguments:\n in_mask Mask volume filename.\n Can be a binary mask or a weighted mask.\n\noptions:\n -h, --help show this help message and exit\n --bin If set, will consider every value of the mask higher than 0 to be \n part of the mask (equivalent weighting for every voxel).\n --normalize_weights If set, the weights will be normalized to the [0,1] range.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics input options:\n --metrics_dir dir Name of the directory containing metrics files: we will \n load all nifti files.\n --metrics file [file ...]\n Metrics nifti filename. 
List of the names of the metrics file, \n in nifti format.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "volume", - "volumes", - "volume" - ], - [ - "based", - "form", - "result", - "which" - ], - [ - "work", - "and" - ], - [ - "work", - "also" - ], - [ - "create", - "experience", - "matter", - "thinking", - "true", - "sort" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "left", - "result", - "when" - ], - [ - "meaning", - "name" - ], - [ - "order", - "set" - ], - [ - "clear", - "thinking", - "work", - "we" - ], - [ - "long", - "a" - ], - [ - "voxel", - "voxel" - ], - [ - "held", - "on" - ], - [ - "represent", - "represent" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "diffusion", - "diffusion" - ], - [ - "future", - "will" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "held", - "in" - ], - [ - "clear", - "matter", - "order", - "question", - "step", - "work", - "should" - ], - [ - "long", - "with" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "using" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "result", - "results" - ], - [ - "weighted", - "weighted" - ], - [ - "difference", - "meaning", - "result", - "mean" - ], - [ - "work", - "all" - ], - [ - "area", - "main", - "work", - "part" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "clear", - "considered", - "long", - "matter", - "order", - "question", - "result", - "work", - "not" - ], - [ - "maps", - "maps" - ], - [ - "clear", - "considered", - "form", - "result", - "either" - ], - [ - "considered", - "possibility", - "question", - "step", - "consider" - ], - [ - "considered", - "key", - "work", - "important" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_stats_in_labels", - "docstring": "Computes the information from the input map for each cortical region\n(corresponding to an atlas).\n\nHint: For instance, this script could be useful if you have a seed map from a\nspecific bundle, to know from which regions it originated.\n\nFormerly: scil_compute_seed_by_labels.py", - "help": "usage: scil_volume_stats_in_labels.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_labels in_labels_lut in_map\n\nComputes the information from the input map for each cortical region\n(corresponding to an atlas).\n\nHint: For instance, this script could be useful if you have a seed map from a\nspecific bundle, to know from which regions it originated.\n\nFormerly: scil_compute_seed_by_labels.py\n\npositional arguments:\n in_labels Path of the input label file.\n in_labels_lut Path of the LUT file corresponding to labels,used to name the regions of interest.\n in_map Path of the input map file. Expecting a 3D file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
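Conceptually, the per-region computation of scil_volume_stats_in_labels.py described above is close to this numpy sketch (a simplification: the LUT lookup is omitted and the arrays are synthetic stand-ins):

    import numpy as np

    labels = np.random.randint(0, 4, (8, 8, 8))  # stand-in atlas labels
    scalar_map = np.random.rand(8, 8, 8)         # stand-in metric map

    for label in np.unique(labels):
        if label == 0:  # 0 usually codes background
            continue
        values = scalar_map[labels == label]
        print(label, values.mean(), values.std())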
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "based", - "form", - "result", - "which" - ], - [ - "work", - "and" - ], - [ - "high", - "higher", - "highest", - "level", - "level" - ], - [ - "matter", - "question", - "thinking", - "true", - "know" - ], - [ - "left", - "from" - ], - [ - "left", - "result", - "when" - ], - [ - "areas", - "region", - "regions", - "regions" - ], - [ - "meaning", - "name" - ], - [ - "long", - "have" - ], - [ - "thinking", - "you" - ], - [ - "specific", - "specific" - ], - [ - "long", - "a" - ], - [ - "tool", - "useful" - ], - [ - "held", - "on" - ], - [ - "clear", - "long", - "main", - "result", - "the" - ], - [ - "higher", - "interest" - ], - [ - "clear", - "considered", - "form", - "long", - "result", - "true", - "work", - "this" - ], - [ - "future", - "work", - "working", - "for" - ], - [ - "data", - "knowledge", - "information" - ], - [ - "clear", - "considered", - "result", - "work", - "be" - ], - [ - "create", - "work", - "working", - "help" - ], - [ - "atlas", - "atlas" - ], - [ - "cortical", - "cortical" - ], - [ - "methods", - "using" - ], - [ - "contrast", - "specific", - "subject", - "instance" - ], - [ - "create", - "order", - "work", - "to" - ], - [ - "considered", - "is" - ], - [ - "clear", - "considered", - "result", - "work", - "it" - ], - [ - "individual", - "each" - ], - [ - "clear", - "matter", - "question", - "result", - "if" - ], - [ - "area", - "areas", - "region", - "regions", - "region" - ], - [ - "maps", - "map" - ], - [ - "bundles", - "bundle" - ], - [ - "clear", - "result", - "work", - "could" - ] - ], - "keywords": [] - } - ] -} \ No newline at end of file diff --git a/scilpy-bot-scripts/json_files/knowledge_base_word2vec.json b/scilpy-bot-scripts/json_files/knowledge_base_word2vec.json deleted file mode 100644 index b5d1f96bd..000000000 --- a/scilpy-bot-scripts/json_files/knowledge_base_word2vec.json +++ /dev/null @@ -1,9715 +0,0 @@ -{ - "scripts": [ - { - "name": "scil_NODDI_maps", - "docstring": "Compute NODDI [1] maps using AMICO.\nMulti-shell DWI necessary.\n\nFormerly: scil_compute_NODDI.py", - "help": "usage: scil_NODDI_maps.py [-h] [--mask MASK] [--out_dir OUT_DIR]\n [--tolerance tol] [--skip_b0_check]\n [--para_diff PARA_DIFF] [--iso_diff ISO_DIFF]\n [--lambda1 LAMBDA1] [--lambda2 LAMBDA2]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec\n\nCompute NODDI [1] maps using AMICO.\nMulti-shell DWI necessary.\n\nFormerly: scil_compute_NODDI.py\n\npositional arguments:\n in_dwi DWI file acquired with a NODDI compatible protocol (single-shell data not suited).\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-vectors filename, in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Brain mask filename.\n --out_dir OUT_DIR Output directory for the NODDI results. [results]\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). 
Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nModel options:\n --para_diff PARA_DIFF\n Axial diffusivity (AD) in the CC. [0.0017]\n --iso_diff ISO_DIFF Mean diffusivity (MD) in ventricles. [0.003]\n --lambda1 LAMBDA1 First regularization parameter. [0.5]\n --lambda2 LAMBDA2 Second regularization parameter. [0.001]\n\nKernels options:\n --save_kernels DIRECTORY\n Output directory for the COMMIT kernels.\n --load_kernels DIRECTORY\n Input directory where the COMMIT kernels are located.\n --compute_only Compute kernels only, --save_kernels must be used.\n\nReference:\n [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n NODDI: practical in vivo neurite orientation dispersion\n and density imaging of the human brain.\n NeuroImage. 2012 Jul 16;61:1000-16.\n", - "synonyms": [ - [ - "human", - "human" - ], - [ - "axial", - "axial" - ], - [ - "orientation", - "orientation" - ], - [ - "imaging", - "imaging" - ], - [ - "high", - "high" - ], - [ - "maps", - "maps" - ], - [ - "parameter", - "parameter" - ], - [ - "Data", - "data", - "data" - ], - [ - "processes", - "processes" - ], - [ - "vivo", - "vivo" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_NODDI_priors", - "docstring": "Compute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py", - "help": "usage: scil_NODDI_priors.py [-h] [--fa_min_single_fiber FA_MIN_SINGLE_FIBER]\n [--fa_max_ventricles FA_MAX_VENTRICLES]\n [--md_min_ventricles MD_MIN_VENTRICLES]\n [--roi_radius ROI_RADIUS]\n [--roi_center pos pos pos]\n [--out_txt_1fiber_para FILE]\n [--out_txt_1fiber_perp FILE]\n [--out_mask_1fiber FILE]\n [--out_txt_ventricles FILE]\n [--out_mask_ventricles FILE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_FA in_AD in_RD in_MD\n\nCompute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py\n\npositional arguments:\n in_FA Path to the FA volume.\n in_AD Path to the axial diffusivity (AD) volume.\n in_RD Path to the radial diffusivity (RD) volume.\n in_MD Path to the mean diffusivity (MD) volume.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics options:\n --fa_min_single_fiber FA_MIN_SINGLE_FIBER\n Minimal threshold of FA (voxels above that threshold are considered in \n the single fiber mask). [0.7]\n --fa_max_ventricles FA_MAX_VENTRICLES\n Maximal threshold of FA (voxels under that threshold are considered in \n the ventricles). [0.1]\n --md_min_ventricles MD_MIN_VENTRICLES\n Minimal threshold of MD in mm2/s (voxels above that threshold are considered \n for in the ventricles). [0.003]\n\nRegions options:\n --roi_radius ROI_RADIUS\n Radius of the region used to estimate the priors. The roi will be a cube spanning \n from ROI_CENTER in each direction. 
[20]\n --roi_center pos pos pos\n Center of the ROI of size roi_radius used to estimate the priors; a 3-value coordinate. \n If not set, uses the center of the 3D volume.\n\nOutputs:\n --out_txt_1fiber_para FILE\n Output path for the text file containing the single fiber average value of AD.\n If not set, the file will not be saved.\n --out_txt_1fiber_perp FILE\n Output path for the text file containing the single fiber average value of RD.\n If not set, the file will not be saved.\n --out_mask_1fiber FILE\n Output path for single fiber mask. If not set, the mask will not be saved.\n --out_txt_ventricles FILE\n Output path for the text file containing the ventricles average value of MD.\n If not set, the file will not be saved.\n --out_mask_ventricles FILE\n Output path for the ventricle mask.\n If not set, the mask will not be saved.\n\nReference:\n [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n NODDI: practical in vivo neurite orientation dispersion and density\n imaging of the human brain. NeuroImage. 2012 Jul 16;61:1000-16.\n", - "synonyms": [ - [ - "region", - "regions", - "region" - ], - [ - "human", - "human" - ], - [ - "axial", - "axial" - ], - [ - "direction", - "direction" - ], - [ - "orientation", - "orientation" - ], - [ - "region", - "regions", - "regions" - ], - [ - "imaging", - "imaging" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "voxel", - "voxels" - ], - [ - "vivo", - "vivo" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "average", - "average" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_aodf_metrics", - "docstring": "Script to compute various metrics derived from asymmetric ODF.\n\nThese metrics include the asymmetric peak directions image, a number of fiber\ndirections (nufid) map [1], the asymmetry index (ASI) map [2] and an odd-power\nmap [3].\n\nThe asymmetric peak directions image contains peaks per hemisphere, considering\nantipodal sphere directions as distinct. On a symmetric signal, the number of\nasymmetric peaks extracted is then twice the number of symmetric peaks.\n\nThe nufid map is the asymmetric alternative to NuFO maps. It counts the\nnumber of asymmetric peaks extracted and ranges in [0..N] with N the maximum\nnumber of peaks.\n\nThe asymmetric index is a cosine-based metric in the range [0..1], with 0\ncorresponding to a perfectly symmetric signal and 1 to a perfectly asymmetric\nsignal.\n\nThe odd-power map is also in the range [0..1], with 0 corresponding to a\nperfectly symmetric signal and 1 to a perfectly anti-symmetric signal. 
It is\ngiven as the ratio of the L2-norm of odd SH coefficients to the L2-norm of all\nSH coefficients.\n\nFormerly: scil_compute_asym_odf_metrics.py", - "help": "usage: scil_aodf_metrics.py [-h] [--mask MASK] [--asi_map ASI_MAP]\n [--odd_power_map ODD_POWER_MAP] [--peaks PEAKS]\n [--peak_values PEAK_VALUES]\n [--peak_indices PEAK_INDICES] [--nufid NUFID]\n [--not_all] [--at A_THRESHOLD] [--rt R_THRESHOLD]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--processes NBR]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh\n\nScript to compute various metrics derived from asymmetric ODF.\n\nThese metrics include the asymmetric peak directions image, a number of fiber\ndirections (nufid) map [1], the asymmetry index (ASI) map [2] and an odd-power\nmap [3].\n\nThe asymmetric peak directions image contains peaks per hemisphere, considering\nantipodal sphere directions as distinct. On a symmetric signal, the number of\nasymmetric peaks extracted is then twice the number of symmetric peaks.\n\nThe nufid map is the asymmetric alternative to NuFO maps. It counts the\nnumber of asymmetric peaks extracted and ranges in [0..N] with N the maximum\nnumber of peaks.\n\nThe asymmetric index is a cosine-based metric in the range [0..1], with 0\ncorresponding to a perfectly symmetric signal and 1 to a perfectly asymmetric\nsignal.\n\nThe odd-power map is also in the range [0..1], with 0 corresponding to a\nperfectly symmetric signal and 1 to a perfectly anti-symmetric signal. It is\ngiven as the ratio of the L2-norm of odd SH coefficients to the L2-norm of all\nSH coefficients.\n\nFormerly: scil_compute_asym_odf_metrics.py\n\npositional arguments:\n in_sh Input SH image.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Optional mask.\n --asi_map ASI_MAP Output asymmetry index (ASI) map.\n --odd_power_map ODD_POWER_MAP\n Output odd power map.\n --peaks PEAKS Output filename for the extracted peaks.\n --peak_values PEAK_VALUES\n Output filename for the extracted peaks values.\n --peak_indices PEAK_INDICES\n Output filename for the generated peaks indices on the sphere.\n --nufid NUFID Output filename for the nufid file.\n --not_all If set, only saves the files specified using the file flags [False].\n --at A_THRESHOLD Absolute threshold on fODF amplitude. This value should be set to\n approximately 1.5 to 2 times the maximum fODF amplitude in isotropic voxels\n (i.e. ventricles).\n Use scil_fodf_max_in_ventricles.py to find the maximal value.\n See [Dell'Acqua et al HBM 2013] [0.0].\n --rt R_THRESHOLD Relative threshold on fODF amplitude in percentage [0.1].\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere to use for peak directions estimation [symmetric724].\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. 
MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] C. Poirier and M. Descoteaux, \"Filtering Methods for Asymmetric ODFs:\nWhere and How Asymmetry Occurs in the White Matter.\" bioRxiv. 2022 Jan 1;\n2022.12.18.520881. doi: https://doi.org/10.1101/2022.12.18.520881\n\n[2] S. Cetin Karayumak, E. \u00d6zarslan, and G. Unal,\n\"Asymmetric Orientation Distribution Functions (AODFs) revealing intravoxel\ngeometry in diffusion MRI,\" Magnetic Resonance Imaging, vol. 49, pp. 145-158,\nJun. 2018, doi: https://doi.org/10.1016/j.mri.2018.03.006.\n\n[3] C. Poirier, E. St-Onge, and M. Descoteaux, \"Investigating the Occurence of\nAsymmetric Patterns in White Matter Fiber Orientation Distribution Functions\"\n[Abstract], In: Proc. Intl. Soc. Mag. Reson. Med. 29 (2021), 2021 May 15-20,\nVancouver, BC, Abstract number 0865.\n", - "synonyms": [ - [ - "variety", - "various" - ], - [ - "maps", - "map" - ], - [ - "white", - "white" - ], - [ - "orientation", - "orientation" - ], - [ - "imaging", - "imaging" - ], - [ - "image", - "image" - ], - [ - "diffusion", - "diffusion" - ], - [ - "maps", - "maps" - ], - [ - "signal", - "signal" - ], - [ - "processes", - "processes" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "voxel", - "voxels" - ], - [ - "based", - "based" - ], - [ - "occurrence", - "occurence" - ], - [ - "matter", - "matter" - ], - [ - "hemisphere", - "hemisphere" - ], - [ - "methods", - "methods" - ], - [ - "level", - "level" - ], - [ - "false", - "false" - ], - [ - "function", - "functions", - "functions" - ] - ], - "keywords": [] - }, - { - "name": "scil_bids_validate", - "docstring": "Create a json file from a BIDS dataset detailing all info\nneeded for tractoflow\n- DWI/rev_DWI\n- T1\n- fmap/sbref (based on IntendedFor entity)\n- Freesurfer (optional - could be one per participant\n or one per participant/session)\n\nThe BIDS dataset MUST be homogeneous.\nThe metadata need to be uniform across all participants/sessions/runs\n\nMandatory entity: IntendedFor\nSensitive entities: PhaseEncodingDirection, TotalReadoutTime, direction\n\nFormerly: scil_validate_bids.py", - "help": "usage: scil_bids_validate.py [-h] [--bids_ignore BIDS_IGNORE] [--fs FS]\n [--clean] [--readout READOUT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bids out_json\n\nCreate a json file from a BIDS dataset detailing all info\nneeded for tractoflow\n- DWI/rev_DWI\n- T1\n- fmap/sbref (based on IntendedFor entity)\n- Freesurfer (optional - could be one per participant\n or one per participant/session)\n\nThe BIDS dataset MUST be homogeneous.\nThe metadata need to be uniform across all participants/sessions/runs\n\nMandatory entity: IntendedFor\nSensitive entities: PhaseEncodingDirection, TotalReadoutTime, direction\n\nFormerly: scil_validate_bids.py\n\npositional arguments:\n in_bids Input BIDS folder.\n out_json Output json file.\n\noptions:\n -h, --help show this help message and exit\n --bids_ignore BIDS_IGNORE\n If you want to ignore some subjects or some files, you\n can provide an extra bidsignore file. Check:\n https://github.com/bids-standard/bids-\n validator#bidsignore\n --fs FS Output freesurfer path. 
It will add keys wmparc and\n aparc+aseg.\n --clean If set, it will remove all the participants that are\n missing any information.\n --readout READOUT Default total readout time value [0.062].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "participants", - "participants" - ], - [ - "direction", - "direction" - ], - [ - "create", - "create" - ], - [ - "participant", - "participant" - ], - [ - "subjects", - "subjects" - ], - [ - "based", - "based" - ], - [ - "level", - "level" - ], - [ - "total", - "total" - ] - ], - "keywords": [] - }, - { - "name": "scil_bingham_metrics", - "docstring": "Script to compute fODF lobe-specific metrics derived from a Bingham\ndistribution fit, as described in [1]. Resulting metrics are fiber density\n(FD), fiber spread (FS) and fiber fraction (FF) [2].\n\nThe Bingham coefficients volume comes from scil_fodf_to_bingham.py.\n\nA lobe's FD is the integral of the Bingham function on the sphere. It\nrepresents the density of fibers going through a given voxel for a given\nfODF lobe (fixel). A lobe's FS is the ratio of its FD on its maximum AFD. It\nis at its minimum for a sharp lobe and at its maximum for a wide lobe. A lobe's\nFF is the ratio of its FD on the total FD in the voxel.\n\nUsing 12 threads, the execution takes 10 minutes for FD estimation for a brain\nwith 1mm isotropic resolution. Other metrics take less than a second.\n\nFormerly: scil_compute_lobe_specific_fodf_metrics.py", - "help": "usage: scil_bingham_metrics.py [-h] [--out_fd OUT_FD] [--out_fs OUT_FS]\n [--out_ff OUT_FF] [--not_all] [--mask MASK]\n [--nbr_integration_steps NBR_INTEGRATION_STEPS]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR]\n [-f]\n in_bingham\n\nScript to compute fODF lobe-specific metrics derived from a Bingham\ndistribution fit, as described in [1]. Resulting metrics are fiber density\n(FD), fiber spread (FS) and fiber fraction (FF) [2].\n\nThe Bingham coefficients volume comes from scil_fodf_to_bingham.py.\n\nA lobe's FD is the integral of the Bingham function on the sphere. It\nrepresents the density of fibers going through a given voxel for a given\nfODF lobe (fixel). A lobe's FS is the ratio of its FD on its maximum AFD. It\nis at its minimum for a sharp lobe and at its maximum for a wide lobe. A lobe's\nFF is the ratio of its FD on the total FD in the voxel.\n\nUsing 12 threads, the execution takes 10 minutes for FD estimation for a brain\nwith 1mm isotropic resolution. Other metrics take less than a second.\n\nFormerly: scil_compute_lobe_specific_fodf_metrics.py\n\npositional arguments:\n in_bingham Input Bingham nifti image.\n\noptions:\n -h, --help show this help message and exit\n --out_fd OUT_FD Path to output fiber density. [fd.nii.gz]\n --out_fs OUT_FS Path to output fiber spread. [fs.nii.gz]\n --out_ff OUT_FF Path to fiber fraction file. [ff.nii.gz]\n --not_all Do not compute all metrics. Then, please provide the output paths of the files you need.\n --mask MASK Optional mask image. Only voxels inside the mask are computed.\n --nbr_integration_steps NBR_INTEGRATION_STEPS\n Number of integration steps along the theta axis for fiber density estimation. [50]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. 
\n Default: [1]\n -f Force overwriting of the output files.\n\n[1] T. W. Riffert, J. Schreiber, A. Anwander, and T. R. Kn\u00f6sche, \u201cBeyond\n fractional anisotropy: Extraction of bundle-specific structural metrics\n from crossing fiber models,\u201d NeuroImage, vol. 100, pp. 176-191, Oct. 2014,\n doi: 10.1016/j.neuroimage.2014.06.015.\n\n[2] J. Schreiber, T. Riffert, A. Anwander, and T. R. Kn\u00f6sche, \u201cPlausibility\n Tracking: A method to evaluate anatomical connectivity and microstructural\n properties along fiber pathways,\u201d NeuroImage, vol. 90, pp. 163-178, Apr.\n 2014, doi: 10.1016/j.neuroimage.2014.01.002.\n", - "synonyms": [ - [ - "lobe", - "lobe" - ], - [ - "methods", - "method" - ], - [ - "examine", - "evaluate" - ], - [ - "connectivity", - "connectivity" - ], - [ - "bundles", - "bundle" - ], - [ - "image", - "image" - ], - [ - "pathway", - "pathways", - "pathways" - ], - [ - "processes", - "processes" - ], - [ - "function", - "functions", - "function" - ], - [ - "voxel", - "voxels" - ], - [ - "integrating", - "integration" - ], - [ - "structural", - "structural" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "anatomical", - "anatomy", - "anatomical" - ], - [ - "voxel", - "voxel" - ], - [ - "tracking", - "tracking" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ], - [ - "total", - "total" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_btensor_metrics", - "docstring": "Script to compute microstructure metrics using the DIVIDE method. In order to\noperate, the script needs at least two different types of b-tensor encodings.\nNote that custom encodings are not yet supported, so that only the linear\ntensor encoding (LTE, b_delta = 1), the planar tensor encoding\n(PTE, b_delta = -0.5), the spherical tensor encoding (STE, b_delta = 0) and\nthe cigar shape tensor encoding (b_delta = 0.5) are available. Moreover, all\nof `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names. Thus, this\nscript outputs the results from the DIVIDE fit or direct derivatives:\nmean diffusivity (MD), isotropic mean kurtosis (mk_i), anisotropic mean\nkurtosis (mk_a), total mean kurtosis (mk_t) and finally micro-FA (uFA).\nSpecific names can be specified using the\nfile flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output. The order parameter can also be computed from the uFA and a\nprecomputed FA, using separate input parameters.\n\n>>> scil_btensor_metrics.py --in_dwis LTE.nii.gz PTE.nii.gz STE.nii.gz\n --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs LTE.bvec PTE.bvec STE.bvec\n --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nIMPORTANT: If the script does not converge to a solution, it is probably due to\nnoise outside the brain. Thus, it is strongly recommended to provide a brain\nmask with --mask.\n\nBased on Markus Nilsson, Filip Szczepankiewicz, Bj\u00f6rn Lampinen, Andr\u00e9 Ahlgren,\nJo\u00e3o P. de Almeida Martins, Samo Lasic, Carl-Fredrik Westin,\nand Daniel Topgaard. An open-source framework for analysis of multidimensional\ndiffusion MRI data implemented in MATLAB.\nProc. Intl. Soc. Mag. Reson. Med. 
(26), Paris, France, 2018.\n\nFormerly: scil_compute_divide.py", - "help": "usage: scil_btensor_metrics.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--mask MASK]\n [--tolerance tol] [--skip_b0_check]\n [--fit_iters FIT_ITERS]\n [--random_iters RANDOM_ITERS]\n [--do_weight_bvals] [--do_weight_pa]\n [--do_multiple_s0] [--op OP] [--fa FA]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n [-f] [--not_all] [--md file] [--ufa file]\n [--mk_i file] [--mk_a file] [--mk_t file]\n\nScript to compute microstructure metrics using the DIVIDE method. In order to\noperate, the script needs at least two different types of b-tensor encodings.\nNote that custom encodings are not yet supported, so that only the linear\ntensor encoding (LTE, b_delta = 1), the planar tensor encoding\n(PTE, b_delta = -0.5), the spherical tensor encoding (STE, b_delta = 0) and\nthe cigar shape tensor encoding (b_delta = 0.5) are available. Moreover, all\nof `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names. Thus, this\nscript outputs the results from the DIVIDE fit or direct derivatives:\nmean diffusivity (MD), isotropic mean kurtosis (mk_i), anisotropic mean\nkurtosis (mk_a), total mean kurtosis (mk_t) and finally micro-FA (uFA).\nSpecific names can be specified using the\nfile flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output. The order parameter can also be computed from the uFA and a\nprecomputed FA, using separate input parameters.\n\n>>> scil_btensor_metrics.py --in_dwis LTE.nii.gz PTE.nii.gz STE.nii.gz\n --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs LTE.bvec PTE.bvec STE.bvec\n --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nIMPORTANT: If the script does not converge to a solution, it is probably due to\nnoise outside the brain. Thus, it is strongly recommended to provide a brain\nmask with --mask.\n\nBased on Markus Nilsson, Filip Szczepankiewicz, Bj\u00f6rn Lampinen, Andr\u00e9 Ahlgren,\nJo\u00e3o P. de Almeida Martins, Samo Lasic, Carl-Fredrik Westin,\nand Daniel Topgaard. An open-source framework for analysis of multidimensional\ndiffusion MRI data implemented in MATLAB.\nProc. Intl. Soc. Mag. Reson. Med. (26), Paris, France, 2018.\n\nFormerly: scil_compute_divide.py\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as dwi, bval and bvec inputs.\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. 
To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --fit_iters FIT_ITERS\n The number of times the gamma fit will be done [1]\n --random_iters RANDOM_ITERS\n The number of iterations for the initial parameters search. [50]\n --do_weight_bvals If set, does not do a weighting on the bvalues in the gamma fit.\n --do_weight_pa If set, does not do a powder averaging weighting in the gamma fit.\n --do_multiple_s0 If set, does not take into account multiple baseline signals.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags. (Default: False)\n\nOrder parameter (OP):\n --op OP Output filename for the order parameter. The OP will not be output if this is not given. Computation of the OP also requires a precomputed FA map (given using --fa).\n --fa FA Path to a FA map. Needed for calculating the OP.\n\nFile flags:\n --md file Output filename for the MD.\n --ufa file Output filename for the microscopic FA.\n --mk_i file Output filename for the isotropic mean kurtosis.\n --mk_a file Output filename for the anisotropic mean kurtosis.\n --mk_t file Output filename for the total mean kurtosis.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "methods", - "method" - ], - [ - "signal", - "signals" - ], - [ - "maps", - "map" - ], - [ - "level", - "level" - ], - [ - "supported", - "supported" - ], - [ - "high", - "high" - ], - [ - "diffusion", - "diffusion" - ], - [ - "parameter", - "parameter" - ], - [ - "Data", - "data", - "data" - ], - [ - "processes", - "processes" - ], - [ - "based", - "based" - ], - [ - "shape", - "shape" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "specific", - "specific" - ], - [ - "total", - "total" - ], - [ - "parameters", - "parameters" - ], - [ - "analysis", - "analysis" - ], - [ - "false", - "false" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_clean_qbx_clusters", - "docstring": "Render clusters sequentially to either accept or reject them based on\nvisual inspection. Useful for cleaning bundles for RBx, BST or for figures.\nThe VTK window does not handle well opacity of streamlines; this is a\nnormal rendering behavior.\nOften used in pair with scil_tractogram_qbx.py.\n\nKey mapping:\n- a/A: accept displayed clusters\n- r/R: reject displayed clusters\n- z/Z: Rewind one element\n- c/C: Stop rendering of the background concatenation of streamlines\n- q/Q: Early window exit, everything remaining will be rejected", - "help": "usage: scil_bundle_clean_qbx_clusters.py [-h]\n [--out_accepted_dir OUT_ACCEPTED_DIR]\n [--out_rejected_dir OUT_REJECTED_DIR]\n [--min_cluster_size MIN_CLUSTER_SIZE]\n [--background_opacity BACKGROUND_OPACITY]\n [--background_linewidth BACKGROUND_LINEWIDTH]\n [--clusters_linewidth CLUSTERS_LINEWIDTH]\n [--reference REFERENCE]\n [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...]\n out_accepted out_rejected\n\n Render clusters sequentially to either accept or reject them based on\n visual inspection. 
Useful for cleaning bundles for RBx, BST or for figures.\n The VTK window does not handle well opacity of streamlines; this is a\n normal rendering behavior.\n Often used in pair with scil_tractogram_qbx.py.\n\n Key mapping:\n - a/A: accept displayed clusters\n - r/R: reject displayed clusters\n - z/Z: Rewind one element\n - c/C: Stop rendering of the background concatenation of streamlines\n - q/Q: Early window exit, everything remaining will be rejected\n\npositional arguments:\n in_bundles List of the clusters filename.\n out_accepted Filename of the concatenated accepted clusters.\n out_rejected Filename of the concatenated rejected clusters.\n\noptions:\n -h, --help show this help message and exit\n --out_accepted_dir OUT_ACCEPTED_DIR\n Directory to save all accepted clusters separately.\n --out_rejected_dir OUT_REJECTED_DIR\n Directory to save all rejected clusters separately.\n --min_cluster_size MIN_CLUSTER_SIZE\n Minimum cluster size for consideration [1]. Must be at least 1.\n --background_opacity BACKGROUND_OPACITY\n Opacity of the background streamlines. Keep low between 0 and 0.5 [0.1].\n --background_linewidth BACKGROUND_LINEWIDTH\n Linewidth of the background streamlines [1].\n --clusters_linewidth CLUSTERS_LINEWIDTH\n Linewidth of the current cluster [1].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "visual", - "visual" - ], - [ - "streamlines", - "streamlines" - ], - [ - "key", - "key" - ], - [ - "invalid", - "invalid" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "high", - "low" - ], - [ - "rendered", - "rendering", - "rendering" - ], - [ - "based", - "based" - ], - [ - "bundles", - "bundles" - ], - [ - "exist", - "exist" - ], - [ - "level", - "level" - ], - [ - "rendered", - "rendering", - "render" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_compute_centroid", - "docstring": "Compute a single bundle centroid, using an 'infinite' QuickBundles threshold.\n\nFormerly: scil_compute_centroid.py", - "help": "usage: scil_bundle_compute_centroid.py [-h] [--nb_points NB_POINTS]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_centroid\n\nCompute a single bundle centroid, using an 'infinite' QuickBundles threshold.\n\nFormerly: scil_compute_centroid.py\n\npositional arguments:\n in_bundle Fiber bundle file.\n out_centroid Output centroid streamline filename.\n\noptions:\n -h, --help show this help message and exit\n --nb_points NB_POINTS\n Number of points defining the centroid streamline [20].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "level", - "level" - ], - [ - "bundles", - "bundle" - ], - [ - "streamline", - "streamline" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_compute_endpoints_map", - "docstring": "Computes the endpoint map of a bundle. The endpoint map is simply a count of\nthe number of streamlines that start or end in each voxel.\n\nThe idea is to estimate the cortical area affected by the bundle (assuming\nstreamlines start/end in the cortex).\n\nNote: If the streamlines are not ordered the head/tail are random and not\nreally two coherent groups. Use the following script to order streamlines:\nscil_tractogram_uniformize_endpoints.py\n\nFormerly: scil_compute_endpoints_map.py", - "help": "usage: scil_bundle_compute_endpoints_map.py [-h] [--swap] [--binary]\n [--nb_points NB_POINTS]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle endpoints_map_head\n endpoints_map_tail\n\nComputes the endpoint map of a bundle. The endpoint map is simply a count of\nthe number of streamlines that start or end in each voxel.\n\nThe idea is to estimate the cortical area affected by the bundle (assuming\nstreamlines start/end in the cortex).\n\nNote: If the streamlines are not ordered the head/tail are random and not\nreally two coherent groups. Use the following script to order streamlines:\nscil_tractogram_uniformize_endpoints.py\n\nFormerly: scil_compute_endpoints_map.py\n\npositional arguments:\n in_bundle Fiber bundle filename.\n endpoints_map_head Output endpoints map head filename.\n endpoints_map_tail Output endpoints map tail filename.\n\noptions:\n -h, --help show this help message and exit\n --swap Swap head<->tail convention. Can be useful when the reference is not in RAS.\n --binary Save outputs as a binary mask instead of a heat map.\n --nb_points NB_POINTS\n Number of points to consider at the extremities of the streamlines. [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "streamlines", - "streamlines" - ], - [ - "maps", - "map" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "cortex", - "cortical", - "cortex" - ], - [ - "random", - "random" - ], - [ - "area", - "area" - ], - [ - "cortex", - "cortical", - "parietal", - "cortical" - ], - [ - "binary", - "binary" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_diameter", - "docstring": "Script to estimate the diameter of bundle(s) along their length.\nSee also scil_bundle_shape_measures.py, which prints a quick estimate of\nthe diameter (volume / length). 
The computation here is more complex and done\nfor each section of the bundle.\n\nThe script expects:\n- bundles with coherent endpoints from scil_tractogram_uniformize_endpoints.py\n- labels maps with around 5-50 points from scil_bundle_label_map.py\n <5 is not enough, high risk of bad fit\n >50 is too much, high risk of bad fit\n- bundles that are close to a tube\n without major fanning in a single axis\n fanning is in 2 directions (uniform dispersion) good approximation\n\nThe script prints a JSON file with mean/std to be compatible with tractometry.\nWARNING: STD is in fact an ERROR measure from the fit and NOT an STD.\n\nSince the estimation and fit quality is not always intuitive for some bundles\nand the tube with varying diameter is not easy to color/visualize,\nthe script comes with its own VTK rendering to allow exploration of the data.\n(optional).\n\nFormerly: scil_estimate_bundles_diameter.py", - "help": "usage: scil_bundle_diameter.py [-h]\n [--fitting_func {lin_up,lin_down,exp,inv,log}]\n [--show_rendering | --save_rendering OUT_FOLDER]\n [--wireframe] [--error_coloring]\n [--width WIDTH] [--opacity OPACITY]\n [--win_dims WIDTH HEIGHT] [--background R G B]\n [--reference REFERENCE] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...] in_labels\n [in_labels ...]\n\nScript to estimate the diameter of bundle(s) along their length.\nSee also scil_bundle_shape_measures.py, which prints a quick estimate of\nthe diameter (volume / length). The computation here is more complex and done\nfor each section of the bundle.\n\nThe script expects:\n- bundles with coherent endpoints from scil_tractogram_uniformize_endpoints.py\n- labels maps with around 5-50 points from scil_bundle_label_map.py\n <5 is not enough, high risk of bad fit\n >50 is too much, high risk of bad fit\n- bundles that are close to a tube\n without major fanning in a single axis\n fanning is in 2 directions (uniform dispersion) good approximation\n\nThe script prints a JSON file with mean/std to be compatible with tractometry.\nWARNING: STD is in fact an ERROR measure from the fit and NOT an STD.\n\nSince the estimation and fit quality is not always intuitive for some bundles\nand the tube with varying diameter is not easy to color/visualize,\nthe script comes with its own VTK rendering to allow exploration of the data.\n(optional).\n\nFormerly: scil_estimate_bundles_diameter.py\n\npositional arguments:\n in_bundles List of tractography files.\n in_labels List of labels maps that match the bundles.\n\noptions:\n -h, --help show this help message and exit\n --fitting_func {lin_up,lin_down,exp,inv,log}\n Function to weigh points using their distance.\n [Default: None]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nVisualization options:\n --show_rendering Display VTK window (optional).\n --save_rendering OUT_FOLDER\n Save VTK render in the specified folder (optional)\n --wireframe Use wireframe for the tube rendering.\n --error_coloring Use the fitting error to color the tube.\n --width WIDTH Width of tubes or lines representing streamlines\n [Default: 0.2]\n --opacity OPACITY Opacity for the streamlines rendered with the tube.\n [Default: 0.2]\n --win_dims WIDTH HEIGHT\n The dimensions for the vtk window. 
[(1920, 1080)]\n --background R G B RGB values [0, 255] of the color of the background.\n [Default: [1, 1, 1]]\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "high", - "high" - ], - [ - "rendered", - "rendering", - "rendering" - ], - [ - "maps", - "maps" - ], - [ - "rendered", - "rendering", - "rendered" - ], - [ - "Data", - "data", - "data" - ], - [ - "function", - "functions", - "function" - ], - [ - "tractography", - "tractography" - ], - [ - "bundles", - "bundles" - ], - [ - "complex", - "complex" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "level", - "level" - ], - [ - "error", - "error" - ], - [ - "exploration", - "exploration" - ], - [ - "rendered", - "rendering", - "render" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_filter_by_occurence", - "docstring": "Use multiple versions of the same bundle and detect the most probable voxels by\nusing a threshold on the occurrence, voxel-wise. With threshold 0.5, this is\na majority vote. This is useful to generate an average representation from\nbundles of a given population.\n\nIf streamlines originate from the same tractogram (e.g., to compare various\nbundle clustering techniques), streamline-wise vote is available to find the\nstreamlines most often included in the bundle.\n\nFormerly: scil_perform_majority_vote.py", - "help": "usage: scil_bundle_filter_by_occurence.py [-h] [--ratio_voxels [RATIO_VOXELS]]\n [--ratio_streamlines [RATIO_STREAMLINES]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...]\n output_prefix\n\nUse multiple versions of the same bundle and detect the most probable voxels by\nusing a threshold on the occurrence, voxel-wise. With threshold 0.5, this is\na majority vote. This is useful to generate an average representation from\nbundles of a given population.\n\nIf streamlines originate from the same tractogram (e.g., to compare various\nbundle clustering techniques), streamline-wise vote is available to find the\nstreamlines most often included in the bundle.\n\nFormerly: scil_perform_majority_vote.py\n\npositional arguments:\n in_bundles Input bundles filename(s). All tractograms must have identical headers.\n output_prefix Output prefix. Ex: my_path/voting_. The suffixes will be: streamlines.trk and voxels.nii.gz\n\noptions:\n -h, --help show this help message and exit\n --ratio_voxels [RATIO_VOXELS]\n Threshold on the ratio of bundles with at least one streamline in a \n given voxel to consider it as part of the 'gold standard'. Default if set: 0.5.\n --ratio_streamlines [RATIO_STREAMLINES]\n If all bundles come from the same tractogram, use this to generate \n a voting for streamlines too. The associated value is the threshold on the ratio of \n bundles including the streamline to consider it as part of the 'gold standard'. [0.5]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "variety", - "various" - ], - [ - "population", - "population" - ], - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "voxel", - "voxels" - ], - [ - "occurrence", - "occurence" - ], - [ - "bundles", - "bundles" - ], - [ - "methods", - "techniques" - ], - [ - "average", - "average" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_generate_priors", - "docstring": "Generation of priors and enhanced-FOD from an example/template bundle.\nThe bundle must have been cleaned thoroughly before use. The E-FOD can then\nbe used for bundle-specific tractography, but not for FOD metrics.\n\nFormerly: scil_generate_priors_from_bundle.py", - "help": "usage: scil_bundle_generate_priors.py [-h]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--todi_sigma {0,1,2,3,4}]\n [--sf_threshold SF_THRESHOLD]\n [--out_prefix OUT_PREFIX]\n [--out_dir OUT_DIR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle in_fodf in_mask\n\nGeneration of priors and enhanced-FOD from an example/template bundle.\nThe bundle must have been cleaned thoroughly before use. The E-FOD can then\nbe used for bundle-specific tractography, but not for FOD metrics.\n\nFormerly: scil_generate_priors_from_bundle.py\n\npositional arguments:\n in_bundle Input bundle filename.\n in_fodf Input FOD filename.\n in_mask Mask to constrain the TODI spatial smoothing,\n for example a WM mask.\n\noptions:\n -h, --help show this help message and exit\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --todi_sigma {0,1,2,3,4}\n Smooth the orientation histogram.\n --sf_threshold SF_THRESHOLD\n Relative threshold for sf masking (0.0-1.0).\n --out_prefix OUT_PREFIX\n Add a prefix to all output filename, \n default is no prefix.\n --out_dir OUT_DIR Output directory for all generated files,\n default is current directory.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Rheault, Francois, et al. 
\"Bundle-specific tractography with\n incorporated anatomical and orientational priors.\"\n NeuroImage 186 (2019): 382-398\n \n", - "synonyms": [ - [ - "spatial", - "spatial" - ], - [ - "orientation", - "orientation" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "tractography", - "tractography" - ], - [ - "anatomical", - "anatomy", - "anatomical" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_label_map", - "docstring": "Compute the label image (Nifti) from a centroid and tractograms (all\nrepresenting the same bundle). The label image represents the coverage of\nthe bundle, segmented into regions labelled from 0 to --nb_pts, starting from\nthe head, ending in the tail.\n\nEach voxel will have the label of its nearest centroid point.\n\nThe number of labels will be the same as the centroid's number of points.\n\nFormerly: scil_compute_bundle_voxel_label_map.py", - "help": "usage: scil_bundle_label_map.py [-h] [--nb_pts NB_PTS] [--colormap COLORMAP]\n [--new_labelling] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...] in_centroid\n out_dir\n\nCompute the label image (Nifti) from a centroid and tractograms (all\nrepresenting the same bundle). The label image represents the coverage of\nthe bundle, segmented into regions labelled from 0 to --nb_pts, starting from\nthe head, ending in the tail.\n\nEach voxel will have the label of its nearest centroid point.\n\nThe number of labels will be the same as the centroid's number of points.\n\nFormerly: scil_compute_bundle_voxel_label_map.py\n\npositional arguments:\n in_bundles Fiber bundle file.\n in_centroid Centroid streamline corresponding to bundle.\n out_dir Directory to save all mapping and coloring files:\n - correlation_map.nii.gz\n - session_x/labels_map.nii.gz\n - session_x/distance_map.nii.gz\n - session_x/correlation_map.nii.gz\n - session_x/labels.trk\n - session_x/distance.trk\n - session_x/correlation.trk\n Where session_x is numbered with each bundle.\n\noptions:\n -h, --help show this help message and exit\n --nb_pts NB_PTS Number of divisions for the bundles.\n Default is the number of points of the centroid.\n --colormap COLORMAP Select the colormap for colored trk (data_per_point) [jet].\n --new_labelling Use the new labelling method (multi-centroids).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "methods", - "method" - ], - [ - "streamline", - "streamline" - ], - [ - "region", - "regions", - "regions" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "image", - "image" - ], - [ - "bundles", - "bundles" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_mean_fixel_afd", - "docstring": "Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps along a bundle.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof the bundle provided, averaged at every voxel.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_fixel_afd_from_bundles.py", - "help": "usage: scil_bundle_mean_fixel_afd.py [-h] [--length_weighting]\n [--reference REFERENCE]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle in_fodf afd_mean_map\n\nCompute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps along a bundle.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof the bundle provided, averaged at every voxel.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_fixel_afd_from_bundles.py\n\npositional arguments:\n in_bundle Path of the bundle file.\n in_fodf Path of the fODF volume in spherical harmonics (SH).\n afd_mean_map Path of the output mean AFD map.\n\noptions:\n -h, --help show this help message and exit\n --length_weighting If set, will weigh the AFD values according to segment lengths. [False]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReference:\n [1] Raffelt, D., Tournier, JD., Rose, S., Ridgway, GR., Henderson, R.,\n Crozier, S., Salvado, O., & Connelly, A. (2012).\n Apparent Fibre Density: a novel measure for the analysis of\n diffusion-weighted magnetic resonance images. 
NeuroImage, 59(4),\n 3976--3994.\n", - "synonyms": [ - [ - "streamline", - "streamline" - ], - [ - "maps", - "map" - ], - [ - "weighted", - "weighted" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "diffusion", - "diffusion" - ], - [ - "maps", - "maps" - ], - [ - "based", - "based" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "false", - "false" - ], - [ - "analysis", - "analysis" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_mean_fixel_afd_from_hdf5", - "docstring": "Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps for every connection within an hdf5 (.h5) file.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof each connection, averaged at every voxel.\n\nPlease use an hdf5 (.h5) file containing decomposed connections.\n\nFormerly: scil_compute_fixel_afd_from_hdf5.py", - "help": "usage: scil_bundle_mean_fixel_afd_from_hdf5.py [-h] [--length_weighting]\n [--processes NBR]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_hdf5 in_fodf out_hdf5\n\nCompute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF)\nmaps for every connection within an hdf5 (.h5) file.\n\nThis is the \"real\" fixel-based fODF amplitude along every streamline\nof each connection, averaged at every voxel.\n\nPlease use an hdf5 (.h5) file containing decomposed connections.\n\nFormerly: scil_compute_fixel_afd_from_hdf5.py\n\npositional arguments:\n in_hdf5 HDF5 filename (.h5) containing decomposed connections.\n in_fodf Path of the fODF volume in spherical harmonics (SH).\n out_hdf5 Path of the output HDF5 filenames (.h5).\n\noptions:\n -h, --help show this help message and exit\n --length_weighting If set, will weigh the AFD values according to segment lengths. [False]\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReference:\n [1] Raffelt, D., Tournier, JD., Rose, S., Ridgway, GR., Henderson, R.,\n Crozier, S., Salvado, O., & Connelly, A. (2012).\n Apparent Fibre Density: a novel measure for the analysis of\n diffusion-weighted magnetic resonance images. 
NeuroImage,\n 59(4), 3976--3994.\n", - "synonyms": [ - [ - "streamline", - "streamline" - ], - [ - "weighted", - "weighted" - ], - [ - "diffusion", - "diffusion" - ], - [ - "maps", - "maps" - ], - [ - "processes", - "processes" - ], - [ - "connections", - "connections" - ], - [ - "based", - "based" - ], - [ - "connection", - "connection" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "false", - "false" - ], - [ - "analysis", - "analysis" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_mean_fixel_bingham_metric", - "docstring": "Given a bundle and Bingham coefficients, compute the average Bingham\nmetric at each voxel intersected by the bundle. Intersected voxels are\nfound by computing the intersection between the voxel grid and each streamline\nin the input tractogram.\n\nThis script behaves like scil_compute_mean_fixel_afd_from_bundles.py for fODFs,\nbut here for Bingham distributions. These add the unique possibility to capture\nfixel-based fiber spread (FS) and fiber fraction (FF). FD from the Bingham\nshould be \"equivalent\" to the AFD_fixel we are used to.\n\nBingham coefficients volume must come from scil_fodf_to_bingham.py\nand Bingham metrics come from scil_bingham_metrics.py.\n\nBingham metrics are extracted from Bingham distributions fitted to fODF. There\nare as many values per voxel as there are lobes extracted. The value chosen\nfor a given voxel is the one belonging to the lobe best aligned with the\ncurrent streamline segment.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_mean_fixel_obe_metric_from_bundles.py", - "help": "usage: scil_bundle_mean_fixel_bingham_metric.py [-h] [--length_weighting]\n [--max_theta MAX_THETA]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bundle in_bingham\n in_bingham_metric out_mean_map\n\nGiven a bundle and Bingham coefficients, compute the average Bingham\nmetric at each voxel intersected by the bundle. Intersected voxels are\nfound by computing the intersection between the voxel grid and each streamline\nin the input tractogram.\n\nThis script behaves like scil_compute_mean_fixel_afd_from_bundles.py for fODFs,\nbut here for Bingham distributions. These add the unique possibility to capture\nfixel-based fiber spread (FS) and fiber fraction (FF). FD from the Bingham\nshould be \"equivalent\" to the AFD_fixel we are used to.\n\nBingham coefficients volume must come from scil_fodf_to_bingham.py\nand Bingham metrics come from scil_bingham_metrics.py.\n\nBingham metrics are extracted from Bingham distributions fitted to fODF. There\nare as many values per voxel as there are lobes extracted. The value chosen\nfor a given voxel is the one belonging to the lobe best aligned with the\ncurrent streamline segment.\n\nPlease use a bundle file rather than a whole tractogram.\n\nFormerly: scil_compute_mean_fixel_obe_metric_from_bundles.py\n\npositional arguments:\n in_bundle Path of the bundle file.\n in_bingham Path of the Bingham volume.\n in_bingham_metric Path of the Bingham metric (FD, FS, or FF) volume.\n out_mean_map Path of the output mean map.\n\noptions:\n -h, --help show this help message and exit\n --length_weighting If set, will weigh the FD values according to segment lengths.\n --max_theta MAX_THETA\n Maximum angle (in degrees) condition on lobe alignment. 
[60]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "lobe", - "lobe" - ], - [ - "streamline", - "streamline" - ], - [ - "maps", - "map" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "possibility", - "possibility" - ], - [ - "unique", - "unique" - ], - [ - "voxel", - "voxels" - ], - [ - "lobes", - "lobes" - ], - [ - "based", - "based" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "average", - "average" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "intersected", - "intersected" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_mean_std", - "docstring": "Compute mean and std for each metric.\n\n- Default: For the whole bundle. This is achieved by averaging the metric\n values of all voxels occupied by the bundle.\n- Option --per_point: For all streamline points in the bundle for each metric\n combination, along the bundle, i.e. for each point.\n **To create label_map and distance_map, see\n scil_bundle_label_map.py\n\nDensity weighting modifies the contribution of voxels with lower/higher\nstreamline count to reduce influence of spurious streamlines.\n\nFormerly: scil_compute_bundle_mean_std_per_point.py or\nscil_compute_bundle_mean_std.py", - "help": "usage: scil_bundle_mean_std.py [-h] [--per_point in_labels | --include_dps]\n [--density_weighting]\n [--distance_weighting DISTANCE_NII]\n [--correlation_weighting CORRELATION_NII]\n [--out_json OUT_JSON] [--reference REFERENCE]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n in_bundle in_metrics [in_metrics ...]\n\nCompute mean and std for each metric.\n\n- Default: For the whole bundle. This is achieved by averaging the metric\n values of all voxels occupied by the bundle.\n- Option --per_point: For all streamline points in the bundle for each metric\n combination, along the bundle, i.e. for each point.\n **To create label_map and distance_map, see\n scil_bundle_label_map.py\n\nDensity weighting modifies the contribution of voxels with lower/higher\nstreamline count to reduce influence of spurious streamlines.\n\nFormerly: scil_compute_bundle_mean_std_per_point.py or\nscil_compute_bundle_mean_std.py\n\npositional arguments:\n in_bundle Fiber bundle file to compute statistics on.\n in_metrics Nifti file to compute statistics on. Probably some tractometry measure(s) such as FA, MD, RD, ...\n\noptions:\n -h, --help show this help message and exit\n --per_point in_labels\n If set, computes the metrics per point instead of on the whole bundle.\n You must then give the label map (.nii.gz) of the corresponding fiber bundle.\n --include_dps Save values from data_per_streamline.\n Currently not offered with option --per_point.\n --density_weighting If set, weights statistics by the number of fibers passing through each voxel.\n --distance_weighting DISTANCE_NII\n If set, weights statistics by the inverse of the distance between a streamline and the centroid.\n --correlation_weighting CORRELATION_NII\n If set, weight statistics by the correlation strength between longitudinal data.\n --out_json OUT_JSON Path of the output file. 
If not given, the output is simply printed on screen.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "maps", - "map" - ], - [ - "higher", - "lower" - ], - [ - "create", - "create" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "Data", - "data", - "data" - ], - [ - "voxel", - "voxels" - ], - [ - "longitudinal", - "longitudinal" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "higher", - "higher" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_pairwise_comparison", - "docstring": "Evaluate pair-wise similarity measures of bundles.\nAll tractograms must be in the same space (aligned to one reference).\n\nFor the voxel representation, the computed similarity measures are:\n bundle_adjacency_voxels, dice_voxels, w_dice_voxels, density_correlation\n volume_overlap, volume_overreach\nThe same measures are also evaluated for the endpoints.\n\nFor the streamline representation, the computed similarity measures are:\n bundle_adjacency_streamlines, dice_streamlines, streamlines_count_overlap,\n streamlines_count_overreach\n\nFormerly: scil_evaluate_bundles_pairwise_agreement_measures.py", - "help": "usage: scil_bundle_pairwise_comparison.py [-h] [--streamline_dice]\n [--bundle_adjency_no_overlap]\n [--disable_streamline_distance]\n [--single_compare SINGLE_COMPARE]\n [--keep_tmp] [--ratio]\n [--processes NBR]\n [--reference REFERENCE]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...] out_json\n\nEvaluate pair-wise similarity measures of bundles.\nAll tractograms must be in the same space (aligned to one reference).\n\nFor the voxel representation, the computed similarity measures are:\n bundle_adjacency_voxels, dice_voxels, w_dice_voxels, density_correlation\n volume_overlap, volume_overreach\nThe same measures are also evaluated for the endpoints.\n\nFor the streamline representation, the computed similarity measures are:\n bundle_adjacency_streamlines, dice_streamlines, streamlines_count_overlap,\n streamlines_count_overreach\n\nFormerly: scil_evaluate_bundles_pairwise_agreement_measures.py\n\npositional arguments:\n in_bundles Path of the input bundles.\n out_json Path of the output json file.\n\noptions:\n -h, --help show this help message and exit\n --streamline_dice Compute streamline-wise dice coefficient.\n Tractograms must be identical [False].\n --bundle_adjency_no_overlap\n If set, do not count zeros in the average BA.\n --disable_streamline_distance\n Will not compute the streamlines distance \n [False].\n --single_compare SINGLE_COMPARE\n Compare inputs to this single file.\n --keep_tmp Will not delete the tmp folder at the end.\n --ratio Compute overlap and overreach as a ratio over the\n reference tractogram in a Tractometer-style way.\n Can only be used if also using the `single_compare` option.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "similarity", - "similarity" - ], - [ - "examine", - "evaluate" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "space", - "space" - ], - [ - "processes", - "processes" - ], - [ - "bundles", - "bundles" - ], - [ - "average", - "average" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "false", - "false" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_reject_outliers", - "docstring": "Clean a bundle (inliers/outliers) using hierarchical clustering.\nhttp://archive.ismrm.org/2015/2844.html\n\nIf spurious streamlines are dense, it is possible they will not be recognized\nas outliers. Manual cleaning may be required to overcome this limitation.", - "help": "usage: scil_bundle_reject_outliers.py [-h]\n [--remaining_bundle REMAINING_BUNDLE]\n [--alpha ALPHA] [--display_counts]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_bundle\n\nClean a bundle (inliers/outliers) using hierarchical clustering.\nhttp://archive.ismrm.org/2015/2844.html\n\nIf spurious streamlines are dense, it is possible they will not be recognized\nas outliers. Manual cleaning may be required to overcome this limitation.\n\npositional arguments:\n in_bundle Fiber bundle file to remove outliers from.\n out_bundle Fiber bundle without outliers.\n\noptions:\n -h, --help show this help message and exit\n --remaining_bundle REMAINING_BUNDLE\n Removed outliers.\n --alpha ALPHA Percent of the length of the tree that clusters of individual streamlines will be pruned. [0.6]\n --display_counts Print streamline count before and after filtering\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "limitation", - "limitations", - "limitation" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "individual", - "individual" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_score_many_bundles_one_tractogram", - "docstring": "This script is intended to score all bundles from a single tractogram. 
Each\nvalid bundle is compared to its ground truth.\nEx: It was used for the ISMRM 2015 Challenge scoring.\n\nSee also scil_bundle_score_same_bundle_many_segmentations.py to score many\nversions of the same bundle, compared to ONE ground truth / gold standard.\n\nThis script is the second part of script scil_score_tractogram, which also\nsegments the whole-brain tractogram into bundles first.\n\nHere we suppose that the bundles are already segmented and saved as follows:\n main_dir/\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (optional)\n segmented_WPC/*_wpc.trk (optional)\n IS.trk OR NC.trk (if segmented_IB is present)\n\nConfig file\n-----------\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n }\n}\n\nFormerly: scil_score_bundles.py", - "help": "usage: scil_bundle_score_many_bundles_one_tractogram.py [-h] [--json_prefix p]\n [--gt_dir DIR]\n [--indent INDENT]\n [--sort_keys]\n [--reference REFERENCE]\n [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n gt_config bundles_dir\n\nThis script is intended to score all bundles from a single tractogram. Each\nvalid bundle is compared to its ground truth.\nEx: It was used for the ISMRM 2015 Challenge scoring.\n\nSee also scil_bundle_score_same_bundle_many_segmentations.py to score many\nversions of the same bundle, compared to ONE ground truth / gold standard.\n\nThis script is the second part of script scil_score_tractogram, which also\nsegments the whole-brain tractogram into bundles first.\n\nHere we suppose that the bundles are already segmented and saved as follows:\n main_dir/\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (optional)\n segmented_WPC/*_wpc.trk (optional)\n IS.trk OR NC.trk (if segmented_IB is present)\n\nConfig file\n-----------\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n }\n}\n\nFormerly: scil_score_bundles.py\n\nTractometry\n-----------\nGlobal connectivity metrics:\n\n- Computed by default:\n - VS: valid streamlines, belonging to a bundle (i.e. respecting all the\n criteria for that bundle; endpoints, limit_mask, gt_mask.).\n - IS: invalid streamlines. All other streamlines. IS = IC + NC.\n\n- Optional:\n - WPC: wrong path connections, streamlines connecting correct ROIs but not\n respecting the other criteria for that bundle. Such streamlines always\n exist but they are only saved separately if specified in the options.\n Else, they are merged back with the IS.\n By definition, WPC are only computed if \"limits masks\" are provided.\n - IC: invalid connections, streamlines joining an incorrect combination of\n ROIs. Use carefully, quality depends on the quality of your ROIs and no\n analysis is done on the shape of the streamlines.\n - NC: no connections. 
Invalid streamlines minus invalid connections.\n\n- Fidelity metrics:\n - OL: Overlap. Percentage of ground truth voxels containing streamline(s)\n for a given bundle.\n - OR: Overreach. Amount of voxels containing streamline(s) when they\n shouldn't, for a given bundle. We compute two versions:\n OR_pct_vs = divided by the total number of voxels covered by the bundle.\n (percentage of the voxels touched by VS).\n Values range between 0 and 100%. Values are not defined when we\n recover no streamline for a bundle, but we set the OR_pct_vs to 0\n in that case.\n OR_pct_gt = divided by the total size of the ground truth bundle mask.\n Values could be higher than 100%.\n - f1 score, which is the same as the Dice score.\n\npositional arguments:\n gt_config .json dict configured as specified above.\n bundles_dir Directory containing all bundles.\n (Ex: Output directory for scil_score_tractogram).\n It is expected to contain a file IS.trk and \n files segmented_VB/*_VS.trk, with, possibly, files \n segmented_WPC/*_wpc.trk and segmented_IC/\n\noptions:\n -h, --help show this help message and exit\n --json_prefix p Prefix of the output json file. Ex: 'study_x_'.\n Suffix will be results.json. File will be saved inside bundles_dir.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nAdditions to gt_config:\n --gt_dir DIR Root path of the ground truth files listed in the gt_config.\n If not set, filenames in the config file are considered \n as absolute paths.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "connect", - "connecting", - "connects", - "connecting" - ], - [ - "streamline", - "streamline" - ], - [ - "connectivity", - "connectivity" - ], - [ - "invalid", - "invalid" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "global", - "global" - ], - [ - "bundles", - "bundle" - ], - [ - "connections", - "connections" - ], - [ - "voxel", - "voxels" - ], - [ - "result", - "result" - ], - [ - "valid", - "valid" - ], - [ - "bundles", - "bundles" - ], - [ - "exist", - "exist" - ], - [ - "shape", - "shape" - ], - [ - "considered", - "considered" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "total", - "total" - ], - [ - "higher", - "higher" - ], - [ - "analysis", - "analysis" - ], - [ - "defined", - "defined" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_score_same_bundle_many_segmentations", - "docstring": "This script is intended to score many versions of the same bundle, compared to\nONE ground truth / gold standard.\n\nSee also scil_bundle_score_many_bundles_one_tractogram.py to score all bundles\nfrom a single tractogram by comparing each valid bundle to its ground truth.\n\nAll tractograms must be in the same space (aligned to one reference).\nThe measures can be applied to a voxel-wise or streamline-wise representation.\n\nA gold standard must be provided for the desired representation.\nA gold standard would be a segmentation from an expert or a group of experts.\nIf only the streamline-wise representation is 
provided without a voxel-wise\ngold standard, it will be computed from the provided streamlines.\nAt least one of the two representations is required.\n\nThe gold standard tractogram is the tractogram (whole brain most likely) from\nwhich the segmentation is performed.\nThe gold standard tracking mask is the tracking mask used by the tractography\nalgorithm to generate the gold standard tractogram.\n\nThe computed binary classification measures are:\nsensitivity, specificity, precision, accuracy, dice, kappa, youden for both\nthe streamline and voxel representation (if provided).\n\nFormerly: scil_evaluate_bundles_binary_classification_measures.py", - "help": "usage: scil_bundle_score_same_bundle_many_segmentations.py [-h]\n [--streamlines_measures GOLD_STANDARD_STREAMLINES TRACTOGRAM]\n [--voxels_measures GOLD_STANDARD_MASK TRACKING MASK]\n [--processes NBR]\n [--reference REFERENCE]\n [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bundles\n [in_bundles ...]\n out_json\n\nThis script is intended to score many versions of the same bundle, compared to\nONE ground truth / gold standard.\n\nSee also scil_bundle_score_many_bundles_one_tractogram.py to score all bundles\nfrom a single tractogram by comparing each valid bundle to its ground truth.\n\nAll tractograms must be in the same space (aligned to one reference).\nThe measures can be applied to a voxel-wise or streamline-wise representation.\n\nA gold standard must be provided for the desired representation.\nA gold standard would be a segmentation from an expert or a group of experts.\nIf only the streamline-wise representation is provided without a voxel-wise\ngold standard, it will be computed from the provided streamlines.\nAt least one of the two representations is required.\n\nThe gold standard tractogram is the tractogram (whole brain most likely) from\nwhich the segmentation is performed.\nThe gold standard tracking mask is the tracking mask used by the tractography\nalgorithm to generate the gold standard tractogram.\n\nThe computed binary classification measures are:\nsensitivity, specificity, precision, accuracy, dice, kappa, youden for both\nthe streamline and voxel representation (if provided).\n\nFormerly: scil_evaluate_bundles_binary_classification_measures.py\n\npositional arguments:\n in_bundles Path of the input bundles.\n out_json Path of the output json.\n\noptions:\n -h, --help show this help message and exit\n --streamlines_measures GOLD_STANDARD_STREAMLINES TRACTOGRAM\n The gold standard bundle and the original tractogram.\n --voxels_measures GOLD_STANDARD_MASK TRACKING MASK\n The gold standard mask and the original tracking mask.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "precision", - "precision" - ], - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "space", - "space" - ], - [ - "processes", - "processes" - ], - [ - "valid", - "valid" - ], - [ - "tractography", - "tractography" - ], - [ - "bundles", - "bundles" - ], - [ - "applied", - "applied" - ], - [ - "binary", - "binary" - ], - [ - "voxel", - "voxel" - ], - [ - "tracking", - "tracking" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_shape_measures", - "docstring": "Evaluate basic measurements of bundle(s).\n\nThe computed measures are:\n - volume_info: volume, volume_endpoints\n - streamlines_info: streamlines_count, avg_length (in mm or in number of\n points), average step size, min_length, max_length.\n ** You may also get this information with scil_tractogram_print_info.py.\n - shape_info: span, curl, diameter, elongation, surface area,\n irregularity, end surface area, radius, end surface irregularity,\n mean_curvature, fractal dimension.\n ** The diameter, here, is a simple estimation using volume / length.\n For a more complex calculation, see scil_bundle_diameter.py.\n\nWith more than one bundle, the measures are averaged over bundles. All\ntractograms must be in the same space.\n\nThe set average contains the average measures of all input bundles. The\nmeasures that are dependent on the streamline count are weighted by the number\nof streamlines of each bundle. Each of these average measures is computed by\nfirst summing the product of a measure and the streamline count of each\nbundle and dividing the sum by the total number of streamlines. Thus, measures\nincluding length and span are essentially averages of all the streamlines.\nOther streamline-related set measures are computed with other set averages,\nwhereas bundle-related measures are computed as an average of all bundles.\nThese measures include volume and surface area.\n\nThe fractal dimension is dependent on the voxel size and the number of voxels.\nIf data comparison is performed, the bundles MUST be in the same resolution.\n\nFormerly: scil_compute_bundle_volume.py or\nscil_evaluate_bundles_individual_measures.py", - "help": "usage: scil_bundle_shape_measures.py [-h] [--out_json OUT_JSON]\n [--group_statistics] [--no_uniformize]\n [--reference REFERENCE] [--processes NBR]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundles [in_bundles ...]\n\nEvaluate basic measurements of bundle(s).\n\nThe computed measures are:\n - volume_info: volume, volume_endpoints\n - streamlines_info: streamlines_count, avg_length (in mm or in number of\n points), average step size, min_length, max_length.\n ** You may also get this information with scil_tractogram_print_info.py.\n - shape_info: span, curl, diameter, elongation, surface area,\n irregularity, end surface area, radius, end surface irregularity,\n mean_curvature, fractal dimension.\n ** The diameter, here, is a simple estimation using volume / length.\n For a more complex calculation, see scil_bundle_diameter.py.\n\nWith more than one bundle, the measures are averaged over bundles. 
All\ntractograms must be in the same space.\n\nThe set average contains the average measures of all input bundles. The\nmeasures that are dependent on the streamline count are weighted by the number\nof streamlines of each bundle. Each of these average measures is computed by\nfirst summing the product of a measure and the streamline count of each\nbundle and dividing the sum by the total number of streamlines. Thus, measures\nincluding length and span are essentially averages of all the streamlines.\nOther streamline-related set measures are computed with other set averages,\nwhereas bundle-related measures are computed as an average of all bundles.\nThese measures include volume and surface area.\n\nThe fractal dimension is dependent on the voxel size and the number of voxels.\nIf data comparison is performed, the bundles MUST be in the same resolution.\n\nFormerly: scil_compute_bundle_volume.py or\nscil_evaluate_bundles_individual_measures.py\n\npositional arguments:\n in_bundles Path of the input bundles.\n\noptions:\n -h, --help show this help message and exit\n --out_json OUT_JSON Path of the output file. If not given, the output is simply printed on screen.\n --group_statistics Show average measures [False].\n --no_uniformize Do NOT automatically uniformize endpoints for the endpoints related metrics.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\nReferences:\n[1] Fang-Cheng Yeh. 2020.\n Shape analysis of the human association pathways. NeuroImage.\n", - "synonyms": [ - [ - "human", - "human" - ], - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "weighted", - "weighted" - ], - [ - "examine", - "evaluate" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "association", - "association" - ], - [ - "space", - "space" - ], - [ - "pathway", - "pathways", - "pathways" - ], - [ - "Data", - "data", - "data" - ], - [ - "processes", - "processes" - ], - [ - "step", - "step" - ], - [ - "area", - "area" - ], - [ - "voxel", - "voxels" - ], - [ - "bundles", - "bundles" - ], - [ - "shape", - "shape" - ], - [ - "complex", - "complex" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "average", - "average" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "total", - "total" - ], - [ - "false", - "false" - ], - [ - "analysis", - "analysis" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_uniformize_endpoints", - "docstring": "Uniformize streamlines' endpoints according to a defined axis.\nUseful for tractometry or model creation.\n\nThe --auto option will automatically calculate the main orientation.\nIf the input bundle is poorly defined, it is possible the heuristic will be wrong.\n\nThe default is to flip each streamline so their first point's coordinate in the\ndefined axis is smaller than their last point (--swap does the opposite).\n\nThe --target_roi option will use the barycenter of the target mask to define\nthe axis. The target mask can be a binary mask or an atlas. 
If an atlas is\nused, labels are expected in the form of --target_roi atlas.nii.gz 2 3 5:7.\n\nFormerly: scil_uniformize_streamlines_endpoints.py", - "help": "usage: scil_bundle_uniformize_endpoints.py [-h]\n (--axis {x,y,z} | --auto | --centroid tractogram | --target_roi TARGET_ROI [TARGET_ROI ...])\n [--swap] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_bundle\n\nUniformize streamlines' endpoints according to a defined axis.\nUseful for tractometry or model creation.\n\nThe --auto option will automatically calculate the main orientation.\nIf the input bundle is poorly defined, it is possible the heuristic will be wrong.\n\nThe default is to flip each streamline so their first point's coordinate in the\ndefined axis is smaller than their last point (--swap does the opposite).\n\nThe --target_roi option will use the barycenter of the target mask to define\nthe axis. The target mask can be a binary mask or an atlas. If an atlas is\nused, labels are expected in the form of --target_roi atlas.nii.gz 2 3 5:7.\n\nFormerly: scil_uniformize_streamlines_endpoints.py\n\npositional arguments:\n in_bundle Input path of the tractography file.\n out_bundle Output path of the uniformized file.\n\noptions:\n -h, --help show this help message and exit\n --axis {x,y,z} Match endpoints of the streamlines along this axis.\n SUGGESTION: Commissural = x, Association = y, Projection = z\n --auto Match endpoints of the streamlines along an automatically determined axis.\n --centroid tractogram\n Match endpoints of the streamlines to align them to a reference unique streamline (centroid).\n --target_roi TARGET_ROI [TARGET_ROI ...]\n Provide a target ROI: either a binary mask or a label map and the labels to use.\n Will align heads to be closest to the mask barycenter.\n (atlas: if no labels are provided, all labels will be used.)\n --swap Swap head <-> tail convention. Can be useful when the reference is not in RAS.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "maps", - "map" - ], - [ - "main", - "main" - ], - [ - "form", - "form" - ], - [ - "orientation", - "orientation" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "association", - "association" - ], - [ - "larger", - "smaller" - ], - [ - "unique", - "unique" - ], - [ - "projection", - "projection" - ], - [ - "atlas", - "atlas" - ], - [ - "tractography", - "tractography" - ], - [ - "binary", - "binary" - ], - [ - "level", - "level" - ], - [ - "defined", - "defined" - ] - ], - "keywords": [] - }, - { - "name": "scil_bundle_volume_per_label", - "docstring": "Compute bundle volume per label in mm3. This script supports anisotropic voxel\nresolution. 
Volume is estimated by counting the number of voxels occupied by\neach label and multiplying it by the volume of a single voxel.\n\nThe labels can be obtained by scil_bundle_label_map.py.\n\nThis estimation is typically performed at resolution around 1mm3.\n\nTo get the volume and other measures directly from the (whole) bundle, use\nscil_bundle_shape_measures.py.\n\nFormerly: scil_compute_bundle_volume_per_label.py", - "help": "usage: scil_bundle_volume_per_label.py [-h] [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n voxel_label_map bundle_name\n\nCompute bundle volume per label in mm3. This script supports anisotropic voxel\nresolution. Volume is estimated by counting the number of voxels occupied by\neach label and multiplying it by the volume of a single voxel.\n\nThe labels can be obtained by scil_bundle_label_map.py.\n\nThis estimation is typically performed at resolution around 1mm3.\n\nTo get the volume and other measures directly from the (whole) bundle, use\nscil_bundle_shape_measures.py.\n\nFormerly: scil_compute_bundle_volume_per_label.py\n\npositional arguments:\n voxel_label_map Fiber bundle file.\n bundle_name Bundle name.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "bundles", - "bundle" - ], - [ - "voxel", - "voxels" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_compare_populations", - "docstring": "Performs a network-based statistical comparison for populations g1 and g2. The\noutput is a matrix of the same size as the input connectivity matrices, with\np-values at each edge.\nAll input matrices must have the same shape (NxN). For paired t-test, both\ngroups must have the same number of observations.\n\nFor example, if you have streamline count weighted matrices for an MCI and a\ncontrol group and you want to investigate differences in their connectomes:\n >>> scil_connectivity_compare_populations.py pval.npy\n --g1 MCI/*_sc.npy --g2 CTL/*_sc.npy\n\n--filtering_mask will simply multiply all input matrices by the binary\nmask before performing the statistical comparison. Reduces the number of\nstatistical tests, useful when using --fdr or --bonferroni.\n\nFormerly: scil_compare_connectivity.py", - "help": "usage: scil_connectivity_compare_populations.py [-h] --in_g1 IN_G1 [IN_G1 ...]\n --in_g2 IN_G2 [IN_G2 ...]\n [--tail {left,right,both}]\n [--paired]\n [--fdr | --bonferroni]\n [--p_threshold THRESH OUT_FILE]\n [--filtering_mask FILTERING_MASK]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n out_pval_matrix\n\nPerforms a network-based statistical comparison for populations g1 and g2. The\noutput is a matrix of the same size as the input connectivity matrices, with\np-values at each edge.\nAll input matrices must have the same shape (NxN). 
For paired t-test, both\ngroups must have the same number of observations.\n\nFor example, if you have streamline count weighted matrices for an MCI and a\ncontrol group and you want to investigate differences in their connectomes:\n >>> scil_connectivity_compare_populations.py pval.npy\n --g1 MCI/*_sc.npy --g2 CTL/*_sc.npy\n\n--filtering_mask will simply multiply all input matrices by the binary\nmask before performing the statistical comparison. Reduces the number of\nstatistical tests, useful when using --fdr or --bonferroni.\n\nFormerly: scil_compare_connectivity.py\n\npositional arguments:\n out_pval_matrix Output matrix (.npy) containing the edges p-value.\n\noptions:\n -h, --help show this help message and exit\n --in_g1 IN_G1 [IN_G1 ...]\n List of matrices for the first population (.npy).\n --in_g2 IN_G2 [IN_G2 ...]\n List of matrices for the second population (.npy).\n --tail {left,right,both}\n Enables specification of an alternative hypothesis:\n left: mean of g1 < mean of g2,\n right: mean of g2 < mean of g1,\n both: both means are not equal (default).\n --paired Use paired sample t-test instead of population t-test.\n --in_g1 and --in_g2 must be ordered the same way.\n --fdr Perform a false discovery rate (FDR) correction for the p-values.\n Uses the number of non-zero edges as number of tests (value between 0.01 and 0.1).\n --bonferroni Perform a Bonferroni correction for the p-values.\n Uses the number of non-zero edges as number of tests.\n --p_threshold THRESH OUT_FILE\n Threshold the final p-value matrix and save the binary matrix (.npy).\n --filtering_mask FILTERING_MASK\n Binary filtering mask (.npy) to apply before computing the measures.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Rubinov, Mikail, and Olaf Sporns. \"Complex network measures of brain\n connectivity: uses and interpretations.\" Neuroimage 52.3 (2010):\n 1059-1069.\n[2] Zalesky, Andrew, Alex Fornito, and Edward T. Bullmore. \"Network-based\n statistic: identifying differences in brain networks.\" Neuroimage 53.4\n (2010): 1197-1207.\n", - "synonyms": [ - [ - "population", - "population" - ], - [ - "network", - "networks", - "network" - ], - [ - "streamline", - "streamline" - ], - [ - "weighted", - "weighted" - ], - [ - "network", - "networks", - "networks" - ], - [ - "differences", - "differences" - ], - [ - "connectivity", - "connectivity" - ], - [ - "matrices", - "matrices" - ], - [ - "applied", - "apply" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "left", - "left" - ], - [ - "based", - "based" - ], - [ - "population", - "populations" - ], - [ - "shape", - "shape" - ], - [ - "complex", - "complex" - ], - [ - "discovery", - "discovery" - ], - [ - "binary", - "binary" - ], - [ - "level", - "level" - ], - [ - "false", - "false" - ], - [ - "brain", - "brain" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_compute_matrices", - "docstring": "This script computes a variety of measures in the form of connectivity\nmatrices. This script is made to follow\nscil_tractogram_segment_bundles_for_connectivity.py and\nuses the same labels list as input.\n\nThe script expects a folder containing all relevant bundles following the\nnaming convention LABEL1_LABEL2.trk and a text file containing the list of\nlabels that should be part of the matrices. 
The ordering of labels in the\nmatrices will follow the same order as the list.\nThis script only generates matrices in the form of arrays; it does not visualize\nor reorder the labels (nodes).\n\nThe parameter --similarity expects a folder with density maps\n(LABEL1_LABEL2.nii.gz) following the same naming convention as the input\ndirectory.\nThe bundles should be averaged versions in the same space. This will\ncompute the weighted-dice between each node and their homologous average\nversion.\n\nThe parameters --metrics can be used more than once and expect a map (t1, fa,\netc.) in the same space and each will generate a matrix. The average value in\nthe volume occupied by the bundle will be reported in the matrices nodes.\n\nThe parameters --maps can be used more than once and expect a folder with\npre-computed maps (LABEL1_LABEL2.nii.gz) following the same naming convention\nas the input directory. Each will generate a matrix. The average non-zero\nvalue in the map will be reported in the matrices nodes.\n\nThe parameters --lesion_load will compute 3 lesion(s) related matrices:\nlesion_count.npy, lesion_vol.npy, lesion_sc.npy and put it inside of a\nspecified folder. They represent the number of lesions, the total volume of\nlesion(s) and the total of streamlines going through the lesion(s) for each\nconnection. Each connection can be seen as a 'bundle' and then something\nsimilar to scil_analyse_lesion_load.py is run for each 'bundle'.\n\nFormerly: scil_compute_connectivity.py", - "help": "usage: scil_connectivity_compute_matrices.py [-h] [--volume OUT_FILE]\n [--streamline_count OUT_FILE]\n [--length OUT_FILE]\n [--similarity IN_FOLDER OUT_FILE]\n [--maps IN_FOLDER OUT_FILE]\n [--metrics IN_FILE OUT_FILE]\n [--lesion_load IN_FILE OUT_DIR]\n [--min_lesion_vol MIN_LESION_VOL]\n [--density_weighting]\n [--no_self_connection]\n [--include_dps OUT_DIR]\n [--force_labels_list FORCE_LABELS_LIST]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_hdf5 in_labels\n\nThis script computes a variety of measures in the form of connectivity\nmatrices. This script is made to follow\nscil_tractogram_segment_bundles_for_connectivity.py and\nuses the same labels list as input.\n\nThe script expects a folder containing all relevant bundles following the\nnaming convention LABEL1_LABEL2.trk and a text file containing the list of\nlabels that should be part of the matrices. The ordering of labels in the\nmatrices will follow the same order as the list.\nThis script only generates matrices in the form of arrays; it does not visualize\nor reorder the labels (nodes).\n\nThe parameter --similarity expects a folder with density maps\n(LABEL1_LABEL2.nii.gz) following the same naming convention as the input\ndirectory.\nThe bundles should be averaged versions in the same space. This will\ncompute the weighted-dice between each node and their homologous average\nversion.\n\nThe parameters --metrics can be used more than once and expect a map (t1, fa,\netc.) in the same space and each will generate a matrix. The average value in\nthe volume occupied by the bundle will be reported in the matrices nodes.\n\nThe parameters --maps can be used more than once and expect a folder with\npre-computed maps (LABEL1_LABEL2.nii.gz) following the same naming convention\nas the input directory. Each will generate a matrix. 
The average non-zero\nvalue in the map will be reported in the matrices nodes.\n\nThe parameters --lesion_load will compute 3 lesion(s) related matrices:\nlesion_count.npy, lesion_vol.npy, lesion_sc.npy and put it inside of a\nspecified folder. They represent the number of lesions, the total volume of\nlesion(s) and the total of streamlines going through the lesion(s) for each\nconnection. Each connection can be seen as a 'bundle' and then something\nsimilar to scil_analyse_lesion_load.py is run for each 'bundle'.\n\nFormerly: scil_compute_connectivity.py\n\npositional arguments:\n in_hdf5 Input filename for the hdf5 container (.h5).\n Obtained from scil_tractogram_segment_bundles_for_connectivity.py.\n in_labels Labels file name (nifti).\n This generates a NxN connectivity matrix.\n\noptions:\n -h, --help show this help message and exit\n --volume OUT_FILE Output file for the volume weighted matrix (.npy).\n --streamline_count OUT_FILE\n Output file for the streamline count weighted matrix (.npy).\n --length OUT_FILE Output file for the length weighted matrix (.npy).\n --similarity IN_FOLDER OUT_FILE\n Input folder containing the averaged bundle density\n maps (.nii.gz) and output file for the similarity weighted matrix (.npy).\n --maps IN_FOLDER OUT_FILE\n Input folder containing pre-computed maps (.nii.gz)\n and output file for the weighted matrix (.npy).\n --metrics IN_FILE OUT_FILE\n Input (.nii.gz). and output file (.npy) for a metric weighted matrix.\n --lesion_load IN_FILE OUT_DIR\n Input binary mask (.nii.gz) and output directory for all lesion-related matrices.\n --min_lesion_vol MIN_LESION_VOL\n Minimum lesion volume in mm3 [7].\n --density_weighting Use density-weighting for the metric weighted matrix.\n --no_self_connection Eliminate the diagonal from the matrices.\n --include_dps OUT_DIR\n Save matrices from data_per_streamline in the output directory.\n COMMIT-related values will be summed instead of averaged.\n Will always overwrite files.\n --force_labels_list FORCE_LABELS_LIST\n Path to a labels list (.txt) in case of missing labels in the atlas.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "reported", - "reported" - ], - [ - "order", - "order" - ], - [ - "streamlines", - "streamlines" - ], - [ - "represent", - "represent" - ], - [ - "streamline", - "streamline" - ], - [ - "maps", - "map" - ], - [ - "weighted", - "weighted" - ], - [ - "similarity", - "similarity" - ], - [ - "form", - "form" - ], - [ - "connectivity", - "connectivity" - ], - [ - "variety", - "variety" - ], - [ - "matrices", - "matrices" - ], - [ - "bundles", - "bundle" - ], - [ - "maps", - "maps" - ], - [ - "parameter", - "parameter" - ], - [ - "space", - "space" - ], - [ - "processes", - "processes" - ], - [ - "atlas", - "atlas" - ], - [ - "bundles", - "bundles" - ], - [ - "connection", - "connection" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "average", - "average" - ], - [ - "level", - "level" - ], - [ - "total", - "total" - ], - [ - "parameters", - "parameters" - ], - [ - "naming", - "naming" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_compute_pca", - "docstring": "Script to compute PCA analysis on diffusion metrics. Output returned is all\nsignificant principal components (e.g. 
presenting eigenvalues > 1) in a\nconnectivity matrix format. This script can take into account all edges from\nevery subject in a population or only non-zero edges across all subjects.\n\nThe script can take directly as input a connectoflow output folder. Simply use\nthe --input_connectoflow flag. For other types of folder input, the script\nexpects a single folder containing all matrices for all subjects.\nExample:\n [in_folder]\n |--- sub-01_ad.npy\n |--- sub-01_md.npy\n |--- sub-02_ad.npy\n |--- sub-02_md.npy\n |--- ...\n\nThe plots, tables and principal components matrices will be outputted in the\ndesignated folder from the argument. If you want to move your\nprincipal components matrices back into your connectoflow output, you can use a\nsimilar bash command for all principal components:\nfor sub in `cat list_id.txt`;\ndo\n cp out_folder/${sub}_PC1.npy connectoflow_output/$sub/Compute_Connectivity/\ndone\n\nInterpretation of resulting principal components can be done by evaluating the\nloading values for each metric. A value near 0 means that this metric doesn't\ncontribute to this specific component whereas high positive or negative values\nmean a larger contribution. Components can then be labeled based on which\nmetric contributes the most. For example, a principal component showing a\nhigh loading for afd_fixel and near 0 loading for all other metrics can be\ninterpreted as axonal density (see Gagnon et al. 2022 for this specific example\nor ref [3] for an introduction to PCA).\n\nEXAMPLE USAGE:\nscil_connectivity_compute_pca.py input_folder/ output_folder/\n --metrics ad fa md rd [...] --list_ids list_ids.txt", - "help": "usage: scil_connectivity_compute_pca.py [-h] --metrics METRICS [METRICS ...]\n --list_ids FILE [--not_only_common]\n [--input_connectoflow]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_folder out_folder\n\nScript to compute PCA analysis on diffusion metrics. Output returned is all\nsignificant principal components (e.g. presenting eigenvalues > 1) in a\nconnectivity matrix format. This script can take into account all edges from\nevery subject in a population or only non-zero edges across all subjects.\n\nThe script can take directly as input a connectoflow output folder. Simply use\nthe --input_connectoflow flag. For other types of folder input, the script\nexpects a single folder containing all matrices for all subjects.\nExample:\n [in_folder]\n |--- sub-01_ad.npy\n |--- sub-01_md.npy\n |--- sub-02_ad.npy\n |--- sub-02_md.npy\n |--- ...\n\nThe plots, tables and principal components matrices will be outputted in the\ndesignated folder from the argument. If you want to move your\nprincipal components matrices back into your connectoflow output, you can use a\nsimilar bash command for all principal components:\nfor sub in `cat list_id.txt`;\ndo\n cp out_folder/${sub}_PC1.npy connectoflow_output/$sub/Compute_Connectivity/\ndone\n\nInterpretation of resulting principal components can be done by evaluating the\nloading values for each metric. A value near 0 means that this metric doesn't\ncontribute to this specific component whereas high positive or negative values\nmean a larger contribution. Components can then be labeled based on which\nmetric contributes the most. For example, a principal component showing a\nhigh loading for afd_fixel and near 0 loading for all other metrics can be\ninterpreted as axonal density (see Gagnon et al. 
2022 for this specific example\nor ref [3] for an introduction to PCA).\n\nEXAMPLE USAGE:\nscil_connectivity_compute_pca.py input_folder/ output_folder/\n --metrics ad fa md rd [...] --list_ids list_ids.txt\n\npositional arguments:\n in_folder Path to the input folder. See explanation above for its expected organization.\n out_folder Path to the output folder to export graphs, tables and principal \n components matrices.\n\noptions:\n -h, --help show this help message and exit\n --metrics METRICS [METRICS ...]\n Suffixes of all metrics to include in PCA analysis (ex: ad md fa rd). \n They must be immediately followed by the .npy extension.\n --list_ids FILE Path to a .txt file containing a list of all ids.\n --not_only_common If true, will include all edges from all subjects and not only \n common edges (Not recommended)\n --input_connectoflow If true, script will assume the input folder is a Connectoflow output.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Chamberland M, Raven EP, Genc S, Duffy K, Descoteaux M, Parker GD, Tax CMW,\n Jones DK. Dimensionality reduction of diffusion MRI measures for improved\n tractometry of the human brain. Neuroimage. 2019 Oct 15;200:89-100.\n doi: 10.1016/j.neuroimage.2019.06.020. Epub 2019 Jun 20. PMID: 31228638;\n PMCID: PMC6711466.\n[2] Gagnon A., Grenier G., Bocti C., Gillet V., Lepage J.-F., Baccarelli A. A.,\n Posner J., Descoteaux M., Takser L. (2022). White matter microstructural\n variability linked to differential attentional skills and impulsive behavior\n in a pediatric population. Cerebral Cortex.\n https://doi.org/10.1093/cercor/bhac180\n[3] https://towardsdatascience.com/what-are-pca-loadings-and-biplots-9a7897f2e559\n \n", - "synonyms": [ - [ - "human", - "human" - ], - [ - "population", - "population" - ], - [ - "principal", - "principal" - ], - [ - "subject", - "subject" - ], - [ - "connectivity", - "connectivity" - ], - [ - "white", - "white" - ], - [ - "larger", - "larger" - ], - [ - "positive", - "negative" - ], - [ - "matrices", - "matrices" - ], - [ - "high", - "high" - ], - [ - "diffusion", - "diffusion" - ], - [ - "cortex", - "cortical", - "cortex" - ], - [ - "variability", - "variability" - ], - [ - "positive", - "positive" - ], - [ - "true", - "true" - ], - [ - "subjects", - "subjects" - ], - [ - "based", - "based" - ], - [ - "matter", - "matter" - ], - [ - "specific", - "specific" - ], - [ - "level", - "level" - ], - [ - "axonal", - "axonal" - ], - [ - "analysis", - "analysis" - ], - [ - "highest", - "highest" - ], - [ - "brain", - "brain" - ], - [ - "presented", - "presenting" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_filter", - "docstring": "Script to facilitate filtering of connectivity matrices.\nThe same could be achieved through a complex sequence of\nscil_connectivity_math.py.\n\nCan be used with any connectivity matrix from\nscil_connectivity_compute_matrices.py.\n\nFor example, a simple filtering (Jasmeen style) would be:\nscil_connectivity_filter.py out_mask.npy\n --greater_than */sc.npy 1 0.90\n --lower_than */sim.npy 2 0.90\n --greater_than */len.npy 40 0.90 -v;\n\nThis will result in a binary mask where each node with a value of 1 represents\na node with at least 90% of the population having at least 1 streamline,\n90% of the population is similar to the average (2mm) and 90% of the\npopulation having at least 40mm of average streamlines 
length.\n\nAll operations are strictly > or <; there is no >= or <=.\n\n--greater_than or --lower_than expect the same convention:\n MATRICES_LIST VALUE_THR POPULATION_PERC\nIt is strongly recommended (but not enforced) that the same number of\nconnectivity matrices is used for each condition.\n\nThis script performs an intersection of all conditions, meaning that all\nconditions must be met in order not to be filtered.\nIf the user wants to manually handle the requirements, --keep_condition_count\ncan be used and manually binarized using scil_connectivity_math.py.\n\nFormerly: scil_filter_connectivity.py", - "help": "usage: scil_connectivity_filter.py [-h] [--lower_than [LOWER_THAN ...]]\n [--greater_than [GREATER_THAN ...]]\n [--keep_condition_count] [--inverse_mask]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_matrix_mask\n\nScript to facilitate filtering of connectivity matrices.\nThe same could be achieved through a complex sequence of\nscil_connectivity_math.py.\n\nCan be used with any connectivity matrix from\nscil_connectivity_compute_matrices.py.\n\nFor example, a simple filtering (Jasmeen style) would be:\nscil_connectivity_filter.py out_mask.npy\n --greater_than */sc.npy 1 0.90\n --lower_than */sim.npy 2 0.90\n --greater_than */len.npy 40 0.90 -v;\n\nThis will result in a binary mask where each node with a value of 1 represents\na node with at least 90% of the population having at least 1 streamline,\n90% of the population is similar to the average (2mm) and 90% of the\npopulation having at least 40mm of average streamlines length.\n\nAll operations are strictly > or <; there is no >= or <=.\n\n--greater_than or --lower_than expect the same convention:\n MATRICES_LIST VALUE_THR POPULATION_PERC\nIt is strongly recommended (but not enforced) that the same number of\nconnectivity matrices is used for each condition.\n\nThis script performs an intersection of all conditions, meaning that all\nconditions must be met in order not to be filtered.\nIf the user wants to manually handle the requirements, --keep_condition_count\ncan be used and manually binarized using scil_connectivity_math.py.\n\nFormerly: scil_filter_connectivity.py\n\npositional arguments:\n out_matrix_mask Output mask (matrix) resulting from the provided conditions (.npy).\n\noptions:\n -h, --help show this help message and exit\n --lower_than [LOWER_THAN ...]\n Lower than condition using the VALUE_THR in at least POPULATION_PERC (from MATRICES_LIST).\n See description for more details.\n --greater_than [GREATER_THAN ...]\n Greater than condition using the VALUE_THR in at least POPULATION_PERC (from MATRICES_LIST).\n See description for more details.\n --keep_condition_count\n Report the number of condition(s) that pass/fail rather than a binary mask.\n --inverse_mask Inverse the final mask. 0 where all conditions are respected and 1 where at least one fails.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "population", - "population" - ], - [ - "greater", - "greater" - ], - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "higher", - "lower" - ], - [ - "meaning", - "meaning" - ], - [ - "connectivity", - "connectivity" - ], - [ - "matrices", - "matrices" - ], - [ - "pass", - "pass" - ], - [ - "conditions", - "conditions" - ], - [ - "result", - "result" - ], - [ - "complex", - "complex" - ], - [ - "binary", - "binary" - ], - [ - "average", - "average" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_graph_measures", - "docstring": "Evaluate graph theory measures from connectivity matrices.\nA length weighted and a streamline count weighted matrix are required since\nsome measures require one or the other.\n\nThis script evaluates the measures one subject at a time. To generate a\npopulation dictionary (similarly to other scil_connectivity_*.py scripts), use\nthe --append_json option as well as using the same output filename.\n>>> for i in hcp/*/; do scil_connectivity_graph_measures.py ${i}/sc_prob.npy\n ${i}/len_prob.npy hcp_prob.json --append_json --avg_node_wise; done\n\nSome measures output one value per node; the default behavior is to list\nthem all into a list. To obtain only the average use the\n--avg_node_wise option.\n\nThe computed connectivity measures are:\ncentrality, modularity, assortativity, participation, clustering,\nnodal_strength, local_efficiency, global_efficiency, density, rich_club,\npath_length, edge_count, omega, sigma\n\nFor more details about the measures, please refer to\n- https://sites.google.com/site/bctnet/measures\n- https://github.com/aestrivex/bctpy/wiki\n\nThis script is under the GNU GPLv3 license, for more detail please refer to\nhttps://www.gnu.org/licenses/gpl-3.0.en.html\n\nFormerly: scil_evaluate_connectivity_graph_measures.py", - "help": "usage: scil_connectivity_graph_measures.py [-h]\n [--filtering_mask FILTERING_MASK]\n [--avg_node_wise] [--append_json]\n [--small_world] [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_conn_matrix in_length_matrix\n out_json\n\nEvaluate graph theory measures from connectivity matrices.\nA length weighted and a streamline count weighted matrix are required since\nsome measures require one or the other.\n\nThis script evaluates the measures one subject at a time. To generate a\npopulation dictionary (similarly to other scil_connectivity_*.py scripts), use\nthe --append_json option as well as using the same output filename.\n>>> for i in hcp/*/; do scil_connectivity_graph_measures.py ${i}/sc_prob.npy\n ${i}/len_prob.npy hcp_prob.json --append_json --avg_node_wise; done\n\nSome measures output one value per node; the default behavior is to list\nthem all into a list. 
To obtain only the average use the\n--avg_node_wise option.\n\nThe computed connectivity measures are:\ncentrality, modularity, assortativity, participation, clustering,\nnodal_strength, local_efficiency, global_efficiency, density, rich_club,\npath_length, edge_count, omega, sigma\n\nFor more details about the measures, please refer to\n- https://sites.google.com/site/bctnet/measures\n- https://github.com/aestrivex/bctpy/wiki\n\nThis script is under the GNU GPLv3 license, for more detail please refer to\nhttps://www.gnu.org/licenses/gpl-3.0.en.html\n\nFormerly: scil_evaluate_connectivity_graph_measures.py\n\npositional arguments:\n in_conn_matrix Input connectivity matrix (.npy).\n Typically a streamline count weighted matrix.\n in_length_matrix Input length weighted matrix (.npy).\n out_json Path of the output json.\n\noptions:\n -h, --help show this help message and exit\n --filtering_mask FILTERING_MASK\n Binary filtering mask to apply before computing the measures.\n --avg_node_wise Return a single value for node-wise measures.\n --append_json If the file already exists, will append to the dictionary.\n --small_world Compute measure related to small worldness (omega and sigma).\n This option is much slower.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\n[1] Rubinov, Mikail, and Olaf Sporns. \"Complex network measures of brain\n connectivity: uses and interpretations.\" Neuroimage 52.3 (2010):\n 1059-1069.\n", - "synonyms": [ - [ - "population", - "population" - ], - [ - "streamline", - "streamline" - ], - [ - "network", - "networks", - "network" - ], - [ - "weighted", - "weighted" - ], - [ - "subject", - "subject" - ], - [ - "examine", - "evaluate" - ], - [ - "connectivity", - "connectivity" - ], - [ - "matrices", - "matrices" - ], - [ - "applied", - "apply" - ], - [ - "large", - "small" - ], - [ - "complex", - "complex" - ], - [ - "binary", - "binary" - ], - [ - "average", - "average" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_hdf5_average_density_map", - "docstring": "Compute a density map for each connection from an hdf5 file.\nTypically used after scil_tractogram_segment_bundles_for_connectivity.py in\norder to obtain the average density map of each connection to allow the use\nof --similarity in scil_connectivity_compute_matrices.py.\n\nThis script is parallelized, but will run much slower on non-SSD if too many\nprocesses are used. The output is a directory containing the thousands of\nconnections:\nout_dir/\n |-- LABEL1_LABEL1.nii.gz\n |-- LABEL1_LABEL2.nii.gz\n |-- [...]\n |-- LABEL90_LABEL90.nii.gz\n\nFormerly: scil_compute_hdf5_average_density_map.py", - "help": "usage: scil_connectivity_hdf5_average_density_map.py [-h] [--binary]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_hdf5 [in_hdf5 ...]\n out_dir\n\nCompute a density map for each connection from an hdf5 file.\nTypically used after scil_tractogram_segment_bundles_for_connectivity.py in\norder to obtain the average density map of each connection to allow the use\nof --similarity in scil_connectivity_compute_matrices.py.\n\nThis script is parallelized, but will run much slower on non-SSD if too many\nprocesses are used. 
The output is a directory containing the thousands of\nconnections:\nout_dir/\n |-- LABEL1_LABEL1.nii.gz\n |-- LABEL1_LABEL2.nii.gz\n |-- [...]\n |-- LABEL90_LABEL90.nii.gz\n\nFormerly: scil_compute_hdf5_average_density_map.py\n\npositional arguments:\n in_hdf5 List of HDF5 filenames (.h5) from scil_tractogram_segment_bundles_for_connectivity.py.\n out_dir Path of the output directory.\n\noptions:\n -h, --help show this help message and exit\n --binary Binarize density maps before the population average.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "population", - "population" - ], - [ - "maps", - "map" - ], - [ - "similarity", - "similarity" - ], - [ - "maps", - "maps" - ], - [ - "processes", - "processes" - ], - [ - "connections", - "connections" - ], - [ - "connection", - "connection" - ], - [ - "binary", - "binary" - ], - [ - "average", - "average" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_math", - "docstring": "Performs an operation on a list of matrices. The supported operations are\nlisted below.\n\nSome operations such as multiplication or addition accept float values as\nparameters instead of matrices.\n> scil_connectivity_math.py multiplication mat.npy 10 mult_10.npy", - "help": "usage: scil_connectivity_math.py [-h] [--data_type DATA_TYPE]\n [--exclude_background]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference}\n in_matrices [in_matrices ...] out_matrix\n\nPerforms an operation on a list of matrices. 
The supported operations are\nlisted below.\n\nSome operations such as multiplication or addition accept float values as\nparameters instead of matrices.\n> scil_connectivity_math.py multiplication mat.npy 10 mult_10.npy\n\n lower_threshold: MAT THRESHOLD\n All values below the threshold will be set to zero.\n All values above the threshold will be set to one.\n \n upper_threshold: MAT THRESHOLD\n All values below the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_eq: MAT THRESHOLD\n All values below the threshold will be set to zero.\n All values above or equal the threshold will be set to one.\n \n upper_threshold_eq: MAT THRESHOLD\n All values below or equal the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_otsu: MAT\n All values below or equal to the Otsu threshold will be set to zero.\n All values above the Otsu threshold will be set to one.\n (Otsu's method is an algorithm to perform automatic matrix thresholding\n of the background.)\n \n upper_threshold_otsu: MAT\n All values below the Otsu threshold will be set to one.\n All values above or equal to the Otsu threshold will be set to zero.\n Equivalent to lower_threshold_otsu followed by an inversion.\n \n lower_clip: MAT THRESHOLD\n All values below the threshold will be set to threshold.\n \n upper_clip: MAT THRESHOLD\n All values above the threshold will be set to threshold.\n \n absolute_value: MAT\n All negative values will become positive.\n \n round: MAT\n Round all decimal values to the closest integer.\n \n ceil: MAT\n Ceil all decimal values to the next integer.\n \n floor: MAT\n Floor all decimal values to the previous integer.\n \n normalize_sum: MAT\n Normalize the matrix so the sum of all values is one.\n \n normalize_max: MAT\n Normalize the matrix so the maximum value is one.\n \n log_10: MAT\n Apply a log (base 10) to all non-zero values of a matrix.\n \n log_e: MAT\n Apply a natural log to all non-zero values of a matrix.\n \n convert: MAT\n Perform no operation, but simply change the data type.\n \n invert: MAT\n Operation on binary matrix to interchange 0s and 1s in a binary mask.\n \n addition: MATs\n Add multiple matrices together.\n \n subtraction: MAT_1 MAT_2\n Subtract the second matrix from the first (MAT_1 - MAT_2).\n \n multiplication: MATs\n Multiply multiple matrices together (danger of underflow and overflow)\n \n division: MAT_1 MAT_2\n Divide first matrix by the second (danger of underflow and overflow)\n Ignores zero values, excluded from the operation.\n \n mean: MATs\n Compute the mean of matrices.\n If a single 4D matrix is provided, average along the last dimension.\n \n std: MATs\n Compute the standard deviation across multiple matrices.\n If a single 4D matrix is provided, compute the STD along the last\n dimension.\n \n correlation: MATs\n Computes the correlation of the 3x3x3 neighborhood of each voxel, for\n all pairs of input matrices. The final matrix is the average correlation\n (through all pairs).\n For a given pair of matrices\n - Background is considered as 0. May lead to very high correlations\n close to the border of the background regions, or very poor ones if the\n background in both matrices differ.\n - Images are zero-padded. 
For the same reason as above, may lead to\n very high correlations if you have data close to the border of the\n matrix.\n - NaN values (if a voxel's neighborhood is entirely uniform; std 0) are\n replaced by\n - 0 if at least one neighborhood entirely contained background.\n - 1 if the voxel's neighborhoods are uniform in both matrices\n - 0 if the voxel's neighborhood is uniform in one matrix, but not\n the other.\n\n UPDATE AS OF VERSION 2.0: Random noise was previously added in the\n process to help avoid NaN values. Now replaced by either 0 or 1 as\n explained above.\n \n union: MATs\n Operation on binary matrix to keep voxels that are non-zero in at\n least one file.\n \n intersection: MATs\n Operation on binary matrix to keep the voxels that are non-zero\n and present in all files.\n \n difference: MAT_1 MAT_2\n Operation on binary matrix to keep voxels from the first file that are\n not in the second file (non-zeros).\n \n\npositional arguments:\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference}\n The type of operation to be performed on the matrices.\n in_matrices The list of matrices files or parameters.\n out_matrix Output matrix path.\n\noptions:\n -h, --help show this help message and exit\n --data_type DATA_TYPE\n Data type of the output image. Use the format: uint8, float16, int32.\n --exclude_background Does not affect the background of the original matrices.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "process", - "process" - ], - [ - "methods", - "method" - ], - [ - "region", - "regions", - "regions" - ], - [ - "positive", - "negative" - ], - [ - "matrices", - "matrices" - ], - [ - "supported", - "supported" - ], - [ - "image", - "image" - ], - [ - "high", - "high" - ], - [ - "algorithm", - "algorithm" - ], - [ - "applied", - "apply" - ], - [ - "positive", - "positive" - ], - [ - "random", - "random" - ], - [ - "Data", - "data", - "data" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "voxel", - "voxels" - ], - [ - "binary", - "binary" - ], - [ - "average", - "average" - ], - [ - "considered", - "considered" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "higher", - "higher" - ], - [ - "parameters", - "parameters" - ], - [ - "difference", - "difference" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_normalize", - "docstring": "Normalize a connectivity matrix coming from\nscil_tractogram_segment_bundles_for_connectivity.py.\n3 categories of normalization are available:\n-- Edge attributes\n - length: Multiply each edge by the average bundle length.\n Compensate for far away connections when using interface seeding.\n Cannot be used with inverse_length.\n\n - inverse_length: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n Cannot be used with length.\n\n - bundle_volume: Divide each edge by the average bundle volume.\n Compensate for big connections when using white matter seeding.\n\n-- Node attributes (Mutually exclusive)\n - parcel_volume: Divide each edge by the sum of node volume.\n Compensate for the 
likelihood of ending in the node.\n Compensate for seeding bias when using interface seeding.\n\n - parcel_surface: Divide each edge by the sum of the node surface.\n Compensate for the likelihood of ending in the node.\n Compensate for seeding bias when using interface seeding.\n\n-- Matrix scaling (Mutually exclusive)\n - max_at_one: Maximum value of the matrix will be set to one.\n - sum_to_one: Ensure the sum of all edge weights is one.\n - log_10: Apply a base 10 logarithm to all edge weights.\n\nThe volume and length matrices should come from the\nscil_tractogram_segment_bundles_for_connectivity.py script.\n\nA review of these types of normalization is available in:\nColon-Perez, Luis M., et al. \"Dimensionless, scale-invariant, edge weight\nmetric for the study of complex structural networks.\" PLOS ONE 10.7 (2015).\n\nHowever, the edge weighting proposed in this publication is not\nimplemented.\n\nFormerly: scil_normalize_connectivity.py", - "help": "usage: scil_connectivity_normalize.py [-h]\n [--length LENGTH_MATRIX | --inverse_length LENGTH_MATRIX]\n [--bundle_volume VOLUME_MATRIX]\n [--parcel_volume ATLAS LABELS_LIST | --parcel_surface ATLAS LABELS_LIST]\n [--max_at_one | --sum_to_one | --log_10]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_matrix out_matrix\n\nNormalize a connectivity matrix coming from\nscil_tractogram_segment_bundles_for_connectivity.py.\n3 categories of normalization are available:\n-- Edge attributes\n - length: Multiply each edge by the average bundle length.\n Compensate for far away connections when using interface seeding.\n Cannot be used with inverse_length.\n\n - inverse_length: Divide each edge by the average bundle length.\n Compensate for big connections when using white matter seeding.\n Cannot be used with length.\n\n - bundle_volume: Divide each edge by the average bundle volume.\n Compensate for big connections when using white matter seeding.\n\n-- Node attributes (Mutually exclusive)\n - parcel_volume: Divide each edge by the sum of node volume.\n Compensate for the likelihood of ending in the node.\n Compensate for seeding bias when using interface seeding.\n\n - parcel_surface: Divide each edge by the sum of the node surface.\n Compensate for the likelihood of ending in the node.\n Compensate for seeding bias when using interface seeding.\n\n-- Matrix scaling (Mutually exclusive)\n - max_at_one: Maximum value of the matrix will be set to one.\n - sum_to_one: Ensure the sum of all edge weights is one.\n - log_10: Apply a base 10 logarithm to all edge weights.\n\nThe volume and length matrices should come from the\nscil_tractogram_segment_bundles_for_connectivity.py script.\n\nA review of these types of normalization is available in:\nColon-Perez, Luis M., et al. \"Dimensionless, scale-invariant, edge weight\nmetric for the study of complex structural networks.\" PLOS ONE 10.7 (2015).\n\nHowever, the edge weighting proposed in this publication is not\nimplemented.\n\nFormerly: scil_normalize_connectivity.py\n\npositional arguments:\n in_matrix Input connectivity matrix. This is typically a streamline_count matrix (.npy).\n out_matrix Output normalized matrix (.npy).\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nEdge-wise options:\n --length LENGTH_MATRIX\n Length matrix used for edge-wise multiplication.\n --inverse_length LENGTH_MATRIX\n Length matrix used for edge-wise division.\n --bundle_volume VOLUME_MATRIX\n Volume matrix used for edge-wise division.\n --parcel_volume ATLAS LABELS_LIST\n Atlas and labels list for edge-wise division.\n --parcel_surface ATLAS LABELS_LIST\n Atlas and labels list for edge-wise division.\n\nScaling options:\n --max_at_one Scale matrix with maximum value at one.\n --sum_to_one Scale matrix with sum of all elements at one.\n --log_10 Apply a base 10 logarithm to the matrix.\n", - "synonyms": [ - [ - "seeding", - "seeding" - ], - [ - "network", - "networks", - "networks" - ], - [ - "connectivity", - "connectivity" - ], - [ - "white", - "white" - ], - [ - "presented", - "presented" - ], - [ - "probability", - "likelihood" - ], - [ - "bundles", - "bundle" - ], - [ - "applied", - "apply" - ], - [ - "connections", - "connections" - ], - [ - "atlas", - "atlas" - ], - [ - "matter", - "matter" - ], - [ - "complex", - "complex" - ], - [ - "structural", - "structural" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "average", - "average" - ], - [ - "studies", - "study", - "study" - ], - [ - "proposed", - "proposed" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_pairwise_agreement", - "docstring": "Evaluate pair-wise similarity measures of connectivity matrices.\n\nThe computed similarity measures are:\nthe sum of squared differences and the Pearson correlation coefficient\n\nFormerly: scil_evaluate_connectivity_pairwaise_agreement_measures.py", - "help": "usage: scil_connectivity_pairwise_agreement.py [-h] [--single_compare matrix]\n [--normalize] [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_matrices [in_matrices ...]\n out_json\n\nEvaluate pair-wise similarity measures of connectivity matrices.\n\nThe computed similarity measures are:\nthe sum of squared differences and the Pearson correlation coefficient\n\nFormerly: scil_evaluate_connectivity_pairwaise_agreement_measures.py\n\npositional arguments:\n in_matrices Path of the input matrices.\n out_json Path of the output json file.\n\noptions:\n -h, --help show this help message and exit\n --single_compare matrix\n Compare inputs to this single file.\n (Else, compute all pairs in in_matrices).\n --normalize If set, will normalize all matrices from zero to one.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "similarity", - "similarity" - ], - [ - "examine", - "evaluate" - ], - [ - "connectivity", - "connectivity" - ], - [ - "matrices", - "matrices" - ], - [ - "level", - "level" - ], - [ - "difference", - "difference" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_print_filenames", - "docstring": "Output the list of filenames using the coordinates from a binary connectivity\nmatrix. 
Typically used to move around files that are considered valid after\nthe scil_connectivity_filter.py script.\n\nExample:\n# Keep connections with more than 1000 streamlines for 100% of a population\nscil_connectivity_filter.py filtering_mask.npy\n --greater_than */streamlines_count.npy 1000 1.0\nscil_connectivity_print_filenames.py filtering_mask.npy\n labels_list.txt pass.txt\nfor file in $(cat pass.txt);\n do mv ${SOMEWHERE}/${file} ${SOMEWHERE_ELSE}/;\ndone\n\nFormerly: scil_print_connectivity_filenames.py", - "help": "usage: scil_connectivity_print_filenames.py [-h] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_matrix labels_list out_txt\n\nOutput the list of filenames using the coordinates from a binary connectivity\nmatrix. Typically used to move around files that are considered valid after\nthe scil_connectivity_filter.py script.\n\nExample:\n# Keep connections with more than 1000 streamlines for 100% of a population\nscil_connectivity_filter.py filtering_mask.npy\n --greater_than */streamlines_count.npy 1000 1.0\nscil_connectivity_print_filenames.py filtering_mask.npy\n labels_list.txt pass.txt\nfor file in $(cat pass.txt);\n do mv ${SOMEWHERE}/${file} ${SOMEWHERE_ELSE}/;\ndone\n\nFormerly: scil_print_connectivity_filenames.py\n\npositional arguments:\n in_matrix Binary matrix in numpy (.npy) format.\n Typically from scil_connectivity_filter.py\n labels_list List saved by the decomposition script.\n out_txt Output text file containing all filenames.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "population", - "population" - ], - [ - "streamlines", - "streamlines" - ], - [ - "connectivity", - "connectivity" - ], - [ - "pass", - "pass" - ], - [ - "connections", - "connections" - ], - [ - "valid", - "valid" - ], - [ - "binary", - "binary" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_connectivity_reorder_rois", - "docstring": "Re-order one or many connectivity matrices using a text file format.\nThe first row is the (x) and the second row is the (y); values must be space-separated.\nThe resulting matrix does not have to be square (supports an unequal number of\nx and y).\n\nThe values refer to the coordinates (starting at 0) in the matrix, but if the\n--labels_list parameter is used, the values will refer to the label which will\nbe converted to the appropriate coordinates. This file must be the same as the\none provided to the scil_tractogram_segment_bundles_for_connectivity.py.\n\nTo subsequently use scil_visualize_connectivity.py with a lookup table, you\nmust use a label-based reordering json and use --labels_list.\n\nYou can also use the Optimal Leaf Ordering (OLO) algorithm to transform a\nsparse matrix into an ordering that reduces the matrix bandwidth. The output\nfile can then be re-used with --in_ordering. 
Only one input can be used with\nthis option; we recommend an average streamline count or volume matrix.\n\nFormerly: scil_reorder_connectivity.py", - "help": "usage: scil_connectivity_reorder_rois.py [-h]\n (--in_ordering IN_ORDERING | --optimal_leaf_ordering OUT_FILE)\n [--out_suffix OUT_SUFFIX]\n [--out_dir OUT_DIR]\n [--labels_list LABELS_LIST]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_matrices [in_matrices ...]\n\nRe-order one or many connectivity matrices using a text file format.\nThe first row is the (x) and the second row is the (y); values must be space-separated.\nThe resulting matrix does not have to be square (supports an unequal number of\nx and y).\n\nThe values refer to the coordinates (starting at 0) in the matrix, but if the\n--labels_list parameter is used, the values will refer to the label which will\nbe converted to the appropriate coordinates. This file must be the same as the\none provided to the scil_tractogram_segment_bundles_for_connectivity.py.\n\nTo subsequently use scil_visualize_connectivity.py with a lookup table, you\nmust use a label-based reordering json and use --labels_list.\n\nYou can also use the Optimal Leaf Ordering (OLO) algorithm to transform a\nsparse matrix into an ordering that reduces the matrix bandwidth. The output\nfile can then be re-used with --in_ordering. Only one input can be used with\nthis option; we recommend an average streamline count or volume matrix.\n\nFormerly: scil_reorder_connectivity.py\n\npositional arguments:\n in_matrices Connectivity matrices in .npy or .txt format.\n\noptions:\n -h, --help show this help message and exit\n --in_ordering IN_ORDERING\n Txt file with the first row as x and second as y.\n --optimal_leaf_ordering OUT_FILE\n Output a text file with an ordering that aligns structures along the diagonal.\n --out_suffix OUT_SUFFIX\n Suffix for the output matrix filename.\n --out_dir OUT_DIR Output directory for the re-ordered matrices.\n --labels_list LABELS_LIST\n List saved by the decomposition script;\n --in_ordering must contain labels rather than coordinates (.txt).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Rubinov, Mikail, and Olaf Sporns. 
\"Complex network measures of brain\n connectivity: uses and interpretations.\" Neuroimage 52.3 (2010):\n 1059-1069.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "streamline", - "streamline" - ], - [ - "network", - "networks", - "network" - ], - [ - "connectivity", - "connectivity" - ], - [ - "matrices", - "matrices" - ], - [ - "algorithm", - "algorithm" - ], - [ - "space", - "space" - ], - [ - "parameter", - "parameter" - ], - [ - "based", - "based" - ], - [ - "complex", - "complex" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "average", - "average" - ], - [ - "level", - "level" - ], - [ - "subsequently", - "subsequently" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_denoising_nlmeans", - "docstring": "Script to denoise a dataset with the Non Local Means algorithm.\n\nFormerly: scil_run_nlmeans.py", - "help": "usage: scil_denoising_nlmeans.py [-h] [--mask] [--sigma float] [--log LOGFILE]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_image out_image number_coils\n\nScript to denoise a dataset with the Non Local Means algorithm.\n\nFormerly: scil_run_nlmeans.py\n\npositional arguments:\n in_image Path of the image file to denoise.\n out_image Path to save the denoised image file.\n number_coils Number of receiver coils of the scanner.\n Use number_coils=1 in the case of a SENSE (GE, Philips) reconstruction and \n number_coils >= 1 for GRAPPA reconstruction (Siemens). number_coils=4 works well for the 1.5T\n in Sherbrooke. Use number_coils=0 if the noise is considered Gaussian distributed.\n\noptions:\n -h, --help show this help message and exit\n --mask Path to a binary mask. Only the data inside the mask will be used for computations\n --sigma float The standard deviation of the noise to use instead of computing it automatically.\n --log LOGFILE If supplied, name of the text file to store the logs.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "image", - "image" - ], - [ - "algorithm", - "algorithm" - ], - [ - "Data", - "data", - "data" - ], - [ - "processes", - "processes" - ], - [ - "binary", - "binary" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_dki_metrics", - "docstring": "Script to compute the Diffusion Kurtosis Imaging (DKI) and Mean Signal DKI\n(MSDKI) metrics. DKI is a multi-shell diffusion model. The input DWI needs\nto be multi-shell, i.e. multi-bvalued.\n\nSince the diffusion kurtosis model involves the estimation of a large number\nof parameters and since the non-Gaussian components of the diffusion signal\nare more sensitive to artefacts, you should really denoise your DWI volume\nbefore using this DKI script (e.g. scil_denoising_nlmeans.py). Moreover, to\nremove biases due to fiber dispersion, fiber crossings and other mesoscopic\nproperties of the underlying tissue, MSDKI does a powder-average of DWI for all\ndirections, thus removing the orientational dependencies and creating an\nalternative mean kurtosis map.\n\nDKI is also known to be vulnerable to artefacted voxels induced by the\nlow radial diffusivities of aligned white matter (CC, CST voxels). 
Since it is\nvery hard to capture non-Gaussian information due to the low decays in radial\ndirection, its kurtosis estimates have very low robustness.\nNoisy kurtosis estimates tend to be negative and their absolute values can be\norders of magnitude higher than the typical kurtosis values. Consequently,\nthese negative kurtosis values will heavily propagate to the mean and radial\nkurtosis metrics. This is well-reported in [Rafael Henriques MSc thesis 2012,\nchapter 3]. Two ways to overcome this issue: i) compute the kurtosis values\nfrom powder-averaged MSDKI, and ii) perform 3D Gaussian smoothing. On\npowder-averaged signal decays, you don't have this low diffusivity issue and\nyour kurtosis estimates have much higher precision (additionally, they are\nindependent of the fODF).\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section. If --not_all is set, only the metrics specified\nexplicitly by the flags will be output.\n\nThis script directly comes from the DIPY example gallery and references\ntherein.\n[1] examples_built/reconst_dki/#example-reconst-dki\n[2] examples_built/reconst_msdki/#example-reconst-msdki\n\nFormerly: scil_compute_kurtosis_metrics.py", - "help": "usage: scil_dki_metrics.py [-h] [--mask MASK] [--tolerance tol]\n [--skip_b0_check] [--min_k MIN_K] [--max_k MAX_K]\n [--smooth SMOOTH] [--not_all] [--ak file]\n [--mk file] [--rk file] [--msk file]\n [--dki_fa file] [--dki_md file] [--dki_ad file]\n [--dki_rd file] [--dki_residual file] [--msd file]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec\n\nScript to compute the Diffusion Kurtosis Imaging (DKI) and Mean Signal DKI\n(MSDKI) metrics. DKI is a multi-shell diffusion model. The input DWI needs\nto be multi-shell, i.e. multi-bvalued.\n\nSince the diffusion kurtosis model involves the estimation of a large number\nof parameters and since the non-Gaussian components of the diffusion signal\nare more sensitive to artefacts, you should really denoise your DWI volume\nbefore using this DKI script (e.g. scil_denoising_nlmeans.py). Moreover, to\nremove biases due to fiber dispersion, fiber crossings and other mesoscopic\nproperties of the underlying tissue, MSDKI does a powder-average of DWI for all\ndirections, thus removing the orientational dependencies and creating an\nalternative mean kurtosis map.\n\nDKI is also known to be vulnerable to artefacted voxels induced by the\nlow radial diffusivities of aligned white matter (CC, CST voxels). Since it is\nvery hard to capture non-Gaussian information due to the low decays in radial\ndirection, its kurtosis estimates have very low robustness.\nNoisy kurtosis estimates tend to be negative and their absolute values can be\norders of magnitude higher than the typical kurtosis values. Consequently,\nthese negative kurtosis values will heavily propagate to the mean and radial\nkurtosis metrics. This is well-reported in [Rafael Henriques MSc thesis 2012,\nchapter 3]. Two ways to overcome this issue: i) compute the kurtosis values\nfrom powder-averaged MSDKI, and ii) perform 3D Gaussian smoothing. On\npowder-averaged signal decays, you don't have this low diffusivity issue and\nyour kurtosis estimates have much higher precision (additionally, they are\nindependent of the fODF).\n\nBy default, will output all available metrics, using default names. 
Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section. If --not_all is set, only the metrics specified\nexplicitly by the flags will be output.\n\nThis script directly comes from the DIPY example gallery and references\ntherein.\n[1] examples_built/reconst_dki/#example-reconst-dki\n[2] examples_built/reconst_msdki/#example-reconst-msdki\n\nFormerly: scil_compute_kurtosis_metrics.py\n\npositional arguments:\n in_dwi Path of the input multi-shell DWI dataset.\n in_bval Path of the b-value file, in FSL format.\n in_bvec Path of the b-vector file, in FSL format.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computations and reconstruction.\n [Default: None]\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --min_k MIN_K Minimum kurtosis value in the output maps \n (ak, mk, rk). In theory, -3/7 is the min kurtosis \n limit for regions that consist of water confined \n to spherical pores (see DIPY example and \n documentation) [Default: 0.0].\n --max_k MAX_K Maximum kurtosis value in the output maps \n (ak, mk, rk). In theory, 10 is the max kurtosis\n limit for regions that consist of water confined\n to spherical pores (see DIPY example and \n documentation) [Default: 3.0].\n --smooth SMOOTH Smooth input DWI with a 3D Gaussian filter with \n full-width-half-max (fwhm). Kurtosis fitting is \n sensitive and outliers occur easily. According to\n tests on HCP, CB_Brain, Penthera3T, this smoothing\n is thus turned ON by default with fwhm=2.5. \n [Default: 2.5].\n --not_all If set, will only save the metrics explicitly \n specified using the other metrics flags. \n [Default: not set].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics files flags:\n --ak file Output filename for the axial kurtosis.\n --mk file Output filename for the mean kurtosis.\n --rk file Output filename for the radial kurtosis.\n --msk file Output filename for the mean signal kurtosis.\n --dki_fa file Output filename for the fractional anisotropy from DKI.\n --dki_md file Output filename for the mean diffusivity from DKI.\n --dki_ad file Output filename for the axial diffusivity from DKI.\n --dki_rd file Output filename for the radial diffusivity from DKI.\n\nQuality control files flags:\n --dki_residual file Output filename for the map of the residual of the tensor fit.\n Note. In previous versions, the resulting map was normalized. 
\n It is not anymore.\n --msd file Output filename for the mean signal diffusion (powder-average).\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "direction", - "direction" - ], - [ - "diffusion", - "diffusion" - ], - [ - "Data", - "data", - "data" - ], - [ - "higher", - "higher" - ], - [ - "axial", - "axial" - ], - [ - "maps", - "map" - ], - [ - "white", - "white" - ], - [ - "region", - "regions", - "regions" - ], - [ - "large", - "large" - ], - [ - "create", - "creating" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ], - [ - "reported", - "reported" - ], - [ - "precision", - "precision" - ], - [ - "comprised", - "consist" - ], - [ - "high", - "high" - ], - [ - "signal", - "signal" - ], - [ - "average", - "average" - ], - [ - "positive", - "negative" - ], - [ - "imaging", - "imaging" - ], - [ - "high", - "low" - ], - [ - "maps", - "maps" - ], - [ - "voxel", - "voxels" - ], - [ - "matter", - "matter" - ], - [ - "binary", - "binary" - ], - [ - "parameters", - "parameters" - ] - ], - "keywords": [] - }, - { - "name": "scil_dti_convert_tensors", - "docstring": "Conversion of tensors (the 6 values from the triangular matrix) between various\nsoftware standards. We cannot discover the input format type, user must know\nhow the tensors were created.", - "help": "usage: scil_dti_convert_tensors.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file out_file in_format out_format\n\nConversion of tensors (the 6 values from the triangular matrix) between various\nsoftware standards. We cannot discover the input format type, user must know\nhow the tensors were created.\n\n Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639\n \n MRTRIX's order is : [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]\n Shape: [i, j , k, 6].\n Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html\n \n ANTS's order ('nifti format') is : [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].\n Shape: [i, j , k, 1, 6] (Careful, file is 5D).\n Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software\n \n FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide\n (Also used for the Fibernavigator)\n \n\npositional arguments:\n in_file Input tensors filename.\n out_file Output tensors filename.\n in_format Input format. Choices: ['fsl', 'nifti', 'mrtrix', 'dipy']\n out_format Output format. Choices: ['fsl', 'nifti', 'mrtrix', 'dipy']\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "variety", - "various" - ], - [ - "diffusion", - "diffusion" - ], - [ - "Data", - "data", - "data" - ], - [ - "shape", - "shape" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_dti_metrics", - "docstring": "Script to compute all of the Diffusion Tensor Imaging (DTI) metrics.\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section.\n\nIf --not_all is set, only the metrics specified explicitly by the flags\nwill be output. 
The available metrics are:\n\nfractional anisotropy (FA), geodesic anisotropy (GA), axial diffusivity (AD),\nradial diffusivity (RD), mean diffusivity (MD), mode, red-green-blue colored\nFA (rgb), principal tensor e-vector and tensor coefficients (dxx, dxy, dxz,\ndyy, dyz, dzz).\n\nFor all the quality control metrics such as residual, physically implausible\nsignals, pulsation and misalignment artifacts, see\n[J-D Tournier, S. Mori, A. Leemans. Diffusion Tensor Imaging and Beyond.\nMRM 2011].\n\nFormerly: scil_compute_dti_metrics.py", - "help": "usage: scil_dti_metrics.py [-h] [-f] [--mask MASK] [--method method_name]\n [--not_all] [--ad file] [--evecs file]\n [--evals file] [--fa file] [--ga file] [--md file]\n [--mode file] [--norm file] [--rgb file]\n [--rd file] [--tensor file]\n [--tensor_format {fsl,nifti,mrtrix,dipy}]\n [--non-physical file] [--pulsation string]\n [--residual file] [--b0_threshold thr]\n [--skip_b0_check] [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nScript to compute all of the Diffusion Tensor Imaging (DTI) metrics.\n\nBy default, will output all available metrics, using default names. Specific\nnames can be specified using the metrics flags that are listed in the \"Metrics\nfiles flags\" section.\n\nIf --not_all is set, only the metrics specified explicitly by the flags\nwill be output. The available metrics are:\n\nfractional anisotropy (FA), geodesic anisotropy (GA), axial diffusivity (AD),\nradial diffusivity (RD), mean diffusivity (MD), mode, red-green-blue colored\nFA (rgb), principal tensor e-vector and tensor coefficients (dxx, dxy, dxz,\ndyy, dyz, dzz).\n\nFor all the quality control metrics such as residual, physically implausible\nsignals, pulsation and misalignment artifacts, see\n[J-D Tournier, S. Mori, A. Leemans. Diffusion Tensor Imaging and Beyond.\nMRM 2011].\n\nFormerly: scil_compute_dti_metrics.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computations and reconstruction. (Default: None)\n --method method_name Tensor fit method.\n WLS for weighted least squares\n LS for ordinary least squares\n NLLS for non-linear least-squares\n restore for RESTORE robust tensor fitting. (Default: WLS)\n --not_all If set, will only save the metrics explicitly specified using the other metrics flags. (Default: not set).\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n\nMetrics files flags:\n --ad file Output filename for the axial diffusivity.\n --evecs file Output filename for the eigenvectors of the tensor.\n --evals file Output filename for the eigenvalues of the tensor.\n --fa file Output filename for the fractional anisotropy.\n --ga file Output filename for the geodesic anisotropy.\n --md file Output filename for the mean diffusivity.\n --mode file Output filename for the mode.\n --norm file Output filename for the tensor norm.\n --rgb file Output filename for the colored fractional anisotropy.\n --rd file Output filename for the radial diffusivity.\n --tensor file Output filename for the tensor coefficients.\n --tensor_format {fsl,nifti,mrtrix,dipy}\n Format used for the tensors saved in --tensor file.(default: fsl)\n \n Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639\n \n MRTRIX's order is : [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]\n Shape: [i, j , k, 6].\n Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html\n \n ANTS's order ('nifti format') is : [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].\n Shape: [i, j , k, 1, 6] (Careful, file is 5D).\n Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software\n \n FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]\n Shape: [i, j , k, 6].\n Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide\n (Also used for the Fibernavigator)\n \n\nQuality control files flags:\n --non-physical file Output filename for the voxels with physically implausible signals \n where the mean of b=0 images is below one or more diffusion-weighted images.\n --pulsation string Standard deviation map across all diffusion-weighted images and across b=0 images if more than one is available.\n Shows pulsation and misalignment artifacts.\n --residual file Output filename for the map of the residual of the tensor fit.\n", - "synonyms": [ - [ - "blue", - "red", - "blue" - ], - [ - "axial", - "axial" - ], - [ - "order", - "order" - ], - [ - "methods", - "method" - ], - [ - "signal", - "signals" - ], - [ - "maps", - "map" - ], - [ - "weighted", - "weighted" - ], - [ - "principal", - "principal" - ], - [ - "imaging", - "imaging" - ], - [ - "high", - "high" - ], - [ - "diffusion", - "diffusion" - ], - [ - "Data", - "data", - "data" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "blue", - "red", - "red" - ], - [ - "voxel", - "voxels" - ], - [ - "shape", - "shape" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "green", - "green" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_apply_bias_field", - "docstring": "Apply bias field correction to DWI. This script doesn't compute the bias\nfield itself. It ONLY applies an existing bias field. Please use the ANTs\nN4BiasFieldCorrection executable to compute the bias field.\n\nFormerly: scil_apply_bias_field_on_dwi.py", - "help": "usage: scil_dwi_apply_bias_field.py [-h] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bias_field out_name\n\nApply bias field correction to DWI. This script doesn't compute the bias\nfield itself. It ONLY applies an existing bias field. 
Please use the ANTs\nN4BiasFieldCorrection executable to compute the bias field.\n\nFormerly: scil_apply_bias_field_on_dwi.py\n\npositional arguments:\n in_dwi DWI Nifti image.\n in_bias_field Bias field Nifti image.\n out_name Corrected DWI Nifti image.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Apply bias field correction only in the region defined by the mask.\n If this is not given, the bias field is still applied only in non-background data \n (i.e. where the dwi is not 0).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "region", - "regions", - "region" - ], - [ - "image", - "image" - ], - [ - "applied", - "apply" - ], - [ - "Data", - "data", - "data" - ], - [ - "applied", - "applied" - ], - [ - "level", - "level" - ], - [ - "defined", - "defined" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_compute_snr", - "docstring": "Script to compute signal to noise ratio (SNR) in a region of interest (ROI)\nof a DWI volume.\n\nIt will compute the SNR for all DWI volumes of the input image separately.\nThe output will contain the SNR, which is the ratio of\nmean(signal) / std(noise).\nThe mean of the signal is computed inside the mask.\nThe standard deviation of the noise is estimated inside the noise_mask\nor inside the same mask if a noise_map is provided.\nIf it's not supplied, it will be estimated using the data outside the brain,\ncomputed with Dipy's median_otsu.\n\nIf verbose is True, the SNR for every DWI volume will be output.\n\nThis works best in a well-defined ROI such as the corpus callosum.\nIt is heavily dependent on the ROI and its quality.\n\nWe highly recommend using a noise_map if you can acquire one.\nSee refs [1, 2] that describe the noise map acquisition.\n[1] St-Jean, et al (2016). Non Local Spatial and Angular Matching...\n https://doi.org/10.1016/j.media.2016.02.010\n[2] Reymbaut, et al (2021). Magic DIAMOND...\n https://doi.org/10.1016/j.media.2021.101988\n\nFormerly: scil_snr_in_roi.py", - "help": "usage: scil_dwi_compute_snr.py [-h]\n [--noise_mask NOISE_MASK | --noise_map NOISE_MAP]\n [--b0_thr B0_THR] [--out_basename OUT_BASENAME]\n [--split_shells] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_mask\n\nScript to compute signal to noise ratio (SNR) in a region of interest (ROI)\nof a DWI volume.\n\nIt will compute the SNR for all DWI volumes of the input image separately.\nThe output will contain the SNR, which is the ratio of\nmean(signal) / std(noise).\nThe mean of the signal is computed inside the mask.\nThe standard deviation of the noise is estimated inside the noise_mask\nor inside the same mask if a noise_map is provided.\nIf it's not supplied, it will be estimated using the data outside the brain,\ncomputed with Dipy's median_otsu.\n\nIf verbose is True, the SNR for every DWI volume will be output.\n\nThis works best in a well-defined ROI such as the corpus callosum.\nIt is heavily dependent on the ROI and its quality.\n\nWe highly recommend using a noise_map if you can acquire one.\nSee refs [1, 2] that describe the noise map acquisition.\n[1] St-Jean, et al (2016). Non Local Spatial and Angular Matching...\n https://doi.org/10.1016/j.media.2016.02.010\n[2] Reymbaut, et al (2021). 
Magic DIAMOND...\n https://doi.org/10.1016/j.media.2021.101988\n\nFormerly: scil_snr_in_roi.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n in_mask Binary mask of the region used to estimate SNR.\n\noptions:\n -h, --help show this help message and exit\n --b0_thr B0_THR All b-values with values less than or equal to b0_thr are considered as b0s i.e. without diffusion weighting. [0.0]\n --out_basename OUT_BASENAME\n Path and prefix for the various saved file.\n --split_shells SNR will be split into shells.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMasks options:\n --noise_mask NOISE_MASK\n Binary mask used to estimate the noise from the DWI.\n --noise_map NOISE_MAP\n Noise map.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "region", - "regions", - "region" - ], - [ - "variety", - "various" - ], - [ - "spatial", - "spatial" - ], - [ - "maps", - "map" - ], - [ - "corpus", - "corpus" - ], - [ - "image", - "image" - ], - [ - "diffusion", - "diffusion" - ], - [ - "volume", - "volumes", - "volumes" - ], - [ - "highly", - "highly" - ], - [ - "signal", - "signal" - ], - [ - "Data", - "data", - "data" - ], - [ - "true", - "true" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ], - [ - "defined", - "defined" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_concatenate", - "docstring": "Concatenate DWI, bval and bvecs together. File must be specified in matching\norder. Default data type will be the same as the first input DWI.\n\nFormerly: scil_concatenate_dwi.py", - "help": "usage: scil_dwi_concatenate.py [-h] [--in_dwis IN_DWIS [IN_DWIS ...]]\n [--in_bvals IN_BVALS [IN_BVALS ...]]\n [--in_bvecs IN_BVECS [IN_BVECS ...]]\n [--data_type DATA_TYPE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dwi out_bval out_bvec\n\nConcatenate DWI, bval and bvecs together. File must be specified in matching\norder. Default data type will be the same as the first input DWI.\n\nFormerly: scil_concatenate_dwi.py\n\npositional arguments:\n out_dwi The name of the output DWI file.\n out_bval The name of the output b-values file (.bval).\n out_bvec The name of the output b-vectors file (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n The DWI file (.nii) to concatenate.\n --in_bvals IN_BVALS [IN_BVALS ...]\n The b-values files in FSL format (.bval).\n --in_bvecs IN_BVECS [IN_BVECS ...]\n The b-vectors files in FSL format (.bvec).\n --data_type DATA_TYPE\n Data type of the output image. Use the format: uint8, int16, int/float32, int/float64.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "image", - "image" - ], - [ - "Data", - "data", - "data" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_convert_FDF", - "docstring": "Converts a Varian FDF file or directory to a nifti file.\nIf the procpar contains diffusion information, it will be saved as bval and\nbvec in the same folder as the output file.\n\nex: scil_dwi_convert_FDF.py semsdw/b0_folder/ semsdw/dwi_folder/ dwi.nii.gz --bval dwi.bval --bvec dwi.bvec -f\n\nFormerly: scil_convert_fdf.py", - "help": "usage: scil_dwi_convert_FDF.py [-h] [--bval BVAL] [--bvec BVEC]\n [--flip dimension [dimension ...]]\n [--swap dimension [dimension ...]]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_b0_path in_dwi_path out_path\n\nConverts a Varian FDF file or directory to a nifti file.\nIf the procpar contains diffusion information, it will be saved as bval and\nbvec in the same folder as the output file.\n\nex: scil_dwi_convert_FDF.py semsdw/b0_folder/ semsdw/dwi_folder/ dwi.nii.gz --bval dwi.bval --bvec dwi.bvec -f\n\nFormerly: scil_convert_fdf.py\n\npositional arguments:\n in_b0_path Path to the b0 FDF file or folder to convert.\n in_dwi_path Path to the DWI FDF file or folder to convert.\n out_path Path to the nifti file to write on disk.\n\noptions:\n -h, --help show this help message and exit\n --bval BVAL Path to the bval file to write on disk.\n --bvec BVEC Path to the bvec file to write on disk.\n --flip dimension [dimension ...]\n The axes you want to flip. eg: to flip the x and y axes use: x y. [None]\n --swap dimension [dimension ...]\n The axes you want to swap. eg: to swap the x and y axes use: x y. [None]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "diffusion", - "diffusion" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_detect_volume_outliers", - "docstring": "This script simply finds the 3 closest angular neighbors of each direction\n(per shell) and computes the voxel-wise correlation.\nIf the angles or correlations to neighbors are below the shell average (by\nargs.std_scale x STD) it will flag the volume as a potential outlier.\n\nThis script supports multi-shell data, but each shell is independent and detected\nusing the --b0_threshold parameter.\n\nThis script can be run before any processing to identify potential problems\nbefore launching pre-processing.", - "help": "usage: scil_dwi_detect_volume_outliers.py [-h] [--std_scale STD_SCALE]\n [--b0_threshold thr]\n [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nThis script simply finds the 3 closest angular neighbors of each direction\n(per shell) and computes the voxel-wise correlation.\nIf the angles or correlations to neighbors are below the shell average (by\nargs.std_scale x STD) it will flag the volume as a potential outlier.\n\nThis script supports multi-shell data, but each shell is independent and detected\nusing the --b0_threshold parameter.\n\nThis script can be run before any processing to identify potential problems\nbefore launching pre-processing.\n\npositional arguments:\n in_dwi The DWI file (.nii).\n in_bval The b-values file in FSL format (.bval).\n in_bvec The b-vectors file in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --std_scale STD_SCALE\n How many standard deviations from the mean are required to be considered an outlier. [2.0]\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "direction", - "direction" - ], - [ - "high", - "high" - ], - [ - "parameter", - "parameter" - ], - [ - "Data", - "data", - "data" - ], - [ - "potential", - "potential" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "average", - "average" - ], - [ - "considered", - "considered" - ], - [ - "voxel", - "voxel" - ], - [ - "processing", - "processing" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_extract_b0", - "docstring": "Extract B0s from DWI, based on the bval and bvec information.\n\nThe default behavior is to save the first b0 of the series.\n\nFormerly: scil_extract_b0.py", - "help": "usage: scil_dwi_extract_b0.py [-h]\n [--all | --mean | --cluster-mean | --cluster-first]\n [--block-size INT] [--single-image]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_b0\n\nExtract B0s from DWI, based on the bval and bvec information.\n\nThe default behavior is to save the first b0 of the series.\n\nFormerly: scil_extract_b0.py\n\npositional arguments:\n in_dwi DWI Nifti image.\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-values filename, in FSL format (.bvec).\n out_b0 Output b0 file(s).\n\noptions:\n -h, --help show this help message and exit\n --block-size INT, -s INT\n Load the data using this block size. Useful\n when the data is too large to be loaded in memory.\n --single-image If output b0 volume has multiple time points, only outputs a single \n image instead of a numbered series of images.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nOptions in the case of multiple b0s.:\n --all Extract all b0s. Index number will be appended to the output file.\n --mean Extract mean b0.\n --cluster-mean Extract mean of each continuous cluster of b0s.\n --cluster-first Extract first b0 of each continuous cluster of b0s.\n", - "synonyms": [ - [ - "image", - "image" - ], - [ - "high", - "high" - ], - [ - "Data", - "data", - "data" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "large", - "large" - ], - [ - "based", - "based" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "considered", - "considered" - ], - [ - "memory", - "memory" - ], - [ - "level", - "level" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_extract_shell", - "docstring": "Extracts the DWI volumes that are on specific b-value shells. Many shells\ncan be extracted at once by specifying multiple b-values. 
The extracted\nvolumes are in the same order as in the original file.\n\nIf the b-values of a shell are not all identical, use the --tolerance\nargument to adjust the accepted interval. For example, a b-value of 2000\nand a tolerance of 20 will extract all volumes with a b-values from 1980 to\n2020.\n\nFiles that are too large to be loaded in memory can still be processed by\nsetting the --block-size argument. A block size of X means that X DWI volumes\nare loaded at a time for processing.\n\nFormerly: scil_extract_dwi_shell.py", - "help": "usage: scil_dwi_extract_shell.py [-h] [--out_indices OUT_INDICES]\n [--block-size INT] [--tolerance INT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_bvals_to_extract\n [in_bvals_to_extract ...] out_dwi out_bval\n out_bvec\n\nExtracts the DWI volumes that are on specific b-value shells. Many shells\ncan be extracted at once by specifying multiple b-values. The extracted\nvolumes are in the same order as in the original file.\n\nIf the b-values of a shell are not all identical, use the --tolerance\nargument to adjust the accepted interval. For example, a b-value of 2000\nand a tolerance of 20 will extract all volumes with a b-values from 1980 to\n2020.\n\nFiles that are too large to be loaded in memory can still be processed by\nsetting the --block-size argument. A block size of X means that X DWI volumes\nare loaded at a time for processing.\n\nFormerly: scil_extract_dwi_shell.py\n\npositional arguments:\n in_dwi The DW image file to split.\n in_bval The b-values file in FSL format (.bval).\n in_bvec The b-vectors file in FSL format (.bvec).\n in_bvals_to_extract The list of b-values to extract. For example 0 2000.\n out_dwi The name of the output DWI file.\n out_bval The name of the output b-value file (.bval).\n out_bvec The name of the output b-vector file (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --out_indices OUT_INDICES\n Optional filename for valid indices in input dwi volume\n --block-size INT, -s INT\n Loads the data using this block size. Useful\n when the data is too large to be loaded in memory.\n --tolerance INT, -t INT\n The tolerated gap between the b-values to extract\n and the actual b-values. [20]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "image", - "image" - ], - [ - "volume", - "volumes", - "volumes" - ], - [ - "Data", - "data", - "data" - ], - [ - "large", - "large" - ], - [ - "valid", - "valid" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "memory", - "memory" - ], - [ - "processing", - "processing" - ], - [ - "specific", - "specific" - ], - [ - "level", - "level" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_powder_average", - "docstring": "Script to compute powder average (mean diffusion weighted image) from set of\ndiffusion images.\n\nBy default will output an average image calculated from all images with\nnon-zero bvalue.\n\nSpecify --bvalue to output an image for a single shell\n\nScript currently does not take into account the diffusion gradient directions\nbeing averaged.\n\nFormerly: scil_compute_powder_average.py", - "help": "usage: scil_dwi_powder_average.py [-h] [-f] [--mask file] [--b0_thr B0_THR]\n [--shells SHELLS [SHELLS ...]]\n [--shell_thr SHELL_THR]\n [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval out_avg\n\nScript to compute powder average (mean diffusion weighted image) from set of\ndiffusion images.\n\nBy default will output an average image calculated from all images with\nnon-zero bvalue.\n\nSpecify --bvalue to output an image for a single shell\n\nScript currently does not take into account the diffusion gradient directions\nbeing averaged.\n\nFormerly: scil_compute_powder_average.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n out_avg Path of the output file.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --mask file Path to a binary mask.\n Only data inside the mask will be used for powder avg. (Default: None)\n --b0_thr B0_THR Exclude b0 volumes from powder average with bvalue less than specified threshold.\n (Default: remove volumes with bvalue < 50\n --shells SHELLS [SHELLS ...]\n bvalue (shells) to include in powder average passed as a list \n (e.g. --shells 1000 2000). If not specified will include all volumes with a non-zero bvalue.\n --shell_thr SHELL_THR\n Include volumes with bvalue +- the specified threshold.\n (Default: [50]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "weighted", - "weighted" - ], - [ - "image", - "image" - ], - [ - "diffusion", - "diffusion" - ], - [ - "volume", - "volumes", - "volumes" - ], - [ - "Data", - "data", - "data" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "average", - "average" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_prepare_eddy_command", - "docstring": "Prepare a typical command for eddy and create the necessary files. When using\nmultiple acquisitions and/or opposite phase directions, images, b-values and\nb-vectors should be merged together using scil_dwi_concatenate.py. 
If using\ntopup prior to calling this script, images should be concatenated in the same\norder as the b0s used with prepare_topup.\n\nFormerly: scil_prepare_eddy_command.py", - "help": "usage: scil_dwi_prepare_eddy_command.py [-h] [--n_reverse N_REVERSE]\n [--topup TOPUP]\n [--topup_params TOPUP_PARAMS]\n [--eddy_cmd {eddy_openmp,eddy_cuda,eddy_cuda8.0,eddy_cuda9.1,eddy_cuda10.2,eddy,eddy_cpu}]\n [--b0_thr B0_THR]\n [--encoding_direction {x,y,z}]\n [--readout READOUT]\n [--slice_drop_correction]\n [--lsr_resampling]\n [--out_directory OUT_DIRECTORY]\n [--out_prefix OUT_PREFIX]\n [--out_script] [--fix_seed]\n [--eddy_options EDDY_OPTIONS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bvals in_bvecs in_mask\n\nPrepare a typical command for eddy and create the necessary files. When using\nmultiple acquisitions and/or opposite phase directions, images, b-values and\nb-vectors should be merged together using scil_dwi_concatenate.py. If using\ntopup prior to calling this script, images should be concatenated in the same\norder as the b0s used with prepare_topup.\n\nFormerly: scil_prepare_eddy_command.py\n\npositional arguments:\n in_dwi Input DWI Nifti image. If using multiple acquisition and/or opposite phase directions, please merge in the same order as for prepare_topup using scil_dwi_concatenate.py.\n in_bvals Input b-values file in FSL format.\n in_bvecs Input b-vectors file in FSL format.\n in_mask Binary brain mask.\n\noptions:\n -h, --help show this help message and exit\n --n_reverse N_REVERSE\n Number of reverse phase volumes included in the DWI image [0].\n --topup TOPUP Topup output name. If given, apply topup during eddy.\n Should be the same as --out_prefix from scil_dwi_prepare_topup_command.py.\n --topup_params TOPUP_PARAMS\n Parameters file (typically named acqparams) used to run topup.\n --eddy_cmd {eddy_openmp,eddy_cuda,eddy_cuda8.0,eddy_cuda9.1,eddy_cuda10.2,eddy,eddy_cpu}\n Eddy command [eddy_openmp].\n --b0_thr B0_THR All b-values with values less than or equal to b0_thr are considered\n as b0s i.e. without diffusion weighting [20].\n --encoding_direction {x,y,z}\n Acquisition direction, default is AP-PA [y].\n --readout READOUT Total readout time from the DICOM metadata [0.062].\n --slice_drop_correction\n If set, will activate eddy's outlier correction,\n which includes slice drop correction.\n --lsr_resampling Perform least-square resampling, allowing eddy to combine forward and reverse phase acquisitions for better reconstruction. Only works if directions and b-values are identical in both phase direction.\n --out_directory OUT_DIRECTORY\n Output directory for eddy files [.].\n --out_prefix OUT_PREFIX\n Prefix of the eddy-corrected DWI [dwi_eddy_corrected].\n --out_script If set, will output a .sh script (eddy.sh).\n else, will output the lines to the terminal [False].\n --fix_seed If set, will use the fixed seed strategy for eddy.\n Enhances reproducibility.\n --eddy_options EDDY_OPTIONS\n Additional options you want to use to run eddy.\n Add these options using quotes (i.e. \"--ol_nstd=6 --mb=4\").\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "direction", - "direction" - ], - [ - "create", - "create" - ], - [ - "image", - "image" - ], - [ - "diffusion", - "diffusion" - ], - [ - "applied", - "apply" - ], - [ - "volume", - "volumes", - "volumes" - ], - [ - "binary", - "binary" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ], - [ - "total", - "total" - ], - [ - "parameters", - "parameters" - ], - [ - "false", - "false" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_prepare_topup_command", - "docstring": "Prepare a typical command for topup and create the necessary files.\nThe reversed b0 must be in a different file.\n\nFormerly: scil_prepare_topup_command.py", - "help": "usage: scil_dwi_prepare_topup_command.py [-h] [--config CONFIG] [--synb0]\n [--encoding_direction {x,y,z}]\n [--readout READOUT]\n [--out_b0s OUT_B0S]\n [--out_directory OUT_DIRECTORY]\n [--out_prefix OUT_PREFIX]\n [--out_params OUT_PARAMS]\n [--out_script]\n [--topup_options TOPUP_OPTIONS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_forward_b0 in_reverse_b0\n\nPrepare a typical command for topup and create the necessary files.\nThe reversed b0 must be in a different file.\n\nFormerly: scil_prepare_topup_command.py\n\npositional arguments:\n in_forward_b0 Input b0 Nifti image with forward phase encoding.\n in_reverse_b0 Input b0 Nifti image with reversed phase encoding.\n\noptions:\n -h, --help show this help message and exit\n --config CONFIG Topup config file [b02b0.cnf].\n --synb0 If set, will use SyNb0 custom acqparams file.\n --encoding_direction {x,y,z}\n Acquisition direction of the forward b0 image, default is AP [y].\n --readout READOUT Total readout time from the DICOM metadata [0.062].\n --out_b0s OUT_B0S Output fused b0 file [fused_b0s.nii.gz].\n --out_directory OUT_DIRECTORY\n Output directory for topup files [.].\n --out_prefix OUT_PREFIX\n Prefix of the topup results [topup_results].\n --out_params OUT_PARAMS\n Filename for the acquisition parameters file [acqparams.txt].\n --out_script If set, will output a .sh script (topup.sh).\n else, will output the lines to the terminal [False].\n --topup_options TOPUP_OPTIONS\n Additional options you want to use to run topup.\n Add these options using quotes (i.e. \"--fwhm=6 --miter=4\").\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "direction", - "direction" - ], - [ - "create", - "create" - ], - [ - "image", - "image" - ], - [ - "level", - "level" - ], - [ - "total", - "total" - ], - [ - "parameters", - "parameters" - ], - [ - "false", - "false" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_reorder_philips", - "docstring": "Re-order gradients according to the original table (Philips).\nThis script is not needed for version 5.6 and higher.\n\nFormerly: scil_reorder_dwi_philips.py", - "help": "usage: scil_dwi_reorder_philips.py [-h] [--json JSON]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_table\n out_basename\n\nRe-order gradients according to the original table (Philips).\nThis script is not needed for version 5.6 and higher.\n\nFormerly: scil_reorder_dwi_philips.py\n\npositional arguments:\n in_dwi Input dwi file.\n in_bval Input bval FSL format.\n in_bvec Input bvec FSL format.\n in_table Original Philips table - first line is skipped.\n out_basename Basename output file.\n\noptions:\n -h, --help show this help message and exit\n --json JSON If you give a json file, it will check if you need to reorder your Philips dwi.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "level", - "level" - ], - [ - "higher", - "higher" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_split_by_indices", - "docstring": "Splits the DWI image at certain indices along the last dimension (b-values).\nMany indices can be given at once by specifying multiple values. The split\nvolumes are in the same order as in the original file. Also outputs the\ncorresponding .bval and .bvec files.\n\nThis script can be useful for splitting images at places where a b-value\nextraction does not work. For instance, if one wants to split the x first\nb-1500s from the rest of the b-1500s in an image, simply put x as an index.\n\nFormerly: scil_split_image.py", - "help": "usage: scil_dwi_split_by_indices.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_basename\n split_indices [split_indices ...]\n\nSplits the DWI image at certain indices along the last dimension (b-values).\nMany indices can be given at once by specifying multiple values. The split\nvolumes are in the same order as in the original file. Also outputs the\ncorresponding .bval and .bvec files.\n\nThis script can be useful for splitting images at places where a b-value\nextraction does not work. For instance, if one wants to split the x first\nb-1500s from the rest of the b-1500s in an image, simply put x as an index.\n\nFormerly: scil_split_image.py\n\npositional arguments:\n in_dwi The DW image file to split.\n in_bval The b-values file in FSL format (.bval).\n in_bvec The b-vectors file in FSL format (.bvec).\n out_basename The basename of the output files. Index numbers will be appended to out_basename. For example, if split_indices were 3 10, the files would be saved as out_basename_0_2, out_basename_3_10, out_basename_11_20, where the size of the last dimension is 21 in this example.\n split_indices The list of indices where to split the image. For example 3 10. This would split the image in three parts, such as [:3], [3:10], [10:]. 
Indices must be in increasing order.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "work", - "work" - ], - [ - "image", - "image" - ], - [ - "volume", - "volumes", - "volumes" - ], - [ - "level", - "level" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_dwi_to_sh", - "docstring": "Script to compute the SH coefficient directly on the raw DWI signal.\n\nFormerly: scil_compute_sh_from_signal.py", - "help": "usage: scil_dwi_to_sh.py [-h] [--sh_order SH_ORDER]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--smooth SMOOTH] [--use_attenuation] [--mask MASK]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_sh\n\nScript to compute the SH coefficient directly on the raw DWI signal.\n\nFormerly: scil_compute_sh_from_signal.py\n\npositional arguments:\n in_dwi Path of the dwi volume.\n in_bval Path of the b-value file, in FSL format.\n in_bvec Path of the b-vector file, in FSL format.\n out_sh Name of the output SH file to save.\n\noptions:\n -h, --help show this help message and exit\n --sh_order SH_ORDER SH order to fit (int). [4]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --smooth SMOOTH Lambda-regularization coefficient in the SH fit (float). [0.006]\n --use_attenuation If set, will use signal attenuation before fitting the SH (i.e. divide by the b0).\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computations and reconstruction \n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "high", - "high" - ], - [ - "signal", - "signal" - ], - [ - "Data", - "data", - "data" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_fodf_max_in_ventricles", - "docstring": "Script to compute the maximum fODF in the ventricles. The ventricles are\nestimated from an MD and FA threshold.\n\nThis allows clipping the noise of fODF using an absolute threshold.\n\nFormerly: scil_compute_fodf_max_in_ventricles.py", - "help": "usage: scil_fodf_max_in_ventricles.py [-h] [--fa_threshold FA_THRESHOLD]\n [--md_threshold MD_THRESHOLD]\n [--max_value_output file]\n [--mask_output file] [--small_dims]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n fODFs FA MD\n\nScript to compute the maximum fODF in the ventricles. The ventricles are\nestimated from an MD and FA threshold.\n\nThis allows clipping the noise of fODF using an absolute threshold.\n\nFormerly: scil_compute_fodf_max_in_ventricles.py\n\npositional arguments:\n fODFs Path of the fODF volume in spherical harmonics (SH).\n FA Path to the FA volume.\n MD Path to the mean diffusivity (MD) volume.\n\noptions:\n -h, --help show this help message and exit\n --fa_threshold FA_THRESHOLD\n Maximal threshold of FA (voxels under that threshold are considered \n for evaluation. [0.1]).\n --md_threshold MD_THRESHOLD\n Minimal threshold of MD in mm2/s (voxels above that threshold are \n considered for evaluation. [0.003]).\n --max_value_output file\n Output path for the text file containing the value. If not set, the \n file will not be saved.\n --mask_output file Output path for the ventricle mask. If not set, the mask \n will not be saved.\n --small_dims If set, takes the full range of data to search the max fODF amplitude \n in ventricles. Useful when the data has small dimensions.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Dell'Acqua, Flavio, et al. \"Can spherical deconvolution provide more\n information than fiber orientations? 
Hindrance modulated orientational\n anisotropy, a true-tract specific index to characterize white matter\n diffusion.\" Human brain mapping 34.10 (2013): 2464-2483.\n", - "synonyms": [ - [ - "human", - "human" - ], - [ - "white", - "white" - ], - [ - "diffusion", - "diffusion" - ], - [ - "tract", - "tracts", - "tract" - ], - [ - "orientation", - "orientations" - ], - [ - "Data", - "data", - "data" - ], - [ - "large", - "small" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "true", - "true" - ], - [ - "voxel", - "voxels" - ], - [ - "matter", - "matter" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_fodf_memsmt", - "docstring": "Script to compute multi-encoding multi-shell multi-tissue (memsmt)\nConstrained Spherical Deconvolution ODFs.\n\nIn order to operate, the script only needs the data from one type of b-tensor\nencoding. However, giving only a spherical one will not produce good fODFs, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest always using at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombination with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable.\n\nAll of `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\n>>> scil_fodf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_fodf.py", - "help": "usage: scil_fodf_memsmt.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--sh_order int] [--mask MASK]\n [--tolerance tol] [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--not_all] [--wm_out_fODF file]\n [--gm_out_fODF file] [--csf_out_fODF file]\n [--vf file] [--vf_rgb file]\n in_wm_frf in_gm_frf in_csf_frf\n\nScript to compute multi-encoding multi-shell multi-tissue (memsmt)\nConstrained Spherical Deconvolution ODFs.\n\nIn order to operate, the script only needs the data from one type of b-tensor\nencoding. However, giving only a spherical one will not produce good fODFs, as\nit only probes spherical shapes. 
As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest always using at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombination with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable.\n\nAll of `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the\nsame number of arguments. Be sure to keep the same order of encodings\nthroughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\n>>> scil_fodf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_fodf.py\n\npositional arguments:\n in_wm_frf Text file of WM response function.\n in_gm_frf Text file of GM response function.\n in_csf_frf Text file of CSF response function.\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as dwi, bval and bvec inputs.\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. 
MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags. (Default: False)\n\nFile flags:\n --wm_out_fODF file Output filename for the WM fODF coefficients.\n --gm_out_fODF file Output filename for the GM fODF coefficients.\n --csf_out_fODF file Output filename for the CSF fODF coefficients.\n --vf file Output filename for the volume fractions map.\n --vf_rgb file Output filename for the volume fractions map in rgb.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "variance", - "variance" - ], - [ - "maps", - "map" - ], - [ - "supported", - "supported" - ], - [ - "work", - "work" - ], - [ - "image", - "image" - ], - [ - "high", - "high" - ], - [ - "diffusion", - "diffusion" - ], - [ - "Data", - "data", - "data" - ], - [ - "processes", - "processes" - ], - [ - "function", - "functions", - "function" - ], - [ - "based", - "based" - ], - [ - "shape", - "shape" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "specific", - "specific" - ], - [ - "level", - "level" - ], - [ - "false", - "false" - ], - [ - "analysis", - "analysis" - ] - ], - "keywords": [] - }, - { - "name": "scil_fodf_metrics", - "docstring": "Script to compute the maximum Apparent Fiber Density (AFD), the fiber ODFs\norientations, values and indices (peaks, peak_values, peak_indices), the Number\nof Fiber Orientations (NuFO) maps from fiber ODFs and the RGB map.\n\nAFD_max map is the maximal fODF amplitude for each voxel.\n\nNuFO is the number of maxima of the fODF with an ABSOLUTE amplitude above\nthe threshold set using --at, AND an amplitude above the RELATIVE threshold\nset using --rt.\n\nThe --at argument should be set to a value which is 1.5 times the maximal\nvalue of the fODF in the ventricles. This can be obtained with the\nscil_fodf_max_in_ventricles.py script.\n\nIf the --abs_peaks_and_values argument is set, the peaks are all normalized\nand the peak_values are equal to the actual fODF amplitude of the peaks. By\ndefault, the script max-normalizes the peak_values for each voxel and\nmultiplies the peaks by peak_values.\n\nBy default, will output all possible files, using default names. Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Raffelt et al. 
NeuroImage 2012] and [Dell'Acqua et al HBM 2013] for the\ndefinitions.\n\nFormerly: scil_compute_fodf_metrics.py", - "help": "usage: scil_fodf_metrics.py [-h] [--sphere string] [--mask] [--at A_THRESHOLD]\n [--rt R_THRESHOLD] [--abs_peaks_and_values]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR] [-f]\n [--not_all] [--afd_max file] [--afd_total file]\n [--afd_sum file] [--nufo file] [--rgb file]\n [--peaks file] [--peak_values file]\n [--peak_indices file]\n in_fODF\n\nScript to compute the maximum Apparent Fiber Density (AFD), the fiber ODFs\norientations, values and indices (peaks, peak_values, peak_indices), the Number\nof Fiber Orientations (NuFO) maps from fiber ODFs and the RGB map.\n\nAFD_max map is the maximal fODF amplitude for each voxel.\n\nNuFO is the number of maxima of the fODF with an ABSOLUTE amplitude above\nthe threshold set using --at, AND an amplitude above the RELATIVE threshold\nset using --rt.\n\nThe --at argument should be set to a value which is 1.5 times the maximal\nvalue of the fODF in the ventricles. This can be obtained with the\nscil_fodf_max_in_ventricles.py script.\n\nIf the --abs_peaks_and_values argument is set, the peaks are all normalized\nand the peak_values are equal to the actual fODF amplitude of the peaks. By\ndefault, the script max-normalizes the peak_values for each voxel and\nmultiplies the peaks by peak_values.\n\nBy default, will output all possible files, using default names. Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Raffelt et al. NeuroImage 2012] and [Dell'Acqua et al HBM 2013] for the\ndefinitions.\n\nFormerly: scil_compute_fodf_metrics.py\n\npositional arguments:\n in_fODF Path of the fODF volume in spherical harmonics (SH).\n\noptions:\n -h, --help show this help message and exit\n --sphere string Discrete sphere to use in the processing [repulsion724].\n --mask Path to a binary mask. Only the data inside the mask\n will be used for computations and reconstruction [None].\n --at A_THRESHOLD Absolute threshold on fODF amplitude. This value should be set to\n approximately 1.5 to 2 times the maximum fODF amplitude in isotropic voxels\n (i.e. ventricles).\n Use scil_fodf_max_in_ventricles.py to find the maximal value.\n See [Dell'Acqua et al HBM 2013] [0.0].\n --rt R_THRESHOLD Relative threshold on fODF amplitude in percentage [0.1].\n --abs_peaks_and_values\n If set, the peak_values are not max-normalized for each voxel, \n but rather they keep the actual fODF amplitude of the peaks. \n Also, the peaks are given as unit directions instead of being proportional to peak_values. [False]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -f Force overwriting of the output files.\n --not_all If set, only saves the files specified using the file flags [False].\n\nFile flags:\n --afd_max file Output filename for the AFD_max map.\n --afd_total file Output filename for the AFD_total map (SH coeff = 0).\n --afd_sum file Output filename for the sum of all peak contributions\n (sum of fODF lobes on the sphere).\n --nufo file Output filename for the NuFO map.\n --rgb file Output filename for the RGB map.\n --peaks file Output filename for the extracted peaks.\n --peak_values file Output filename for the extracted peak values.\n --peak_indices file Output filename for the generated peak indices on the sphere.\n", - "synonyms": [ - [ - "maps", - "map" - ], - [ - "maps", - "maps" - ], - [ - "orientation", - "orientations" - ], - [ - "Data", - "data", - "data" - ], - [ - "processes", - "processes" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "voxel", - "voxels" - ], - [ - "lobes", - "lobes" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "voxel", - "voxel" - ], - [ - "processing", - "processing" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ], - [ - "false", - "false" - ] - ], - "keywords": [] - }, - { - "name": "scil_fodf_msmt", - "docstring": "Script to compute Multishell Multi-tissue Constrained Spherical Deconvolution\nODFs.\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical\ndeconvolution for improved analysis of multi-shell diffusion\nMRI data. Neuroimage (2014)\n\nFormerly: scil_compute_msmt_fodf.py", - "help": "usage: scil_fodf_msmt.py [-h] [--sh_order int] [--mask] [--tolerance tol]\n [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [--not_all] [--wm_out_fODF file]\n [--gm_out_fODF file] [--csf_out_fODF file]\n [--vf file] [--vf_rgb file]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_wm_frf in_gm_frf in_csf_frf\n\nScript to compute Multishell Multi-tissue Constrained Spherical Deconvolution\nODFs.\n\nBy default, will output all possible files, using default names.\nSpecific names can be specified using the file flags specified in the\n\"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags\nwill be output.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical\ndeconvolution for improved analysis of multi-shell diffusion\nMRI data. Neuroimage (2014)\n\nFormerly: scil_compute_msmt_fodf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bval file, in FSL format.\n in_bvec Path of the bvec file, in FSL format.\n in_wm_frf Text file of WM response function.\n in_gm_frf Text file of GM response function.\n in_csf_frf Text file of CSF response function.\n\noptions:\n -h, --help show this help message and exit\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask Path to a binary mask. 
Only the data inside the mask will be used for computations and reconstruction.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --not_all If set, only saves the files specified using the file flags. (Default: False)\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nFile flags:\n --wm_out_fODF file Output filename for the WM fODF coefficients.\n --gm_out_fODF file Output filename for the GM fODF coefficients.\n --csf_out_fODF file Output filename for the CSF fODF coefficients.\n --vf file Output filename for the volume fractions map.\n --vf_rgb file Output filename for the volume fractions map in rgb.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "maps", - "map" - ], - [ - "high", - "high" - ], - [ - "diffusion", - "diffusion" - ], - [ - "Data", - "data", - "data" - ], - [ - "processes", - "processes" - ], - [ - "function", - "functions", - "function" - ], - [ - "based", - "based" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ], - [ - "false", - "false" - ], - [ - "analysis", - "analysis" - ] - ], - "keywords": [] - }, - { - "name": "scil_fodf_ssst", - "docstring": "Script to compute Constrained Spherical Deconvolution (CSD) fiber ODFs.\n\nSee [Tournier et al. NeuroImage 2007]\n\nFormerly: scil_compute_ssst_fodf.py", - "help": "usage: scil_fodf_ssst.py [-h] [--sh_order int] [--mask] [--b0_threshold thr]\n [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec frf_file out_fODF\n\nScript to compute Constrained Spherical Deconvolution (CSD) fiber ODFs.\n\nSee [Tournier et al. NeuroImage 2007]\n\nFormerly: scil_compute_ssst_fodf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n frf_file Path of the FRF file\n out_fODF Output path for the fiber ODF coefficients.\n\noptions:\n -h, --help show this help message and exit\n --sh_order int SH order used for the CSD. (Default: 8)\n --mask Path to a binary mask. 
Only the data inside the mask will be used \n for computations and reconstruction.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "high", - "high" - ], - [ - "diffusion", - "diffusion" - ], - [ - "Data", - "data", - "data" - ], - [ - "processes", - "processes" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_fodf_to_bingham", - "docstring": "Script for fitting a Bingham distribution to each fODF lobe, as described\nin [1].\n\nThe Bingham fit is saved, with each Bingham distribution described by 7\ncoefficients (for example, for a maximum number of lobes of 5, the number\nof coefficients is 7 x 5 = 35 -- less than the number of coefficients for\nSH of maximum order 8).\n\nUsing 12 threads, the execution takes approximately 30 minutes for a brain with\n1mm isotropic resolution.\n\nFormerly: scil_fit_bingham_to_fodf.py", - "help": "usage: scil_fodf_to_bingham.py [-h] [--max_lobes MAX_LOBES] [--at AT]\n [--rt RT] [--min_sep_angle MIN_SEP_ANGLE]\n [--max_fit_angle MAX_FIT_ANGLE] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [--processes NBR]\n [-f]\n in_sh out_bingham\n\nScript for fitting a Bingham distribution to each fODF lobe, as described\nin [1].\n\nThe Bingham fit is saved, with each Bingham distribution described by 7\ncoefficients (for example, for a maximum number of lobes of 5, the number\nof coefficients is 7 x 5 = 35 -- less than the number of coefficients for\nSH of maximum order 8).\n\nUsing 12 threads, the execution takes approximately 30 minutes for a brain with\n1mm isotropic resolution.\n\nFormerly: scil_fit_bingham_to_fodf.py\n\npositional arguments:\n in_sh Input SH image.\n out_bingham Output Bingham functions image.\n\noptions:\n -h, --help show this help message and exit\n --max_lobes MAX_LOBES\n Maximum number of lobes per voxel to extract. 
[5]\n --at AT Absolute threshold for peaks extraction. [0.0]\n --rt RT Relative threshold for peaks extraction. [0.1]\n --min_sep_angle MIN_SEP_ANGLE\n Minimum separation angle between two peaks. [25.0]\n --max_fit_angle MAX_FIT_ANGLE\n Maximum distance in degrees around a peak direction for fitting the Bingham function. [15.0]\n --mask MASK Optional mask file. Only SH inside the mask are fitted.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -f Force overwriting of the output files.\n\n[1] T. W. Riffert, J. Schreiber, A. Anwander, and T. R. Kn\u00f6sche, \u201cBeyond\n fractional anisotropy: Extraction of bundle-specific structural metrics\n from crossing fiber models,\u201d NeuroImage, vol. 100, pp. 176-191, Oct. 2014,\n doi: 10.1016/j.neuroimage.2014.06.015.\n\n[2] J. Schreiber, T. Riffert, A. Anwander, and T. R. Kn\u00f6sche, \u201cPlausibility\n Tracking: A method to evaluate anatomical connectivity and microstructural\n properties along fiber pathways,\u201d NeuroImage, vol. 90, pp. 163-178, Apr.\n 2014, doi: 10.1016/j.neuroimage.2014.01.002.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "lobe", - "lobe" - ], - [ - "methods", - "method" - ], - [ - "direction", - "direction" - ], - [ - "examine", - "evaluate" - ], - [ - "connectivity", - "connectivity" - ], - [ - "bundles", - "bundle" - ], - [ - "image", - "image" - ], - [ - "pathway", - "pathways", - "pathways" - ], - [ - "processes", - "processes" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "function", - "functions", - "function" - ], - [ - "lobes", - "lobes" - ], - [ - "structural", - "structural" - ], - [ - "anatomical", - "anatomy", - "anatomical" - ], - [ - "voxel", - "voxel" - ], - [ - "tracking", - "tracking" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ], - [ - "brain", - "brain" - ], - [ - "function", - "functions", - "functions" - ] - ], - "keywords": [] - }, - { - "name": "scil_freewater_maps", - "docstring": "Compute Free Water maps [1] using AMICO.\nThis script supports both single and multi-shell data.\n\nFormerly: scil_compute_freewater.py", - "help": "usage: scil_freewater_maps.py [-h] [--mask MASK] [--out_dir OUT_DIR]\n [--b_thr B_THR] [--para_diff PARA_DIFF]\n [--iso_diff ISO_DIFF]\n [--perp_diff_min PERP_DIFF_MIN]\n [--perp_diff_max PERP_DIFF_MAX]\n [--lambda1 LAMBDA1] [--lambda2 LAMBDA2]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--mouse] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec\n\nCompute Free Water maps [1] using AMICO.\nThis script supports both single and multi-shell data.\n\nFormerly: scil_compute_freewater.py\n\npositional arguments:\n in_dwi DWI file.\n in_bval b-values filename, in FSL format (.bval).\n in_bvec b-vectors filename, in FSL format (.bvec).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Brain mask filename.\n --out_dir OUT_DIR Output directory for the Free Water results. [results]\n --b_thr B_THR Limit value to consider that a b-value is on an\n existing shell. Above this limit, the b-value is\n placed on a new shell. This includes b0s values.\n --mouse If set, use mouse fitting profile.\n --processes NBR Number of sub-processes to start. Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. 
Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n\nModel options:\n --para_diff PARA_DIFF\n Axial diffusivity (AD) in the CC. [0.0015]\n --iso_diff ISO_DIFF Mean diffusivity (MD) in ventricles. [0.003]\n --perp_diff_min PERP_DIFF_MIN\n Radial diffusivity (RD) minimum. [0.0001]\n --perp_diff_max PERP_DIFF_MAX\n Radial diffusivity (RD) maximum. [0.0007]\n --lambda1 LAMBDA1 First regularization parameter. [0.0]\n --lambda2 LAMBDA2 Second regularization parameter. [0.25]\n\nKernels options:\n --save_kernels DIRECTORY\n Output directory for the COMMIT kernels.\n --load_kernels DIRECTORY\n Input directory where the COMMIT kernels are located.\n --compute_only Compute kernels only, --save_kernels must be used.\n\nReference:\n [1] Pasternak O, Sochen N, Gur Y, Intrator N, Assaf Y.\n Free water elimination and mapping from diffusion MRI.\n Magn Reson Med. 62 (3) (2009) 717-730.\n", - "synonyms": [ - [ - "axial", - "axial" - ], - [ - "diffusion", - "diffusion" - ], - [ - "maps", - "maps" - ], - [ - "parameter", - "parameter" - ], - [ - "Data", - "data", - "data" - ], - [ - "processes", - "processes" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_freewater_priors", - "docstring": "Synonym for scil_NODDI_priors.py", - "help": "usage: scil_freewater_priors.py [-h]\n [--fa_min_single_fiber FA_MIN_SINGLE_FIBER]\n [--fa_max_ventricles FA_MAX_VENTRICLES]\n [--md_min_ventricles MD_MIN_VENTRICLES]\n [--roi_radius ROI_RADIUS]\n [--roi_center pos pos pos]\n [--out_txt_1fiber_para FILE]\n [--out_txt_1fiber_perp FILE]\n [--out_mask_1fiber FILE]\n [--out_txt_ventricles FILE]\n [--out_mask_ventricles FILE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_FA in_AD in_RD in_MD\n\nCompute the axial (para_diff), radial (perp_diff), and mean (iso_diff)\ndiffusivity priors for NODDI.\n\nFormerly: scil_compute_NODDI_priors.py\n\npositional arguments:\n in_FA Path to the FA volume.\n in_AD Path to the axial diffusivity (AD) volume.\n in_RD Path to the radial diffusivity (RD) volume.\n in_MD Path to the mean diffusivity (MD) volume.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics options:\n --fa_min_single_fiber FA_MIN_SINGLE_FIBER\n Minimal threshold of FA (voxels above that threshold are considered in \n the single fiber mask). [0.7]\n --fa_max_ventricles FA_MAX_VENTRICLES\n Maximal threshold of FA (voxels under that threshold are considered in \n the ventricles). [0.1]\n --md_min_ventricles MD_MIN_VENTRICLES\n Minimal threshold of MD in mm2/s (voxels above that threshold are considered \n in the ventricles). [0.003]\n\nRegions options:\n --roi_radius ROI_RADIUS\n Radius of the region used to estimate the priors. The roi will be a cube spanning \n from ROI_CENTER in each direction. [20]\n --roi_center pos pos pos\n Center of the roi of size roi_radius used to estimate the priors; a 3-value coordinate. 
\n If not set, uses the center of the 3D volume.\n\nOutputs:\n --out_txt_1fiber_para FILE\n Output path for the text file containing the single fiber average value of AD.\n If not set, the file will not be saved.\n --out_txt_1fiber_perp FILE\n Output path for the text file containing the single fiber average value of RD.\n If not set, the file will not be saved.\n --out_mask_1fiber FILE\n Output path for single fiber mask. If not set, the mask will not be saved.\n --out_txt_ventricles FILE\n Output path for the text file containing the ventricles average value of MD.\n If not set, the file will not be saved.\n --out_mask_ventricles FILE\n Output path for the ventricle mask.\n If not set, the mask will not be saved.\n\nReference:\n [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.\n NODDI: practical in vivo neurite orientation dispersion and density\n imaging of the human brain. NeuroImage. 2012 Jul 16;61:1000-16.\n", - "synonyms": [ - [ - "region", - "regions", - "region" - ], - [ - "human", - "human" - ], - [ - "axial", - "axial" - ], - [ - "direction", - "direction" - ], - [ - "orientation", - "orientation" - ], - [ - "region", - "regions", - "regions" - ], - [ - "imaging", - "imaging" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "voxel", - "voxels" - ], - [ - "vivo", - "vivo" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "average", - "average" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_frf_mean", - "docstring": "Compute the mean Fiber Response Function from a set of individually\ncomputed Response Functions.\n\nThe FRF files are obtained from scil_frf_ssst.py, scil_frf_msmt.py in the\ncase of multi-shell data or scil_frf_memsmt.py in the case of multi-encoding\nmulti-shell data.\n\nFormerly: scil_compute_mean_frf.py", - "help": "usage: scil_frf_mean.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n list [list ...] file\n\nCompute the mean Fiber Response Function from a set of individually\ncomputed Response Functions.\n\nThe FRF files are obtained from scil_frf_ssst.py, scil_frf_msmt.py in the\ncase of multi-shell data or scil_frf_memsmt.py in the case of multi-encoding\nmulti-shell data.\n\nFormerly: scil_compute_mean_frf.py\n\npositional arguments:\n list List of FRF filepaths.\n file Path of the output mean FRF file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "Data", - "data", - "data" - ], - [ - "level", - "level" - ], - [ - "function", - "functions", - "function" - ], - [ - "function", - "functions", - "functions" - ] - ], - "keywords": [] - }, - { - "name": "scil_frf_memsmt", - "docstring": "Script to estimate response functions for multi-encoding multi-shell\nmulti-tissue (memsmt) constrained spherical deconvolution. In order to operate,\nthe script only needs the data from one type of b-tensor encoding. However,\ngiving only a spherical one will not produce good fiber response functions, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. 
We thus suggest always using at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombination with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable. Moreover, all of `--in_dwis`, `--in_bvals`, `--in_bvecs` and\n`--in_bdeltas` must have the same number of arguments. Be sure to keep the\nsame order of encodings throughout all these inputs and to set `--in_bdeltas`\naccordingly (IMPORTANT).\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n\nIn the wm, we compute the response function in each voxel where\nthe FA is superior to threshold_fa_wm.\n\nIn the gm (or csf), we compute the response function in each voxel where\nthe FA is below threshold_fa_gm (or threshold_fa_csf) and where\nthe MD is below threshold_md_gm (or threshold_md_csf).\n\n>>> scil_frf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_frf.py", - "help": "usage: scil_frf_memsmt.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals\n IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS\n [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}\n [{0,1,-0.5,0.5} ...] [--mask MASK]\n [--mask_wm MASK_WM] [--mask_gm MASK_GM]\n [--mask_csf MASK_CSF] [--fa_thr_wm FA_THR_WM]\n [--fa_thr_gm FA_THR_GM] [--fa_thr_csf FA_THR_CSF]\n [--md_thr_gm MD_THR_GM] [--md_thr_csf MD_THR_CSF]\n [--min_nvox MIN_NVOX] [--tolerance tol]\n [--skip_b0_check] [--dti_bval_limit DTI_BVAL_LIMIT]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--wm_frf_mask file] [--gm_frf_mask file]\n [--csf_frf_mask file] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n out_wm_frf out_gm_frf out_csf_frf\n\nScript to estimate response functions for multi-encoding multi-shell\nmulti-tissue (memsmt) constrained spherical deconvolution. In order to operate,\nthe script only needs the data from one type of b-tensor encoding. However,\ngiving only a spherical one will not produce good fiber response functions, as\nit only probes spherical shapes. As for planar encoding, it should technically\nwork alone, but seems to be very sensitive to noise and is yet to be properly\ndocumented. We thus suggest always using at least the linear encoding, which\nwill be equivalent to standard multi-shell multi-tissue if used alone, in\ncombination with other encodings. Note that custom encodings are not yet\nsupported, so that only the linear tensor encoding (LTE, b_delta = 1), the\nplanar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding\n(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are\navailable. Moreover, all of `--in_dwis`, `--in_bvals`, `--in_bvecs` and\n`--in_bdeltas` must have the same number of arguments. 
Be sure to keep the\nsame order of encodings throughout all these inputs and to set `--in_bdeltas`\naccordingly (IMPORTANT).\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n\nIn the wm, we compute the response function in each voxel where\nthe FA is superior to threshold_fa_wm.\n\nIn the gm (or csf), we compute the response function in each voxel where\nthe FA is below threshold_fa_gm (or threshold_fa_csf) and where\nthe MD is below threshold_md_gm (or threshold_md_csf).\n\n>>> scil_frf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz\n PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs\n LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz\n\nBased on P. Karan et al., Bridging the gap between constrained spherical\ndeconvolution and diffusional variance decomposition via tensor-valued\ndiffusion MRI. Medical Image Analysis (2022)\n\nFormerly: scil_compute_memsmt_frf.py\n\npositional arguments:\n out_wm_frf Path to the output WM frf file, in .txt format.\n out_gm_frf Path to the output GM frf file, in .txt format.\n out_csf_frf Path to the output CSF frf file, in .txt format.\n\noptions:\n -h, --help show this help message and exit\n --in_dwis IN_DWIS [IN_DWIS ...]\n Path to the input diffusion volume for each b-tensor encoding type.\n --in_bvals IN_BVALS [IN_BVALS ...]\n Path to the bval file, in FSL format, for each b-tensor encoding type.\n --in_bvecs IN_BVECS [IN_BVECS ...]\n Path to the bvec file, in FSL format, for each b-tensor encoding type.\n --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]\n Value of b_delta for each b-tensor encoding type, in the same order as \n dwi, bval and bvec inputs.\n --mask MASK Path to a binary mask. Only the data inside the mask will be used for\n computations and reconstruction. Useful if no tissue masks are available.\n --mask_wm MASK_WM Path to the input WM mask file, used to improve the final WM frf mask.\n --mask_gm MASK_GM Path to the input GM mask file, used to improve the final GM frf mask.\n --mask_csf MASK_CSF Path to the input CSF mask file, used to improve the final CSF frf mask.\n --fa_thr_wm FA_THR_WM\n If supplied, use this threshold to select single WM fiber voxels from \n the FA inside the WM mask defined by mask_wm. \n Each voxel above this threshold will be selected. [0.7]\n --fa_thr_gm FA_THR_GM\n If supplied, use this threshold to select GM voxels from the FA inside \n the GM mask defined by mask_gm. \n Each voxel below this threshold will be selected. [0.2]\n --fa_thr_csf FA_THR_CSF\n If supplied, use this threshold to select CSF voxels from the FA inside \n the CSF mask defined by mask_csf. \n Each voxel below this threshold will be selected. [0.1]\n --md_thr_gm MD_THR_GM\n If supplied, use this threshold to select GM voxels from the MD inside \n the GM mask defined by mask_gm. \n Each voxel below this threshold will be selected. [0.0007]\n --md_thr_csf MD_THR_CSF\n If supplied, use this threshold to select CSF voxels from the MD inside \n the CSF mask defined by mask_csf. \n Each voxel below this threshold will be selected. [0.003]\n --min_nvox MIN_NVOX Minimal number of voxels needed for each tissue mask in order to \n proceed to frf estimation. [100]\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. 
To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --tolerance). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n Use with care, and only if you understand your data.\n --dti_bval_limit DTI_BVAL_LIMIT\n The highest b-value taken for the DTI model. [1200]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to estimate the \n response functions. The roi will be a cuboid spanning from the middle of \n the volume in each direction with the different radii. The type is either \n an int (e.g. --roi_radii 10) or an array-like (3,) (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the cuboid roi using roi_radii. \n [center of the 3D volume] (e.g. --roi_center 66 79 79)\n --wm_frf_mask file Path to the output WM frf mask file, the voxels used to compute the WM frf.\n --gm_frf_mask file Path to the output GM frf mask file, the voxels used to compute the GM frf.\n --csf_frf_mask file Path to the output CSF frf mask file, the voxels used to compute the CSF frf.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "variance", - "variance" - ], - [ - "superior", - "superior" - ], - [ - "direction", - "direction" - ], - [ - "white", - "white" - ], - [ - "supported", - "supported" - ], - [ - "work", - "work" - ], - [ - "image", - "image" - ], - [ - "high", - "high" - ], - [ - "diffusion", - "diffusion" - ], - [ - "middle", - "middle" - ], - [ - "Data", - "data", - "data" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "function", - "functions", - "function" - ], - [ - "voxel", - "voxels" - ], - [ - "based", - "based" - ], - [ - "shape", - "shape" - ], - [ - "matter", - "matter" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "analysis", - "analysis" - ], - [ - "highest", - "highest" - ], - [ - "function", - "functions", - "functions" - ], - [ - "defined", - "defined" - ] - ], - "keywords": [] - }, - { - "name": "scil_frf_msmt", - "docstring": "Compute response functions for multi-shell multi-tissue (MSMT) constrained\nspherical deconvolution from DWI data.\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n - In the wm, we compute the response function in each voxel where the FA is\n superior to threshold_fa_wm.\n - In the gm (or csf), we compute the response function in each voxel where\n the FA is below threshold_fa_gm (or threshold_fa_csf) and where the MD\n is below threshold_md_gm (or threshold_md_csf).\n\nWe output one response function file for each tissue, containing the response\nfunction for each b-value (arranged by lines). These are saved as the diagonal\nof the axis-symmetric diffusion tensor (3 e-values) and a mean b0 value.\nFor example, a typical wm_frf is [15e-4, 4e-4, 4e-4, 700], where the tensor\ne-values are (15,4,4)x10^-4 mm^2/s and the mean b0 is 700.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical deconvolution\nfor improved analysis of multi-shell diffusion MRI data. 
Neuroimage (2014)\n\nFormerly: scil_compute_msmt_frf.py", - "help": "usage: scil_frf_msmt.py [-h] [--mask MASK] [--mask_wm MASK_WM]\n [--mask_gm MASK_GM] [--mask_csf MASK_CSF]\n [--fa_thr_wm FA_THR_WM] [--fa_thr_gm FA_THR_GM]\n [--fa_thr_csf FA_THR_CSF] [--md_thr_gm MD_THR_GM]\n [--md_thr_csf MD_THR_CSF] [--min_nvox MIN_NVOX]\n [--tolerance TOLERANCE] [--skip_b0_check]\n [--dti_bval_limit DTI_BVAL_LIMIT]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--wm_frf_mask file] [--gm_frf_mask file]\n [--csf_frf_mask file] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec out_wm_frf out_gm_frf\n out_csf_frf\n\nCompute response functions for multi-shell multi-tissue (MSMT) constrained\nspherical deconvolution from DWI data.\n\nThe script computes a response function for white-matter (wm),\ngray-matter (gm), csf and the mean b=0.\n - In the wm, we compute the response function in each voxel where the FA is\n superior to threshold_fa_wm.\n - In the gm (or csf), we compute the response function in each voxel where\n the FA is below threshold_fa_gm (or threshold_fa_csf) and where the MD\n is below threshold_md_gm (or threshold_md_csf).\n\nWe output one response function file for each tissue, containing the response\nfunction for each b-value (arranged by lines). These are saved as the diagonal\nof the axis-symmetric diffusion tensor (3 e-values) and a mean b0 value.\nFor example, a typical wm_frf is [15e-4, 4e-4, 4e-4, 700], where the tensor\ne-values are (15,4,4)x10^-4 mm^2/s and the mean b0 is 700.\n\nBased on B. Jeurissen et al., Multi-tissue constrained spherical deconvolution\nfor improved analysis of multi-shell diffusion MRI data. Neuroimage (2014)\n\nFormerly: scil_compute_msmt_frf.py\n\npositional arguments:\n in_dwi Path to the input diffusion volume.\n in_bval Path to the bval file, in FSL format.\n in_bvec Path to the bvec file, in FSL format.\n out_wm_frf Path to the output WM frf file, in .txt format.\n out_gm_frf Path to the output GM frf file, in .txt format.\n out_csf_frf Path to the output CSF frf file, in .txt format.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to a binary mask. Only the data inside the mask\n will be used for computations and reconstruction.\n Useful if no tissue masks are available.\n --mask_wm MASK_WM Path to the input WM mask file, used to improve the\n final WM frf mask.\n --mask_gm MASK_GM Path to the input GM mask file, used to improve the\n final GM frf mask.\n --mask_csf MASK_CSF Path to the input CSF mask file, used to improve the\n final CSF frf mask.\n --fa_thr_wm FA_THR_WM\n If supplied, use this threshold to select single WM\n fiber voxels from the FA inside the WM mask defined by\n mask_wm. Each voxel above this threshold will be\n selected. 
[0.7]\n --fa_thr_gm FA_THR_GM\n If supplied, use this threshold to select GM voxels\n from the FA inside the GM mask defined by mask_gm.\n Each voxel below this threshold will be selected.\n [0.2]\n --fa_thr_csf FA_THR_CSF\n If supplied, use this threshold to select CSF voxels\n from the FA inside the CSF mask defined by mask_csf.\n Each voxel below this threshold will be selected.\n [0.1]\n --md_thr_gm MD_THR_GM\n If supplied, use this threshold to select GM voxels\n from the MD inside the GM mask defined by mask_gm.\n Each voxel below this threshold will be selected.\n [0.0007]\n --md_thr_csf MD_THR_CSF\n If supplied, use this threshold to select CSF voxels\n from the MD inside the CSF mask defined by mask_csf.\n Each voxel below this threshold will be selected.\n [0.003]\n --min_nvox MIN_NVOX Minimal number of voxels needed for each tissue mask\n in order to proceed to frf estimation. [100]\n --tolerance TOLERANCE\n The tolerated gap between the b-values to extract and\n the current b-value. [20]\n --skip_b0_check By default, we supervise that at least one b0 exists\n in your data (i.e. b-values below the default\n --tolerance). Use this option to allow continuing even\n if the minimum b-value is suspiciously high. Use with\n care, and only if you understand your data.\n --dti_bval_limit DTI_BVAL_LIMIT\n The highest b-value taken for the DTI model. [1200]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to\n estimate the response functions. The roi will be a\n cuboid spanning from the middle of the volume in each\n direction with the different radii. The type is either\n an int (e.g. --roi_radii 10) or an array-like (3,)\n (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the cuboid roi\n using roi_radii. [center of the 3D volume] (e.g.\n --roi_center 66 79 79)\n --wm_frf_mask file Path to the output WM frf mask file, the voxels used\n to compute the WM frf.\n --gm_frf_mask file Path to the output GM frf mask file, the voxels used\n to compute the GM frf.\n --csf_frf_mask file Path to the output CSF frf mask file, the voxels used\n to compute the CSF frf.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. 
Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "superior", - "superior" - ], - [ - "direction", - "direction" - ], - [ - "white", - "white" - ], - [ - "high", - "high" - ], - [ - "diffusion", - "diffusion" - ], - [ - "middle", - "middle" - ], - [ - "Data", - "data", - "data" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "function", - "functions", - "function" - ], - [ - "voxel", - "voxels" - ], - [ - "based", - "based" - ], - [ - "matter", - "matter" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "analysis", - "analysis" - ], - [ - "highest", - "highest" - ], - [ - "function", - "functions", - "functions" - ], - [ - "defined", - "defined" - ] - ], - "keywords": [] - },
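The FRF text format described above is simple enough to inspect by hand. A minimal numpy sketch, assuming a hypothetical wm_frf.txt produced by the script (an illustration, not scilpy's actual implementation):

    import numpy as np

    # One row per b-value: three eigenvalues of the axisymmetric tensor
    # (in mm^2/s) followed by the mean b0, e.g. [15e-4, 4e-4, 4e-4, 700].
    frf = np.loadtxt("wm_frf.txt").reshape(-1, 4)  # hypothetical filename
    for evals, mean_b0 in zip(frf[:, :3], frf[:, 3]):
        print("tensor e-values:", evals, "mean b0:", mean_b0)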
- { - "name": "scil_frf_set_diffusivities", - "docstring": "Replace the fiber response function in the FRF file.\nUse this script when you want to use a fixed response function\nand keep the mean b0.\n\nThe FRF file is obtained from scil_frf_ssst.py or scil_frf_msmt.py in the case\nof multi-shell data.\n\nFormerly: scil_set_response_function.py", - "help": "usage: scil_frf_set_diffusivities.py [-h] [--no_factor]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n input new_frf output\n\nReplace the fiber response function in the FRF file.\nUse this script when you want to use a fixed response function\nand keep the mean b0.\n\nThe FRF file is obtained from scil_frf_ssst.py or scil_frf_msmt.py in the case\nof multi-shell data.\n\nFormerly: scil_set_response_function.py\n\npositional arguments:\n input Path of the FRF file.\n new_frf New response function given as a tuple. We will replace the \n response function in frf_file with this fiber response \n function x 10**-4 (e.g. 15,4,4). \n If multi-shell, write the first shell, then the second shell, \n and the third, etc. (e.g. 15,4,4,13,5,5,12,5,5).\n output Path of the new FRF file.\n\noptions:\n -h, --help show this help message and exit\n --no_factor If supplied, the fiber response function is\n evaluated without the x 10**-4 factor. [False].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "Data", - "data", - "data" - ], - [ - "function", - "functions", - "function" - ], - [ - "level", - "level" - ], - [ - "false", - "false" - ], - [ - "examined", - "evaluated" - ] - ], - "keywords": [] - }, - { - "name": "scil_frf_ssst", - "docstring": "Compute a single Fiber Response Function from a DWI.\n\nA DTI fit is made, and voxels containing a single fiber population are\nfound using a threshold on the FA.\n\nFormerly: scil_compute_ssst_frf.py", - "help": "usage: scil_frf_ssst.py [-h] [--mask MASK] [--mask_wm MASK_WM]\n [--fa_thresh FA_THRESH]\n [--min_fa_thresh MIN_FA_THRESH] [--min_nvox MIN_NVOX]\n [--roi_radii ROI_RADII [ROI_RADII ...]]\n [--roi_center tuple(3) tuple(3) tuple(3)]\n [--b0_threshold thr] [--skip_b0_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec frf_file\n\nCompute a single Fiber Response Function from a DWI.\n\nA DTI fit is made, and voxels containing a single fiber population are\nfound using a threshold on the FA.\n\nFormerly: scil_compute_ssst_frf.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n frf_file Path to the output FRF file, in .txt format, saved by Numpy.\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to a binary mask. Only the data inside the mask will be used \n for computations and reconstruction. Useful if no white matter mask \n is available.\n --mask_wm MASK_WM Path to a binary white matter mask. Only the data inside this mask \n and above the threshold defined by --fa_thresh will be used to estimate the \n fiber response function.\n --fa_thresh FA_THRESH\n If supplied, use this threshold as the initial threshold to select \n single fiber voxels. [0.7]\n --min_fa_thresh MIN_FA_THRESH\n If supplied, this is the minimal value that will be tried when looking \n for single fiber voxels. [0.5]\n --min_nvox MIN_NVOX Minimal number of voxels that need to be identified as single fiber voxels \n in the automatic estimation. [300]\n --roi_radii ROI_RADII [ROI_RADII ...]\n If supplied, use those radii to select a cuboid roi to estimate the \n response functions. The roi will be a cuboid spanning from the middle of \n the volume in each direction with the different radii. The type is either \n an int (e.g. --roi_radii 10) or an array-like (3,) (e.g. --roi_radii 20 30 10). [[20]]\n --roi_center tuple(3) tuple(3) tuple(3)\n If supplied, use this center to span the cuboid roi using roi_radii. [center of the 3D volume]\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences: [1] Tournier et al. 
NeuroImage 2007\n", - "synonyms": [ - [ - "population", - "population" - ], - [ - "direction", - "direction" - ], - [ - "white", - "white" - ], - [ - "high", - "high" - ], - [ - "diffusion", - "diffusion" - ], - [ - "middle", - "middle" - ], - [ - "Data", - "data", - "data" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "function", - "functions", - "function" - ], - [ - "voxel", - "voxels" - ], - [ - "matter", - "matter" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ], - [ - "function", - "functions", - "functions" - ], - [ - "defined", - "defined" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_get_version", - "docstring": "Gives you information about your current scilpy installation.\nThis is useful for non-developers to give you the information\nneeded to reproduce your results, or to help with debugging.\n\nIf you are experiencing a bug, please run this script and\nsend the output to the scilpy developers.", - "help": "usage: scil_get_version.py [-h] [--show_dependencies]\n [-v [{DEBUG,INFO,WARNING}]]\n\nGives you information about your current scilpy installation.\nThis is useful for non-developers to give you the information\nneeded to reproduce your results, or to help with debugging.\n\nIf you are experiencing a bug, please run this script and\nsend the output to the scilpy developers.\n\noptions:\n -h, --help show this help message and exit\n --show_dependencies Show the dependencies of scilpy.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_gradients_apply_transform", - "docstring": "Transform bvecs using an affine/rigid transformation.\n\nFormerly: scil_apply_transform_to_bvecs.py.", - "help": "usage: scil_gradients_apply_transform.py [-h] [--inverse]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bvecs in_transfo out_bvecs\n\nTransform bvecs using an affine/rigid transformation.\n\nFormerly: scil_apply_transform_to_bvecs.py.\n\npositional arguments:\n in_bvecs Path of the bvec file, in FSL format\n in_transfo Path of the file containing the 4x4 \n transformation matrix (.txt, .npy or .mat).\n out_bvecs Output filename of the transformed bvecs.\n\noptions:\n -h, --help show this help message and exit\n --inverse Apply the inverse transformation.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "applied", - "apply" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - },
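For intuition, transforming bvecs with a rigid transform only involves the rotational 3x3 block of the 4x4 matrix, since gradient directions are unaffected by translation. A sketch under that assumption (filenames hypothetical; scilpy's exact handling, including --inverse, may differ):

    import numpy as np

    transfo = np.loadtxt("transfo.txt")   # hypothetical 4x4 rigid transformation
    bvecs = np.loadtxt("in.bvec")         # FSL format, shape (3, N)
    rot = transfo[:3, :3]                 # translation does not act on directions
    out = rot @ bvecs                     # use np.linalg.inv(rot) for --inverse
    np.savetxt("out.bvec", out, fmt="%.8f")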
- { - "name": "scil_gradients_convert", - "docstring": "Script to convert gradient tables between FSL and MRtrix formats.\n\nFormerly: scil_convert_gradients_mrtrix_to_fsl.py or\nscil_convert_gradients_fsl_to_mrtrix.py", - "help": "usage: scil_gradients_convert.py [-h] (--input_fsl | --input_mrtrix) [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n GRADIENT_FILE(S) [GRADIENT_FILE(S) ...]\n output\n\nScript to convert gradient tables between FSL and MRtrix formats.\n\nFormerly: scil_convert_gradients_mrtrix_to_fsl.py or\nscil_convert_gradients_fsl_to_mrtrix.py\n\npositional arguments:\n GRADIENT_FILE(S) Path(s) to the gradient file(s). Either FSL (.bval, .bvec) or MRtrix (.b).\n output Basename of output without extension. Extension(s) will be added automatically (.b for MRtrix, .bval/.bvec for FSL).\n\noptions:\n -h, --help show this help message and exit\n --input_fsl FSL format.\n --input_mrtrix MRtrix format.\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_gradients_generate_sampling", - "docstring": "Generate multi-shell gradient sampling with various processing options. Helps\naccelerate gradients, optimize duty cycle and avoid artefacts.\n\nMulti-shell gradient sampling is generated as in [1]. The bvecs are then\nflipped to maximize spread for eddy current correction, b0s are interleaved at\nequal spacing and the non-b0 samples are finally shuffled to minimize the total\ndiffusion gradient amplitude over a few TR.\n\nFormerly: scil_generate_gradient_sampling.py", - "help": "usage: scil_gradients_generate_sampling.py [-h] [--eddy] [--duty]\n [--no_b0_start NO_B0_START | --b0_every B0_EVERY]\n [--b0_end] [--b0_value B0_VALUE]\n [--b0_philips]\n (--bvals bvals [bvals ...] | --b_lin_max B_LIN_MAX | --q_lin_max Q_LIN_MAX)\n (--fsl | --mrtrix)\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n nb_samples_per_shell\n [nb_samples_per_shell ...]\n out_basename\n\nGenerate multi-shell gradient sampling with various processing options. Helps\naccelerate gradients, optimize duty cycle and avoid artefacts.\n\nMulti-shell gradient sampling is generated as in [1]. The bvecs are then\nflipped to maximize spread for eddy current correction, b0s are interleaved at\nequal spacing and the non-b0 samples are finally shuffled to minimize the total\ndiffusion gradient amplitude over a few TR.\n\nFormerly: scil_generate_gradient_sampling.py\n\npositional arguments:\n nb_samples_per_shell Number of samples on each non b0 shell. \n If multishell, provide a number per shell.\n out_basename Gradient sampling output basename (don't include extension).\n Please add options --fsl and/or --mrtrix below.\n\noptions:\n -h, --help show this help message and exit\n --eddy If set, we apply eddy optimization.\n B-vectors are flipped to be well spread without symmetry.\n --duty If set, we apply duty cycle optimization. \n B-vectors are shuffled to reduce consecutive colinearity in the samples. [False]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nb0 acquisitions:\n Default if you add no option is to have a b0 at the start.\n\n --no_b0_start NO_B0_START\n If set, do not add a b0 at the beginning. \n --b0_every B0_EVERY Interleave a b0 every n=b0_every values. Starts after the first b0 \n (cannot be used with --no_b0_start). Must be an integer >= 1.\n --b0_end If set, adds a b0 as last sample.\n --b0_value B0_VALUE b-value of the b0s. 
[0.0]\n --b0_philips If set, replaces values of b0s bvecs by existing bvecs for Philips handling.\n\nNon-b0 acquisitions:\n --bvals bvals [bvals ...]\n bval of each non-b0 shell.\n --b_lin_max B_LIN_MAX\n b-max for linear bval distribution in *b*.\n --q_lin_max Q_LIN_MAX\n b-max for linear bval distribution in *q*; \n the square root of b-values will be linearly distributed.\n\nSave as:\n --fsl Save in FSL format (.bvec/.bval).\n --mrtrix Save in MRtrix format (.b).\n\nReferences: [1] Emmanuel Caruyer, Christophe Lenglet, Guillermo Sapiro,\nRachid Deriche. Design of multishell gradient sampling with uniform coverage\nin diffusion MRI. Magnetic Resonance in Medicine, Wiley, 2013, 69 (6),\npp. 1534-1540. \n \n", - "synonyms": [ - [ - "variety", - "various" - ], - [ - "diffusion", - "diffusion" - ], - [ - "applied", - "apply" - ], - [ - "processing", - "processing" - ], - [ - "level", - "level" - ], - [ - "total", - "total" - ], - [ - "false", - "false" - ] - ], - "keywords": [] - }, - { - "name": "scil_gradients_modify_axes", - "docstring": "Flip (ex, x --> -x) or swap (ex, x <-> y) chosen axes of the gradient sampling\nmatrix. Result will be saved in the same format as input gradient sampling\nfile.\n\nFormerly: scil_flip_gradients.py or scil_swap_gradient_axis.py", - "help": "usage: scil_gradients_modify_axes.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_gradient_sampling_file\n out_gradient_sampling_file\n {1,2,3,-1,-2,-3} {1,2,3,-1,-2,-3}\n {1,2,3,-1,-2,-3}\n\nFlip (ex, x --> -x) or swap (ex, x <-> y) chosen axes of the gradient sampling\nmatrix. Result will be saved in the same format as input gradient sampling\nfile.\n\nFormerly: scil_flip_gradients.py or scil_swap_gradient_axis.py\n\npositional arguments:\n in_gradient_sampling_file\n Path to gradient sampling file. (.bvec or .b)\n out_gradient_sampling_file\n Where to save the flipped gradient sampling file. Extension (.bvec or .b) must be the same as in_gradient_sampling_file\n {1,2,3,-1,-2,-3} The final order of the axes, compared to original order: x=1 y=2 z=3.\n Ex: to only flip y: 1 -2 3.\n Ex: to only swap x and y: 2 1 3.\n Ex: to first flip x, then permute all three axes: 3 -1 2.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "result", - "result" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - },
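The {1,2,3,-1,-2,-3} axis codes above map directly onto an index/sign pair. A minimal sketch of that interpretation for FSL-style bvecs (hypothetical helper, not the script's code):

    import numpy as np

    def modify_axes(bvecs, spec):
        # spec like (3, -1, 2): entry i names the original axis placed at
        # position i (1-based); a negative value also flips that axis.
        idx = [abs(a) - 1 for a in spec]
        signs = np.array([1.0 if a > 0 else -1.0 for a in spec])
        return bvecs[idx] * signs[:, None]

    bvecs = np.loadtxt("in.bvec")                 # hypothetical file, shape (3, N)
    only_flip_y = modify_axes(bvecs, (1, -2, 3))  # "to only flip y: 1 -2 3"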
- { - "name": "scil_gradients_round_bvals", - "docstring": "Select b-values on specific b-value shells.\n\nWith the --tolerance argument, this is useful for sampling schemes where\nb-values of a shell are not all identical. Adjust the tolerance to vary the\naccepted interval around the targeted b-value.\n\nFor example, a b-value of 2000 and a tolerance of 20 will select all b-values\nbetween [1980, 2020] and round them to the value of 2000.\n\n>> scil_gradients_round_bvals.py bvals 0 1000 2000 newbvals --tolerance 20\n\nFormerly: scil_resample_bvals.py", - "help": "usage: scil_gradients_round_bvals.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bval shells [shells ...] out_bval\n tolerance\n\nSelect b-values on specific b-value shells.\n\nWith the --tolerance argument, this is useful for sampling schemes where\nb-values of a shell are not all identical. Adjust the tolerance to vary the\naccepted interval around the targeted b-value.\n\nFor example, a b-value of 2000 and a tolerance of 20 will select all b-values\nbetween [1980, 2020] and round them to the value of 2000.\n\n>> scil_gradients_round_bvals.py bvals 0 1000 2000 newbvals --tolerance 20\n\nFormerly: scil_resample_bvals.py\n\npositional arguments:\n in_bval The b-values in FSL format.\n shells The list of expected shells. For example 0 1000 2000.\n All b-values in the b_val file should correspond to one given shell (up to the tolerance).\n out_bval The name of the output b-values.\n tolerance The tolerated gap between the b-values to extract and the \n actual b-values. Expecting an integer value. Comparison is \n strict: a b-value of 1010 with a tolerance of 10 is NOT \n included in shell 1000. Suggestion: 20.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "level", - "level" - ], - [ - "specific", - "specific" - ] - ], - "keywords": [] - },
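A minimal numpy sketch of the rounding rule described above, with the strict comparison spelled out (hypothetical code, not the script's implementation):

    import numpy as np

    def round_bvals(bvals, shells, tolerance):
        shells = np.asarray(shells, dtype=float)
        out = []
        for b in bvals:
            diffs = np.abs(shells - b)
            if diffs.min() >= tolerance:  # strict: 1010 with tolerance 10
                raise ValueError(f"b={b} matches no expected shell")
            out.append(shells[diffs.argmin()])
        return np.array(out)

    print(round_bvals([0, 8, 1995, 2012], shells=[0, 2000], tolerance=20))
    # -> [   0.    0. 2000. 2000.]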
- { - "name": "scil_gradients_validate_correct", - "docstring": "Detect sign flips and/or axes swaps in the gradients table from a fiber\ncoherence index [1]. The script takes as input the principal direction(s)\nat each voxel, the b-vectors and the fractional anisotropy map and outputs\na corrected b-vectors file.\n\nA typical pipeline could be:\n>>> scil_dti_metrics.py dwi.nii.gz bval bvec --not_all --fa fa.nii.gz\n --evecs peaks.nii.gz\n>>> scil_gradients_validate_correct.py bvec peaks_v1.nii.gz fa.nii.gz bvec_corr\n\nNote that peaks_v1.nii.gz is the file containing the direction associated\nwith the highest eigenvalue at each voxel.\n\nIt is also possible to use a file containing multiple principal directions per\nvoxel, given that they are sorted by decreasing amplitude. In that case, the\nfirst direction (with the highest amplitude) will be chosen for validation.\nOnly 4D data is supported, so the directions must be stored in a single\ndimension. For example, peaks.nii.gz from scil_fodf_metrics.py could be used.\n\nFormerly: scil_validate_and_correct_bvecs.py", - "help": "usage: scil_gradients_validate_correct.py [-h] [--mask MASK]\n [--fa_threshold FA_THRESHOLD]\n [--column_wise]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bvec in_peaks in_FA out_bvec\n\nDetect sign flips and/or axes swaps in the gradients table from a fiber\ncoherence index [1]. The script takes as input the principal direction(s)\nat each voxel, the b-vectors and the fractional anisotropy map and outputs\na corrected b-vectors file.\n\nA typical pipeline could be:\n>>> scil_dti_metrics.py dwi.nii.gz bval bvec --not_all --fa fa.nii.gz\n --evecs peaks.nii.gz\n>>> scil_gradients_validate_correct.py bvec peaks_v1.nii.gz fa.nii.gz bvec_corr\n\nNote that peaks_v1.nii.gz is the file containing the direction associated\nwith the highest eigenvalue at each voxel.\n\nIt is also possible to use a file containing multiple principal directions per\nvoxel, given that they are sorted by decreasing amplitude. In that case, the\nfirst direction (with the highest amplitude) will be chosen for validation.\nOnly 4D data is supported, so the directions must be stored in a single\ndimension. For example, peaks.nii.gz from scil_fodf_metrics.py could be used.\n\nFormerly: scil_validate_and_correct_bvecs.py\n\npositional arguments:\n in_bvec Path to bvec file.\n in_peaks Path to peaks file.\n in_FA Path to the fractional anisotropy file.\n out_bvec Path to corrected bvec file (FSL format).\n\noptions:\n -h, --help show this help message and exit\n --mask MASK Path to an optional mask. If set, FA and Peaks will only be used inside the mask.\n --fa_threshold FA_THRESHOLD\n FA threshold. Only voxels with FA higher than fa_threshold will be considered. [0.2]\n --column_wise Specify if input peaks are column-wise (..., 3, N) instead of row-wise (..., N, 3).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Schilling KG, Yeh FC, Nath V, Hansen C, Williams O, Resnick S, Anderson AW,\nLandman BA. A fiber coherence index for quality control of B-table orientation\nin diffusion MRI scans. Magn Reson Imaging. 2019 May;58:82-89.\ndoi: 10.1016/j.mri.2019.01.018.\n", - "synonyms": [ - [ - "maps", - "map" - ], - [ - "principal", - "principal" - ], - [ - "direction", - "direction" - ], - [ - "orientation", - "orientation" - ], - [ - "imaging", - "imaging" - ], - [ - "supported", - "supported" - ], - [ - "diffusion", - "diffusion" - ], - [ - "Data", - "data", - "data" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "voxel", - "voxels" - ], - [ - "considered", - "considered" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "higher", - "higher" - ], - [ - "highest", - "highest" - ], - [ - "validation", - "validation" - ] - ], - "keywords": [] - },
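The peaks layout mentioned above (several directions flattened along the last axis) can be made explicit with a small reshape; the first, highest-amplitude direction is the one used for validation. A sketch assuming nibabel and a hypothetical peaks.nii.gz of shape (X, Y, Z, 3*N):

    import nibabel as nib

    data = nib.load("peaks.nii.gz").get_fdata()   # (X, Y, Z, 3*N), row-wise
    x, y, z, last = data.shape
    peaks = data.reshape(x, y, z, last // 3, 3)   # (X, Y, Z, N, 3)
    first_dir = peaks[..., 0, :]                  # highest-amplitude direction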
- { - "name": "scil_gradients_validate_correct_eddy", - "docstring": "Validate and correct gradients from eddy outputs.\nWith full AP-PA acquisitions, eddy outputs a full bvec/bval pair (2x the\nnumber of directions) that does not fit the output DWI (1x the number of\ndirections).\n\nFormerly: scil_validate_and_correct_eddy_gradients.py", - "help": "usage: scil_gradients_validate_correct_eddy.py [-h]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bvec in_bval nb_dirs\n out_bvec out_bval\n\nValidate and correct gradients from eddy outputs.\nWith full AP-PA acquisitions, eddy outputs a full bvec/bval pair (2x the\nnumber of directions) that does not fit the output DWI (1x the number of\ndirections).\n\nFormerly: scil_validate_and_correct_eddy_gradients.py\n\npositional arguments:\n in_bvec In bvec file.\n in_bval In bval file.\n nb_dirs Number of directions per DWI.\n out_bvec Out bvec file.\n out_bval Out bval file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_header_print_info", - "docstring": "Print the raw header from the provided file or only the specified keys.\nSupports trk, nii and mgz files.\n\nFormerly: scil_print_header.py", - "help": "usage: scil_header_print_info.py [-h] [--keys KEYS [KEYS ...]]\n [--print_affine] [-v [{DEBUG,INFO,WARNING}]]\n in_file\n\nPrint the raw header from the provided file or only the specified keys.\nSupports trk, nii and mgz files.\n\nFormerly: scil_print_header.py\n\npositional arguments:\n in_file Input file (trk, nii and mgz).\n\noptions:\n -h, --help show this help message and exit\n --keys KEYS [KEYS ...]\n Print only the specified keys.\n --print_affine Print nibabel's affine.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_header_validate_compatibility", - "docstring": "Will compare all input files against the first one for the compatibility\nof their spatial attributes.\n\nSpatial attributes are: affine, dimensions, voxel sizes and voxel order.\n\nFormerly: scil_verify_space_attributes_compatibility.py", - "help": "usage: scil_header_validate_compatibility.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n in_files [in_files ...]\n\nWill compare all input files against the first one for the compatibility\nof their spatial attributes.\n\nSpatial attributes are: affine, dimensions, voxel sizes and voxel order.\n\nFormerly: scil_verify_space_attributes_compatibility.py\n\npositional arguments:\n in_files List of files to compare (trk, tck and nii/nii.gz).\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "spatial", - "spatial" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_json_convert_entries_to_xlsx", - "docstring": "Convert a final aggregated json file to an Excel spreadsheet.\nTypically used during the tractometry pipeline.\n\nFormerly: scil_convert_json_to_xlsx.py", - "help": "usage: scil_json_convert_entries_to_xlsx.py [-h] [--no_sort_subs]\n [--no_sort_bundles]\n [--ignore_bundles FILE]\n [--stats_over_population]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_json out_xlsx\n\nConvert a final aggregated json file to an Excel spreadsheet.\nTypically used during the tractometry pipeline.\n\nFormerly: scil_convert_json_to_xlsx.py\n\npositional arguments:\n in_json File containing the json stats (.json).\n out_xlsx Output Excel file for the stats (.xlsx).\n\noptions:\n -h, --help show this help message and exit\n --no_sort_subs If set, subjects won't be sorted alphabetically.\n --no_sort_bundles If set, bundles won't be sorted alphabetically.\n --ignore_bundles FILE\n Path to a text file containing a list of bundles to ignore (.txt).\n One bundle, corresponding to keys in the json, per line.\n --stats_over_population\n If set, consider the input stats to be over an entire population and not subject-based.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "population", - "population" - ], - [ - "subject", - "subject" - ], - [ - "bundles", - "bundle" - ], - [ - "subjects", - "subjects" - ], - [ - "based", - "based" - ], - [ - "bundles", - "bundles" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_json_harmonize_entries", - "docstring": "This script will harmonize a json file by adding missing keys and values\nthat differ between the different layers of the dictionary.\n\nThis is used only (for now) in the Aggregate_All_* portion of tractometry-flow,\nto counter the problem of missing bundles/metrics/lesions between subjects.\n\nThe most common use case is when specific subjects have missing bundles\nwhich will cause a pandas array to be incomplete, and thus crash. Finding out\nthe union of all bundles/metrics/lesions will allow creating a complete json\n(but with NaN for missing values).\n\nFormerly: scil_harmonize_json.py", - "help": "usage: scil_json_harmonize_entries.py [-h] [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file out_file\n\n This script will harmonize a json file by adding missing keys and values\nthat differ between the different layers of the dictionary.\n\nThis is used only (for now) in the Aggregate_All_* portion of tractometry-flow,\nto counter the problem of missing bundles/metrics/lesions between subjects.\n\nThe most common use case is when specific subjects have missing bundles\nwhich will cause a pandas array to be incomplete, and thus crash. Finding out\nthe union of all bundles/metrics/lesions will allow creating a complete json\n(but with NaN for missing values).\n\nFormerly: scil_harmonize_json.py\n\npositional arguments:\n in_file Input file (json).\n out_file Output file (json).\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "create", - "create" - ], - [ - "subjects", - "subjects" - ], - [ - "bundles", - "bundles" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ] - ], - "keywords": [] - }, - { - "name": "scil_json_merge_entries", - "docstring": "Merge multiple json files into a single one.\nTypically used during the tractometry pipeline.\n\nWithout options, it will simply merge all entries at the top level; the top\nlevel must not have any conflicting keys.\n\n--keep_separate option will add a parent for each file; its basename will\nbecome the key.\n\n--no_list option will merge all entries at the top level; if there is a\nconflict, the lowest level will be extended with the new values (if a list) or\nadded (if a value).\n\n--add_parent_key option will add a parent key before merging all entries.\n\n--remove_parent_key option will remove the parent key before merging all\nentries.\n\n--recursive option will merge all entries (scalar) at the lowest layers as a\nlist.\n\n--average_last_layer option will average all entries (scalar) at the lowest\nlayers, but instead of creating a list it creates a mean/std level.\n\nFormerly: scil_merge_json.py", - "help": "usage: scil_json_merge_entries.py [-h] [--keep_separate] [--no_list]\n [--add_parent_key ADD_PARENT_KEY]\n [--remove_parent_key] [--recursive]\n [--average_last_layer] [--indent INDENT]\n [--sort_keys] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_json [in_json ...] out_json\n\n Merge multiple json files into a single one.\nTypically used during the tractometry pipeline.\n\nWithout options, it will simply merge all entries at the top level; the top\nlevel must not have any conflicting keys.\n\n--keep_separate option will add a parent for each file; its basename will\nbecome the key.\n\n--no_list option will merge all entries at the top level; if there is a\nconflict, the lowest level will be extended with the new values (if a list) or\nadded (if a value).\n\n--add_parent_key option will add a parent key before merging all entries.\n\n--remove_parent_key option will remove the parent key before merging all\nentries.\n\n--recursive option will merge all entries (scalar) at the lowest layers as a\nlist.\n\n--average_last_layer option will average all entries (scalar) at the lowest\nlayers, but instead of creating a list it creates a mean/std level.\n\nFormerly: scil_merge_json.py\n\npositional arguments:\n in_json List of json files to merge (.json).\n out_json Output json file (.json).\n\noptions:\n -h, --help show this help message and exit\n --keep_separate Merge entries as separate keys based on filename.\n --no_list Merge entries knowing there is no conflict.\n --add_parent_key ADD_PARENT_KEY\n Merge all entries under a single parent.\n --remove_parent_key Merge ignoring parent key (e.g. for population).\n --recursive Merge all entries at the lowest layers.\n --average_last_layer Average all entries at the lowest layers.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "population", - "population" - ], - [ - "key", - "key" - ], - [ - "create", - "creating" - ], - [ - "based", - "based" - ], - [ - "highest", - "lowest" - ], - [ - "average", - "average" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_labels_combine", - "docstring": "Script to combine labels from multiple volumes. If there is overlap, it will\noverwrite them based on the input order.\n\n >>> scil_labels_combine.py out_labels.nii.gz\n --volume_ids animal_labels.nii 20\n --volume_ids DKT_labels.nii.gz 44 53\n --out_labels_indices 20 44 53\n >>> scil_labels_combine.py slf_labels.nii.gz\n --volume_ids a2009s_aseg.nii.gz all\n --volume_ids clean/s1__DKT.nii.gz 1028 2028\n\nFormerly: scil_combine_labels.py.", - "help": "usage: scil_labels_combine.py [-h] --volume_ids VOLUME_IDS [VOLUME_IDS ...]\n [--out_labels_ids OUT_LABELS_IDS [OUT_LABELS_IDS ...]\n | --unique | --group_in_m]\n [--background BACKGROUND] [--merge_groups]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n output\n\nScript to combine labels from multiple volumes. If there is overlap, it will\noverwrite them based on the input order.\n\n >>> scil_labels_combine.py out_labels.nii.gz\n --volume_ids animal_labels.nii 20\n --volume_ids DKT_labels.nii.gz 44 53\n --out_labels_indices 20 44 53\n >>> scil_labels_combine.py slf_labels.nii.gz\n --volume_ids a2009s_aseg.nii.gz all\n --volume_ids clean/s1__DKT.nii.gz 1028 2028\n\nFormerly: scil_combine_labels.py.\n\npositional arguments:\n output Combined labels volume output.\n\noptions:\n -h, --help show this help message and exit\n --volume_ids VOLUME_IDS [VOLUME_IDS ...]\n List of volumes directly followed by their labels:\n --volume_ids atlasA id1a id2a \n --volume_ids atlasB id1b id2b ... \n \"all\" can be used instead of id numbers.\n --out_labels_ids OUT_LABELS_IDS [OUT_LABELS_IDS ...]\n List of labels indices for output images.\n --unique If set, output ids with unique labels, excluding the first background value.\n --group_in_m Add (x * 10 000) to each volume's labels, where x is the input volume order number.\n --background BACKGROUND\n Background id, excluded from output [0],\n the value is used as output background value.\n --merge_groups Each group from the --volume_ids option will be merged as a single label.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. 
OHBM 2019.\n Surface integration for connectome analysis in age prediction.\n \n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "connectome", - "connectome" - ], - [ - "unique", - "unique" - ], - [ - "volume", - "volumes", - "volumes" - ], - [ - "integrating", - "integration" - ], - [ - "based", - "based" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "level", - "level" - ], - [ - "analysis", - "analysis" - ] - ], - "keywords": [] - }, - { - "name": "scil_labels_dilate", - "docstring": "Dilate regions (with or without masking) from a labeled volume:\n- \"label_to_dilate\" are regions that will dilate over\n \"label_to_fill\" if close enough to it (\"distance\").\n- \"label_to_dilate\", by default (None) will be all\n non-\"label_to_fill\" and non-\"label_not_to_dilate\".\n- \"label_not_to_dilate\" will not be changed, but will not dilate.\n- \"mask\" is where the dilation is allowed (constrained)\n in addition to \"background_label\" (logical AND)\n\n>>> scil_labels_dilate.py wmparc_t1.nii.gz wmparc_dil.nii.gz \\\n --labels_to_fill 0 5001 5002 \\\n --labels_not_to_dilate 4 43 10 11 12 49 50 51\n\nFormerly: scil_dilate_labels.py", - "help": "usage: scil_labels_dilate.py [-h] [--distance DISTANCE]\n [--labels_to_dilate LABELS_TO_DILATE [LABELS_TO_DILATE ...]]\n [--labels_to_fill LABELS_TO_FILL [LABELS_TO_FILL ...]]\n [--labels_not_to_dilate LABELS_NOT_TO_DILATE [LABELS_NOT_TO_DILATE ...]]\n [--mask MASK] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file out_file\n\nDilate regions (with or without masking) from a labeled volume:\n- \"label_to_dilate\" are regions that will dilate over\n \"label_to_fill\" if close enough to it (\"distance\").\n- \"label_to_dilate\", by default (None) will be all\n non-\"label_to_fill\" and non-\"label_not_to_dilate\".\n- \"label_not_to_dilate\" will not be changed, but will not dilate.\n- \"mask\" is where the dilation is allowed (constrained)\n in addition to \"background_label\" (logical AND)\n\n>>> scil_labels_dilate.py wmparc_t1.nii.gz wmparc_dil.nii.gz \\\n --labels_to_fill 0 5001 5002 \\\n --labels_not_to_dilate 4 43 10 11 12 49 50 51\n\nFormerly: scil_dilate_labels.py\n\npositional arguments:\n in_file Path of the volume (nii or nii.gz).\n out_file Output filename of the dilated labels.\n\noptions:\n -h, --help show this help message and exit\n --distance DISTANCE Maximal distance to dilate (in mm) [2.0].\n --labels_to_dilate LABELS_TO_DILATE [LABELS_TO_DILATE ...]\n Label list to dilate. By default it dilates all \n labels not in labels_to_fill nor in labels_not_to_dilate.\n --labels_to_fill LABELS_TO_FILL [LABELS_TO_FILL ...]\n Background id / labels to be filled [[0]],\n the first one is given as output background value.\n --labels_not_to_dilate LABELS_NOT_TO_DILATE [LABELS_NOT_TO_DILATE ...]\n Label list not to dilate.\n --mask MASK Only dilate values inside the mask.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. 
OHBM 2019.\n Surface integration for connectome analysis in age prediction.\n \n", - "synonyms": [ - [ - "connectome", - "connectome" - ], - [ - "region", - "regions", - "regions" - ], - [ - "processes", - "processes" - ], - [ - "integrating", - "integration" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "level", - "level" - ], - [ - "analysis", - "analysis" - ] - ], - "keywords": [] - }, - { - "name": "scil_labels_remove", - "docstring": "Script to remove specific labels from an atlas volume.\n\n >>> scil_labels_remove.py DKT_labels.nii out_labels.nii.gz -i 5001 5002\n\nFormerly: scil_remove_labels.py", - "help": "usage: scil_labels_remove.py [-h] -i INDICES [INDICES ...]\n [--background BACKGROUND]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_labels out_labels\n\nScript to remove specific labels from an atlas volume.\n\n >>> scil_labels_remove.py DKT_labels.nii out_labels.nii.gz -i 5001 5002\n\nFormerly: scil_remove_labels.py\n\npositional arguments:\n in_labels Input labels volume.\n out_labels Output labels volume.\n\noptions:\n -h, --help show this help message and exit\n -i INDICES [INDICES ...], --indices INDICES [INDICES ...]\n List of labels indices to remove.\n --background BACKGROUND\n Integer used for removed labels [0].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. OHBM 2019.\n Surface integration for connectome analysis in age prediction.\n \n", - "synonyms": [ - [ - "connectome", - "connectome" - ], - [ - "atlas", - "atlas" - ], - [ - "integrating", - "integration" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ], - [ - "analysis", - "analysis" - ] - ], - "keywords": [] - }, - { - "name": "scil_labels_split_volume_by_ids", - "docstring": "Split a label image into multiple images where the name of the output images\nis the id of the label (ex. 35.nii.gz, 36.nii.gz, ...). If the --range option\nis not provided, all labels of the image are extracted. The label 0 is\nconsidered as the background and is ignored.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_ids.py", - "help": "usage: scil_labels_split_volume_by_ids.py [-h] [--out_dir OUT_DIR]\n [--out_prefix OUT_PREFIX]\n [-r min max min max]\n [--background BACKGROUND]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_labels\n\nSplit a label image into multiple images where the name of the output images\nis the id of the label (ex. 35.nii.gz, 36.nii.gz, ...). If the --range option\nis not provided, all labels of the image are extracted. The label 0 is\nconsidered as the background and is ignored.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_ids.py\n\npositional arguments:\n in_labels Path of the input label file, in a format supported by Nibabel.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Put all output images in a specific directory.\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n -r min max min max, --range min max min max\n Specifies a subset of labels to split, formatted as min max. Ex: -r 3 5 will give files _3, _4, _5.\n --background BACKGROUND\n Background value. Will not be saved as a separate label. 
Default: 0.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "supported", - "supported" - ], - [ - "image", - "image" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ] - ], - "keywords": [] - }, - { - "name": "scil_labels_split_volume_from_lut", - "docstring": "Split a label image into multiple images where the name of the output images\nis taken from a lookup table (ex: left-lateral-occipital.nii.gz,\nright-thalamus.nii.gz, ...). Only the labels included in the lookup table\nare extracted.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_labels.py", - "help": "usage: scil_labels_split_volume_from_lut.py [-h] [--out_dir OUT_DIR]\n [--out_prefix OUT_PREFIX]\n (--scilpy_lut {freesurfer_subcortical,dk_aggregate_structures,freesurfer_desikan_killiany} | --custom_lut CUSTOM_LUT)\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_label\n\nSplit a label image into multiple images where the name of the output images\nis taken from a lookup table (ex: left-lateral-occipital.nii.gz,\nright-thalamus.nii.gz, ...). Only the labels included in the lookup table\nare extracted.\n\nIMPORTANT: your label image must be of an integer type.\n\nFormerly: scil_split_volume_by_labels.py\n\npositional arguments:\n in_label Path of the input label file, in a format supported by Nibabel.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Put all output images in a specific directory.\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n --scilpy_lut {freesurfer_subcortical,dk_aggregate_structures,freesurfer_desikan_killiany}\n Lookup table, in the file scilpy/data/LUT, used to name the output files.\n --custom_lut CUSTOM_LUT\n Path of the lookup table file, used to name the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "lateral", - "lateral" - ], - [ - "supported", - "supported" - ], - [ - "image", - "image" - ], - [ - "gyrus", - "occipital", - "occipital" - ], - [ - "Data", - "data", - "data" - ], - [ - "left", - "left" - ], - [ - "thalamus", - "thalamus" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ] - ], - "keywords": [] - }, - { - "name": "scil_lesions_info", - "docstring": "This script will output information about lesion load in bundle(s).\nThe input can be either streamlines, a binary bundle map, or a bundle voxel\nlabel map.\n\nTo be considered a valid lesion, the lesion volume must be at least\nmin_lesion_vol mm3. 
This avoids the detection of thousands of single-voxel\nlesions if an automatic lesion segmentation tool is used.\n\nFormerly: scil_analyse_lesions_load.py", - "help": "usage: scil_lesions_info.py [-h]\n [--bundle BUNDLE | --bundle_mask BUNDLE_MASK | --bundle_labels_map BUNDLE_LABELS_MAP]\n [--min_lesion_vol MIN_LESION_VOL]\n [--out_lesion_atlas FILE]\n [--out_lesion_stats FILE]\n [--out_streamlines_stats FILE] [--indent INDENT]\n [--sort_keys] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_lesion out_json\n\nThis script will output information about lesion load in bundle(s).\nThe input can be either streamlines, a binary bundle map, or a bundle voxel\nlabel map.\n\nTo be considered a valid lesion, the lesion volume must be at least\nmin_lesion_vol mm3. This avoids the detection of thousands of single-voxel\nlesions if an automatic lesion segmentation tool is used.\n\nFormerly: scil_analyse_lesions_load.py\n\npositional arguments:\n in_lesion Binary mask of the lesion(s) (.nii.gz).\n out_json Output file for lesion information (.json).\n\noptions:\n -h, --help show this help message and exit\n --bundle BUNDLE Path of the bundle file (.trk).\n --bundle_mask BUNDLE_MASK\n Path of the bundle binary mask (.nii.gz).\n --bundle_labels_map BUNDLE_LABELS_MAP\n Path of the bundle labels map (.nii.gz).\n --min_lesion_vol MIN_LESION_VOL\n Minimum lesion volume in mm3 [7].\n --out_lesion_atlas FILE\n Save the labeled lesion(s) map (.nii.gz).\n --out_lesion_stats FILE\n Save the lesion-wise volume measure (.json).\n --out_streamlines_stats FILE\n Save the lesion-wise streamline count (.json).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "maps", - "map" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "tool", - "tool" - ], - [ - "valid", - "valid" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "considered", - "considered" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - },
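A minimal scipy sketch of the min_lesion_vol rule described above: label the connected components of the binary lesion mask and drop those below the volume threshold (hypothetical code, not the script's implementation):

    import numpy as np
    from scipy import ndimage

    def filter_small_lesions(mask, voxel_vol_mm3, min_lesion_vol=7.0):
        labels, _ = ndimage.label(mask)            # connected components
        sizes = np.bincount(labels.ravel())        # voxel count per label
        keep = sizes * voxel_vol_mm3 >= min_lesion_vol
        keep[0] = False                            # index 0 is the background
        return np.where(keep[labels], labels, 0)   # zero out tiny lesions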
- { - "name": "scil_mti_adjust_B1_header", - "docstring": "Correct a B1 map header problem by applying the scaling (slope) and setting\nthe slope to 1.", - "help": "usage: scil_mti_adjust_B1_header.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_B1_map out_B1_map in_B1_json\n\nCorrect a B1 map header problem by applying the scaling (slope) and setting\nthe slope to 1.\n\npositional arguments:\n in_B1_map Path to input B1 map file.\n out_B1_map Path to output B1 map file.\n in_B1_json Json file of the B1 map.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "level", - "level" - ], - [ - "applied", - "applying" - ], - [ - "maps", - "map" - ] - ], - "keywords": [] - }, - { - "name": "scil_mti_maps_MT", - "docstring": "This script computes two myelin indices maps from the Magnetization Transfer\n(MT) images.\nMagnetization Transfer is a contrast mechanism in tissue resulting from the\nproton exchange between non-aqueous protons (from macromolecules and their\nclosely associated water molecules, the \"bound\" pool) and protons in the free\nwater pool called aqueous protons. This exchange attenuates the MRI signal,\nintroducing microstructure-dependent contrast. MT's effect reflects the\nrelative density of macromolecules such as proteins and lipids; it has been\nassociated with myelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse saturating the\nprotons on non-aqueous molecules, by applying a frequency irradiation. The MT maps are\nobtained using three or four contrasts: a single positive frequency image\nand/or a single negative frequency image, and two unsaturated contrasts as\nreference. These two references should be acquired with predominant PD\n(proton density) and T1 weighting at different excitation flip angles\n(a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all input must have a matching json file with the same filename\n - all contrasts must have the same number of echoes and be coregistered\n between them before running the script.\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\n\nThe output consists of an MT_native_maps folder containing the 2 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n The MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n The MT saturation is a pseudo-quantitative map representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\n\nThe final maps from MT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 1 or 2 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation.\nFor both methods, 
the nominal value of the B1 map can be set with\n --B1_nominal value\n\n\n>>> scil_mti_maps_MT.py path/to/output/directory\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_positive path/to/echo*pos.nii.gz --in_negative path/to/echo*neg.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.", - "help": "usage: scil_mti_maps_MT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n [--in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]]\n [--extended] [--filtering]\n [--in_jsons PD_json T1_json | --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time]\n [--in_B1_map IN_B1_MAP]\n [--B1_correction_method {empiric,model_based}]\n [--B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]]\n [--B1_nominal B1_NOMINAL]\n [--B1_smooth_dims B1_SMOOTH_DIMS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dir\n\nThis script computes two myelin indices maps from the Magnetization Transfer\n(MT) images.\nMagnetization Transfer is a contrast mechanism in tissue resulting from the\nproton exchange between non-aqueous protons (from macromolecules and their\nclosely associated water molecules, the \"bound\" pool) and protons in the free\nwater pool called aqueous protons. This exchange attenuates the MRI signal,\nintroducing microstructure-dependent contrast. MT's effect reflects the\nrelative density of macromolecules such as proteins and lipids; it has been\nassociated with myelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse saturating the\nprotons on non-aqueous molecules, by applying a frequency irradiation. The MT maps are\nobtained using three or four contrasts: a single positive frequency image\nand/or a single negative frequency image, and two unsaturated contrasts as\nreference. 
These two references should be acquired with predominant PD\n(proton density) and T1 weighting at different excitation flip angles\n(a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all input must have a matching json file with the same filename\n - all contrasts must have the same number of echoes and be coregistered\n between them before running the script.\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\nThe output consists of an MT_native_maps folder containing the 2 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n The MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n The MT saturation is a pseudo-quantitative map representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\nThe final maps from MT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 1 or 2 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n>>> scil_mti_maps_MT.py path/to/output/directory\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_positive path/to/echo*pos.nii.gz --in_negative path/to/echo*neg.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.\n\npositional arguments:\n out_dir Path to output folder.\n\noptions:\n -h, --help show this help message and exit\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n --mask MASK Path to the binary brain mask.\n --extended If set, outputs the folder Complementary_maps.\n --filtering Gaussian filtering to remove Gibbs ringing. Not recommended.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nContrast maps:\n Path to echoes corresponding to contrast images. 
All contrasts must have \n the same number of echoes and be coregistered between them. Use * to include all echoes. \n The in_mtoff_pd input and at least one of in_positive or in_negative are required.\n\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n Path to all echoes corresponding to the positive frequency \n saturation pulse.\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n Path to all echoes corresponding to the negative frequency \n saturation pulse.\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n Path to all echoes corresponding to the predominant PD \n (proton density) weighting images with no saturation pulse.\n --in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]\n Path to all echoes corresponding to the predominant T1 \n weighting images with no saturation pulse. This one is optional, \n since it is only needed for the calculation of MTsat. \n Acquisition parameters should also be set with this image.\n\nAcquisition parameters:\n Acquisition parameters required for MTsat and ihMTsat calculation. \n These are the excitation flip angles (a_PD, a_T1), in DEGREES, and \n repetition times (TR_PD, TR_T1) of the PD and T1 images, in SECONDS. \n Can be given through json files (--in_jsons) or directly (--in_acq_parameters).\n\n --in_jsons PD_json T1_json\n Path to MToff PD json file and MToff T1 json file, in that order. \n The acquisition parameters will be extracted from these files. \n Must come from a Philips acquisition; otherwise, use --in_acq_parameters.\n --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time\n Acquisition parameters in that order: flip angle of mtoff_PD, \n flip angle of mtoff_T1, repetition time of mtoff_PD, \n repetition time of mtoff_T1\n\nB1 correction:\n --in_B1_map IN_B1_MAP\n Path to the B1 map coregistered to the MT contrasts.\n --B1_correction_method {empiric,model_based}\n Choice of B1 correction method. Choose between empiric and model-based. \n Note that the model-based method requires a B1 fitvalues file. \n Both methods will only correct the saturation measures. [empiric]\n --B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]\n Path to B1 fitvalues files obtained externally. Should be one .mat \n file per input MT-on image, given in this specific order: \n positive frequency saturation, negative frequency saturation.\n --B1_nominal B1_NOMINAL\n Nominal value for the B1 map. For Philips, should be 100. [100]\n --B1_smooth_dims B1_SMOOTH_DIMS\n Dimension of the square window used for B1 smoothing, in number of voxels. [5]\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "methods", - "method" - ], - [ - "create", - "create" - ], - [ - "contrast", - "contrast" - ], - [ - "Data", - "data", - "data" - ], - [ - "based", - "based" - ], - [ - "maps", - "map" - ], - [ - "white", - "white" - ], - [ - "image", - "image" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ], - [ - "weighted", - "weighted" - ], - [ - "positive", - "positive" - ], - [ - "signal", - "signal" - ], - [ - "comprised", - "consists" - ], - [ - "brain", - "brain" - ], - [ - "positive", - "negative" - ], - [ - "maps", - "maps" - ], - [ - "voxel", - "voxels" - ], - [ - "matter", - "matter" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "methods" - ], - [ - "parameters", - "parameters" - ] - ], - "keywords": [] - },
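For intuition only, the textbook magnetization transfer ratio behind the MTR.nii.gz map above compares an MT-weighted image to the unsaturated reference; the script's actual computation (echo handling, B1 correction) is more involved. A sketch with hypothetical filenames, assuming nibabel:

    import numpy as np
    import nibabel as nib

    ref = nib.load("mtoff_PD.nii.gz").get_fdata()    # unsaturated reference
    sat = nib.load("positive.nii.gz").get_fdata()    # MT-weighted (saturated) image
    with np.errstate(divide="ignore", invalid="ignore"):
        mtr = 100.0 * (ref - sat) / ref              # percent signal lost to MT
    mtr = np.nan_to_num(mtr)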
Magnetization\nTransfer is a contrast mechanism in tissue resulting from the proton exchange\nbetween non-aqueous protons (from macromolecules and their closely associated\nwater molecules, the \"bound\" pool) and protons in the free water pool called\naqueous protons. This exchange attenuates the MRI signal, introducing\nmicrostructure-dependent contrast. MT's effect reflects the relative density\nof macromolecules such as proteins and lipids, it has been associated with\nmyelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse prior to image\nacquisition (a prepulse), saturating the protons on non-aqueous molecules,\nby applying different frequency irradiation. The two MT maps and two ihMT maps\nare obtained using six contrasts: single positive frequency image, single\nnegative frequency image, dual alternating positive/negative frequency image,\ndual alternating negative/positive frequency image (saturated images);\nand two unsaturated contrasts as reference. These two references should be\nacquired with predominant PD (proton density) and T1 weighting at different\nexcitation flip angles (a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all contrasts must have a same number of echoes and coregistered\n between them before running the script\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\n\nThe output consists of a ihMT_native_maps folder containing the 4 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n - ihMTR.nii.gz : inhomogeneous Magnetization Transfer Ratio map\n The (ih)MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n - ihMTsat.nii.gz : inhomogeneous Magnetization Transfer saturation map\n The (ih)MT saturation is a pseudo-quantitative maps representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - altnp.nii.gz : dual alternating negative and positive frequency image\n - altpn.nii.gz : dual alternating positive and negative frequency image\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_d.nii.gz : MTsat computed from the mean dual frequency images\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\n\nThe final maps from ihMT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 3 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given 
in this order: positive frequency saturation, negative frequency\n saturation, dual frequency saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n\n>>> scil_mti_maps_ihMT.py path/to/output/directory\n --in_altnp path/to/echo*altnp.nii.gz --in_altpn path/to/echo*altpn.nii.gz\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_negative path/to/echo*neg.nii.gz --in_positive path/to/echo*pos.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.", - "help": "usage: scil_mti_maps_ihMT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]\n --in_altnp IN_ALTNP [IN_ALTNP ...] --in_altpn\n IN_ALTPN [IN_ALTPN ...] --in_negative IN_NEGATIVE\n [IN_NEGATIVE ...] --in_positive IN_POSITIVE\n [IN_POSITIVE ...] --in_mtoff_pd IN_MTOFF_PD\n [IN_MTOFF_PD ...]\n [--in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]]\n [--extended] [--filtering]\n [--in_jsons PD_json T1_json | --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time]\n [--in_B1_map IN_B1_MAP]\n [--B1_correction_method {empiric,model_based}]\n [--B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]]\n [--B1_nominal B1_NOMINAL]\n [--B1_smooth_dims B1_SMOOTH_DIMS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n out_dir\n\nThis script computes four myelin indices maps from the Magnetization Transfer\n(MT) and inhomogeneous Magnetization Transfer (ihMT) images. Magnetization\nTransfer is a contrast mechanism in tissue resulting from the proton exchange\nbetween non-aqueous protons (from macromolecules and their closely associated\nwater molecules, the \"bound\" pool) and protons in the free water pool called\naqueous protons. This exchange attenuates the MRI signal, introducing\nmicrostructure-dependent contrast. MT's effect reflects the relative density\nof macromolecules such as proteins and lipids, it has been associated with\nmyelin content in white matter of the brain.\n\nDifferent contrasts can be done with an off-resonance pulse prior to image\nacquisition (a prepulse), saturating the protons on non-aqueous molecules,\nby applying different frequency irradiation. The two MT maps and two ihMT maps\nare obtained using six contrasts: single positive frequency image, single\nnegative frequency image, dual alternating positive/negative frequency image,\ndual alternating negative/positive frequency image (saturated images);\nand two unsaturated contrasts as reference. 
These two references should be\nacquired with predominant PD (proton density) and T1 weighting at different\nexcitation flip angles (a_PD, a_T1) and repetition times (TR_PD, TR_T1).\n\nInput Data recommendation:\n - it is recommended to use dcm2niix (v1.0.20200331) to convert data\n https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331\n - dcm2niix conversion will create all echo files for each contrast and\n corresponding json files\n - all contrasts must have a same number of echoes and coregistered\n between them before running the script\n - Mask must be coregistered to the echo images\n - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)\n\nThe output consists of a ihMT_native_maps folder containing the 4 myelin maps:\n - MTR.nii.gz : Magnetization Transfer Ratio map\n - ihMTR.nii.gz : inhomogeneous Magnetization Transfer Ratio map\n The (ih)MT ratio is a measure reflecting the amount of bound protons.\n - MTsat.nii.gz : Magnetization Transfer saturation map\n - ihMTsat.nii.gz : inhomogeneous Magnetization Transfer saturation map\n The (ih)MT saturation is a pseudo-quantitative maps representing\n the signal change between the bound and free water pools.\n\nAs an option, the Complementary_maps folder contains the following images:\n - altnp.nii.gz : dual alternating negative and positive frequency image\n - altpn.nii.gz : dual alternating positive and negative frequency image\n - positive.nii.gz : single positive frequency image\n - negative.nii.gz : single negative frequency image\n - mtoff_PD.nii.gz : unsaturated proton density weighted image\n - mtoff_T1.nii.gz : unsaturated T1 weighted image\n - MTsat_d.nii.gz : MTsat computed from the mean dual frequency images\n - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image\n - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image\n - R1app.nii.gz : Apparent R1 map computed for MTsat.\n - B1_map.nii.gz : B1 map after correction and smoothing (if given).\n\nThe final maps from ihMT_native_maps can be corrected for B1+ field\n inhomogeneity, using either an empiric method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method empiric\n or a model-based method with\n --in_B1_map option, suffix *B1_corrected is added for each map.\n --B1_correction_method model_based\n --B1_fitValues 3 .mat files, obtained externally from\n https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,\n and given in this order: positive frequency saturation, negative frequency\n saturation, dual frequency saturation.\nFor both methods, the nominal value of the B1 map can be set with\n --B1_nominal value\n\n>>> scil_mti_maps_ihMT.py path/to/output/directory\n --in_altnp path/to/echo*altnp.nii.gz --in_altpn path/to/echo*altpn.nii.gz\n --in_mtoff_pd path/to/echo*mtoff.nii.gz\n --in_negative path/to/echo*neg.nii.gz --in_positive path/to/echo*pos.nii.gz\n --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz\n --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json\n\nBy default, the script uses all the echoes available in the input folder.\nIf you want to use a single echo, replace the * with the specific number of\nthe echo.\n\npositional arguments:\n out_dir Path to output folder.\n\noptions:\n -h, --help show this help message and exit\n --out_prefix OUT_PREFIX\n Prefix to be used for each output image.\n --mask MASK Path to the binary brain mask.\n --extended If set, outputs the folder Complementary_maps.\n 
--filtering Gaussian filtering to remove Gibbs ringing. Not recommended.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nContrast maps:\n Path to echoes corresponding to contrast images. All contrasts must have \n the same number of echoes and must be coregistered to each other. Use * to include all echoes.\n\n --in_altnp IN_ALTNP [IN_ALTNP ...]\n Path to all echoes corresponding to the alternation of \n negative and positive frequency saturation pulse.\n --in_altpn IN_ALTPN [IN_ALTPN ...]\n Path to all echoes corresponding to the alternation of \n positive and negative frequency saturation pulse.\n --in_negative IN_NEGATIVE [IN_NEGATIVE ...]\n Path to all echoes corresponding to the negative frequency \n saturation pulse.\n --in_positive IN_POSITIVE [IN_POSITIVE ...]\n Path to all echoes corresponding to the positive frequency \n saturation pulse.\n --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]\n Path to all echoes corresponding to the predominant PD \n (proton density) weighting images with no saturation pulse.\n --in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]\n Path to all echoes corresponding to the predominant T1 \n weighting images with no saturation pulse. This one is optional, \n since it is only needed for the calculation of MTsat and ihMTsat. \n Acquisition parameters should also be set with this image.\n\nAcquisition parameters:\n Acquisition parameters required for MTsat and ihMTsat calculation. \n These are the excitation flip angles (a_PD, a_T1), in DEGREES, and \n repetition times (TR_PD, TR_T1) of the PD and T1 images, in SECONDS. \n Can be given through json files (--in_jsons) or directly (--in_acq_parameters).\n\n --in_jsons PD_json T1_json\n Path to MToff PD json file and MToff T1 json file, in that order. \n The acquisition parameters will be extracted from these files. \n Must come from a Philips acquisition; otherwise, use in_acq_parameters.\n --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time\n Acquisition parameters in that order: flip angle of mtoff_PD, \n flip angle of mtoff_T1, repetition time of mtoff_PD, \n repetition time of mtoff_T1\n\nB1 correction:\n --in_B1_map IN_B1_MAP\n Path to the B1 map, coregistered to the MT contrasts.\n --B1_correction_method {empiric,model_based}\n Choice of B1 correction method. Choose between empiric and model-based. \n Note that the model-based method requires a B1 fitvalues file. \n Both methods will only correct the saturation measures. [empiric]\n --B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]\n Path to B1 fitvalues files obtained externally. Should be one .mat \n file per input MT-on image, given in this specific order: \n positive frequency saturation, negative frequency saturation.\n --B1_nominal B1_NOMINAL\n Nominal value for the B1 map. For Philips, should be 100. [100]\n --B1_smooth_dims B1_SMOOTH_DIMS\n Dimension of the square window used for B1 smoothing, in number of voxels.
[5]\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "methods", - "method" - ], - [ - "create", - "create" - ], - [ - "contrast", - "contrast" - ], - [ - "Data", - "data", - "data" - ], - [ - "based", - "based" - ], - [ - "maps", - "map" - ], - [ - "white", - "white" - ], - [ - "image", - "image" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ], - [ - "applied", - "applying" - ], - [ - "weighted", - "weighted" - ], - [ - "positive", - "positive" - ], - [ - "signal", - "signal" - ], - [ - "comprised", - "consists" - ], - [ - "brain", - "brain" - ], - [ - "positive", - "negative" - ], - [ - "maps", - "maps" - ], - [ - "voxel", - "voxels" - ], - [ - "matter", - "matter" - ], - [ - "binary", - "binary" - ], - [ - "methods", - "methods" - ], - [ - "parameters", - "parameters" - ] - ], - "keywords": [] - }, - { - "name": "scil_plot_stats_per_point", - "docstring": "Plot all mean/std per point for a subject or population json file from\ntractometry-flow.\nWARNING: For population, the displayed STDs only show the variation\nof the means. They do not account for intra-subject STDs.\n\nFormerly: scil_plot_mean_std_per_point.py", - "help": "usage: scil_plot_stats_per_point.py [-h] [--stats_over_population]\n [--nb_pts NB_PTS] [--display_means]\n [--fill_color FILL_COLOR | --dict_colors DICT_COLORS]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_json out_dir\n\nPlot all mean/std per point for a subject or population json file from\ntractometry-flow.\nWARNING: For population, the displayed STDs only show the variation\nof the means. They do not account for intra-subject STDs.\n\nFormerly: scil_plot_mean_std_per_point.py\n\npositional arguments:\n in_json JSON file containing the mean/std per point. For example, can be created using scil_bundle_mean_std.py.\n out_dir Output directory.\n\noptions:\n -h, --help show this help message and exit\n --stats_over_population\n If set, consider the input stats to be over an entire population and not subject-based.\n --nb_pts NB_PTS Force the number of divisions for the bundles.\n Avoid unequal plots across datasets, replace missing data with zeros.\n --display_means Display the subjects' means as semi-transparent lines.\n Poor results when the number of subjects is high.\n --fill_color FILL_COLOR\n Hexadecimal RGB color filling the region between mean +/- std. The hexadecimal RGB color should be formatted as 0xRRGGBB.\n --dict_colors DICT_COLORS\n Dictionary mapping basename to color. Same convention as --color.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "region", - "regions", - "region" - ], - [ - "population", - "population" - ], - [ - "subject", - "subject" - ], - [ - "high", - "high" - ], - [ - "Data", - "data", - "data" - ], - [ - "subjects", - "subjects" - ], - [ - "based", - "based" - ], - [ - "bundles", - "bundles" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_qball_metrics", - "docstring": "Script to compute the Constant Solid Angle (CSA) or Analytical Q-ball model,\nthe generalized fractional anisotropy (GFA) and the peaks of the model.\n\nBy default, will output all possible files, using default names.
Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Descoteaux et al MRM 2007, Aganj et al MRM 2009] for details and\n[Cote et al MEDIA 2013] for quantitative comparisons.\n\nFormerly: scil_compute_qball_metrics.py", - "help": "usage: scil_qball_metrics.py [-h] [-f] [--sh_order SH_ORDER] [--mask MASK]\n [--use_qball] [--not_all] [--gfa GFA]\n [--peaks PEAKS] [--peak_indices PEAK_INDICES]\n [--sh SH] [--nufo NUFO] [--a_power A_POWER]\n [--b0_threshold thr] [--skip_b0_check]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n in_dwi in_bval in_bvec\n\nScript to compute the Constant Solid Angle (CSA) or Analytical Q-ball model,\nthe generalized fractional anisotropy (GFA) and the peaks of the model.\n\nBy default, will output all possible files, using default names. Specific names\ncan be specified using the file flags specified in the \"File flags\" section.\n\nIf --not_all is set, only the files specified explicitly by the flags will be\noutput.\n\nSee [Descoteaux et al MRM 2007, Aganj et al MRM 2009] for details and\n[Cote et al MEDIA 2013] for quantitative comparisons.\n\nFormerly: scil_compute_qball_metrics.py\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bvals file, in FSL format.\n in_bvec Path of the bvecs file, in FSL format.\n\noptions:\n -h, --help show this help message and exit\n -f Force overwriting of the output files.\n --sh_order SH_ORDER Spherical harmonics order. Must be a positive even number [4].\n --mask MASK Path to a binary mask. Only data inside the mask will be used for computations and reconstruction [None].\n --use_qball If set, qball will be used as the odf reconstruction model instead of CSA.\n --not_all If set, will only save the files specified using the following flags.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n [Default: 20] \n * Note. We would expect to find at least one b-value in the \n range [0, b0_threshold]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n\nFile flags:\n --gfa GFA Output filename for the generalized fractional anisotropy [gfa.nii.gz].\n --peaks PEAKS Output filename for the extracted peaks [peaks.nii.gz].\n --peak_indices PEAK_INDICES\n Output filename for the generated peaks indices on the sphere [peaks_indices.nii.gz].\n --sh SH Output filename for the spherical harmonics coefficients [sh.nii.gz].\n --nufo NUFO Output filename for the NUFO map [nufo.nii.gz].\n --a_power A_POWER Output filename for the anisotropic power map[anisotropic_power.nii.gz].\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "maps", - "map" - ], - [ - "high", - "high" - ], - [ - "diffusion", - "diffusion" - ], - [ - "positive", - "positive" - ], - [ - "processes", - "processes" - ], - [ - "Data", - "data", - "data" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ] - ], - "keywords": [] - }, - { - "name": "scil_rgb_convert", - "docstring": "Converts a RGB image encoded as a 4D image to a RGB image encoded as\na 3D image, or vice versa.\n\nTypically, most software tools used in the SCIL (including MI-Brain) use\nthe former, while Trackvis uses the latter.\n\nInput\n-Case 1: 4D image where the 4th dimension contains 3 values.\n-Case 2: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value.\n\nOutput\n-Case 1: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value (uint8).\n-Case 2: 4D image where the 4th dimension contains 3 values (uint8).\n\nFormerly: scil_convert_rgb.py", - "help": "usage: scil_rgb_convert.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image\n\nConverts a RGB image encoded as a 4D image to a RGB image encoded as\na 3D image, or vice versa.\n\nTypically, most software tools used in the SCIL (including MI-Brain) use\nthe former, while Trackvis uses the latter.\n\nInput\n-Case 1: 4D image where the 4th dimension contains 3 values.\n-Case 2: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value.\n\nOutput\n-Case 1: 3D image, in Trackvis format where each voxel contains a\n tuple of 3 elements, one for each value (uint8).\n-Case 2: 4D image where the 4th dimension contains 3 values (uint8).\n\nFormerly: scil_convert_rgb.py\n\npositional arguments:\n in_image name of input RGB image.\n Either 4D or 3D image.\n out_image name of output RGB image.\n Either 3D or 4D image.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "tool", - "tools" - ], - [ - "image", - "image" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_sh_convert", - "docstring": "Convert a SH file between the two of the following basis choices:\n'descoteaux07', 'descoteaux07_legacy', 'tournier07' or 'tournier07_legacy'.\nUsing the sh_basis argument, both the input and the output SH bases must be\ngiven, in the order. 
For more information about the bases, see\nhttps://docs.dipy.org/stable/theory/sh_basis.html.\n\nFormerly: scil_convert_sh_basis.py", - "help": "usage: scil_sh_convert.py [-h] [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_sh out_sh\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n\nConvert a SH file between the two of the following basis choices:\n'descoteaux07', 'descoteaux07_legacy', 'tournier07' or 'tournier07_legacy'.\nUsing the sh_basis argument, both the input and the output SH bases must be\ngiven, in the order. For more information about the bases, see\nhttps://docs.dipy.org/stable/theory/sh_basis.html.\n\nFormerly: scil_convert_sh_basis.py\n\npositional arguments:\n in_sh Input SH filename. (nii or nii.gz)\n out_sh Output SH filename. (nii or nii.gz)\n {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Both the input and output bases are required, in that order.\n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy', 'tournier07']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n\noptions:\n -h, --help show this help message and exit\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "processes", - "processes" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_sh_fusion", - "docstring": "Merge a list of Spherical Harmonics files.\n\nThis merges the coefficients of multiple Spherical Harmonics files by taking,\nfor each coefficient, the one with the largest magnitude.\n\nCan be used to merge fODFs computed from different shells into 1, while\nconserving the most relevant information.\n\nBased on [1] and [2].\n\nFormerly: scil_merge_sh.py", - "help": "usage: scil_sh_fusion.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_shs [in_shs ...] out_sh\n\nMerge a list of Spherical Harmonics files.\n\nThis merges the coefficients of multiple Spherical Harmonics files by taking,\nfor each coefficient, the one with the largest magnitude.\n\nCan be used to merge fODFs computed from different shells into 1, while\nconserving the most relevant information.\n\nBased on [1] and [2].\n\nFormerly: scil_merge_sh.py\n\npositional arguments:\n in_shs List of SH files.\n out_sh output SH file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReference:\n[1] Garyfallidis, E., Zucchelli, M., Houde, J-C., Descoteaux, M.\n How to perform best ODF reconstruction from the Human Connectome\n Project sampling scheme?\n ISMRM 2014.\n\n[2] Khachaturian, M. H., Wisco, J. J., & Tuch, D. S. (2007). 
Boosting the\n sampling efficiency of q\u2010ball imaging using multiple wavevector fusion.\n Magnetic Resonance in Medicine: An Official Journal of the International\n Society for Magnetic Resonance in Medicine, 57(2), 289-296.\n", - "synonyms": [ - [ - "human", - "human" - ], - [ - "connectome", - "connectome" - ], - [ - "imaging", - "imaging" - ], - [ - "project", - "project" - ], - [ - "based", - "based" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_sh_to_aodf", - "docstring": "Script to estimate asymmetric ODFs (aODFs) from a spherical harmonics image.\n\nTwo methods are available:\n * Unified filtering [1] combines four asymmetric filtering methods into\n a single equation and relies on a combination of four gaussian filters.\n * Cosine filtering [2] is a simpler implementation using cosine distance\n for assigning weights to neighbours.\n\nUnified filtering can be accelerated using OpenCL with the option --use_opencl.\nMake sure you have pyopencl installed before using this option. By default, the\nOpenCL program will run on the cpu. To use a gpu instead, also specify the\noption --device gpu.", - "help": "usage: scil_sh_to_aodf.py [-h] [--out_sym OUT_SYM]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--method {unified,cosine}]\n [--sigma_spatial SIGMA_SPATIAL]\n [--sigma_align SIGMA_ALIGN]\n [--sigma_range SIGMA_RANGE]\n [--sigma_angle SIGMA_ANGLE] [--disable_spatial]\n [--disable_align] [--disable_range]\n [--include_center] [--win_hwidth WIN_HWIDTH]\n [--sharpness SHARPNESS] [--device {cpu,gpu}]\n [--use_opencl] [--patch_size PATCH_SIZE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_sh\n\nScript to estimate asymmetric ODFs (aODFs) from a spherical harmonics image.\n\nTwo methods are available:\n * Unified filtering [1] combines four asymmetric filtering methods into\n a single equation and relies on a combination of four gaussian filters.\n * Cosine filtering [2] is a simpler implementation using cosine distance\n for assigning weights to neighbours.\n\nUnified filtering can be accelerated using OpenCL with the option --use_opencl.\nMake sure you have pyopencl installed before using this option. By default, the\nOpenCL program will run on the cpu. To use a gpu instead, also specify the\noption --device gpu.\n\npositional arguments:\n in_sh Path to the input file.\n out_sh File name for averaged signal.\n\noptions:\n -h, --help show this help message and exit\n --out_sym OUT_SYM Name of optional symmetric output. [None]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere used for the SH to SF projection. 
[repulsion200]\n --method {unified,cosine}\n Method for estimating asymmetric ODFs [unified].\n One of:\n 'unified': Unified filtering [1].\n 'cosine' : Cosine-based filtering [2].\n --device {cpu,gpu} Device to use for execution. [cpu]\n --use_opencl Accelerate code using OpenCL (requires pyopencl\n and a working OpenCL implementation).\n --patch_size PATCH_SIZE\n OpenCL patch size. [40]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nShared filter arguments:\n --sigma_spatial SIGMA_SPATIAL\n Standard deviation for spatial distance. [1.0]\n\nUnified filter arguments:\n --sigma_align SIGMA_ALIGN\n Standard deviation for alignment filter. [0.8]\n --sigma_range SIGMA_RANGE\n Standard deviation for range filter\n *relative to SF range of image*. [0.2]\n --sigma_angle SIGMA_ANGLE\n Standard deviation for angular filter\n (disabled by default).\n --disable_spatial Disable spatial filtering.\n --disable_align Disable alignment filtering.\n --disable_range Disable range filtering.\n --include_center Include center voxel in neighourhood.\n --win_hwidth WIN_HWIDTH\n Filtering window half-width. Defaults to 3*sigma_spatial.\n\nCosine filter arguments:\n --sharpness SHARPNESS\n Specify sharpness factor to use for\n weighted average. [1.0]\n\n[1] Poirier and Descoteaux, 2024, \"A Unified Filtering Method for Estimating\n Asymmetric Orientation Distribution Functions\", Neuroimage, vol. 287,\n https://doi.org/10.1016/j.neuroimage.2024.120516\n\n[2] Poirier et al, 2021, \"Investigating the Occurrence of Asymmetric Patterns\n in White Matter Fiber Orientation Distribution Functions\", ISMRM 2021\n (abstract 0865)\n", - "synonyms": [ - [ - "spatial", - "spatial" - ], - [ - "methods", - "method" - ], - [ - "weighted", - "weighted" - ], - [ - "working", - "working" - ], - [ - "white", - "white" - ], - [ - "orientation", - "orientation" - ], - [ - "image", - "image" - ], - [ - "signal", - "signal" - ], - [ - "projection", - "projection" - ], - [ - "based", - "based" - ], - [ - "matter", - "matter" - ], - [ - "methods", - "methods" - ], - [ - "average", - "average" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "function", - "functions", - "functions" - ], - [ - "occurrence", - "occurrence" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_sh_to_rish", - "docstring": "Compute the RISH (Rotationally Invariant Spherical Harmonics) features of an SH\nsignal [1].\n\nEach RISH feature map is the total energy of its associated order.\nMathematically, it is the sum of the squared SH coefficients of the SH order.\n\nThis script supports both symmetrical and asymmetrical SH images as input, of\nany SH order.\n\nEach RISH feature will be saved as a separate file.\n\n[1] Mirzaalian, Hengameh, et al. 
\"Harmonizing diffusion MRI data across\nmultiple sites and scanners.\" MICCAI 2015.\nhttps://scholar.harvard.edu/files/hengameh/files/miccai2015.pdf\n\nFormerly: scil_compute_rish_from_sh.py", - "help": "usage: scil_sh_to_rish.py [-h] [--full_basis] [--mask MASK]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_prefix\n\nCompute the RISH (Rotationally Invariant Spherical Harmonics) features of an SH\nsignal [1].\n\nEach RISH feature map is the total energy of its associated order.\nMathematically, it is the sum of the squared SH coefficients of the SH order.\n\nThis script supports both symmetrical and asymmetrical SH images as input, of\nany SH order.\n\nEach RISH feature will be saved as a separate file.\n\n[1] Mirzaalian, Hengameh, et al. \"Harmonizing diffusion MRI data across\nmultiple sites and scanners.\" MICCAI 2015.\nhttps://scholar.harvard.edu/files/hengameh/files/miccai2015.pdf\n\nFormerly: scil_compute_rish_from_sh.py\n\npositional arguments:\n in_sh Path of the sh image. They can be formatted in any sh basis, but we \n expect it to be a symmetrical one. Else, provide --full_basis.\n out_prefix Prefix of the output RISH files to save. Suffixes will be \n based on the sh orders.\n\noptions:\n -h, --help show this help message and exit\n --full_basis Input SH image uses a full SH basis (asymmetrical).\n --mask MASK Path to a binary mask.\n Only data inside the mask will be used for computation.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "maps", - "map" - ], - [ - "image", - "image" - ], - [ - "diffusion", - "diffusion" - ], - [ - "signal", - "signal" - ], - [ - "Data", - "data", - "data" - ], - [ - "based", - "based" - ], - [ - "binary", - "binary" - ], - [ - "level", - "level" - ], - [ - "total", - "total" - ] - ], - "keywords": [] - }, - { - "name": "scil_sh_to_sf", - "docstring": "Script to sample SF values from a Spherical Harmonics signal. Outputs a Nifti\nfile with the SF values and an associated .bvec file with the chosen\ndirections.\n\nIf converting from SH to a DWI-like SF volume, --in_bval and --in_b0 need\nto be provided to concatenate the b0 image to the SF, and to generate the new\nbvals file. Otherwise, no .bval file will be created.\n\nFormerly: scil_compute_sf_from_sh.py", - "help": "usage: scil_sh_to_sf.py [-h]\n (--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724} | --in_bvec IN_BVEC)\n [--dtype {float32,float64}] [--in_bval IN_BVAL]\n [--in_b0 IN_B0] [--out_bval OUT_BVAL]\n [--out_bvec OUT_BVEC] [--b0_scaling]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--full_basis] [--b0_threshold thr] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_sh out_sf\n\nScript to sample SF values from a Spherical Harmonics signal. Outputs a Nifti\nfile with the SF values and an associated .bvec file with the chosen\ndirections.\n\nIf converting from SH to a DWI-like SF volume, --in_bval and --in_b0 need\nto be provided to concatenate the b0 image to the SF, and to generate the new\nbvals file. 
Otherwise, no .bval file will be created.\n\nFormerly: scil_compute_sf_from_sh.py\n\npositional arguments:\n in_sh Path of the SH volume.\n out_sf Name of the output SF file to save (bvals/bvecs will be automatically named when necessary).\n\noptions:\n -h, --help show this help message and exit\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Sphere used for the SH to SF projection. \n --in_bvec IN_BVEC Directions used for the SH to SF projection. \n If given, --in_bval must also be provided.\n --dtype {float32,float64}\n Datatype to use for SF computation and output array. [float32]\n --in_bval IN_BVAL b-value file, in FSL format, used to assign a b-value to the \n output SF and generate a `.bval` file.\n - If used, --out_bval is required.\n - The output bval will contain one b-value per point in the SF \n output (i.e. one per point on the --sphere or one per --in_bvec.)\n - The values of the output bval will all be set to the same b-value:\n the average of your in_bval. (Any b0 found in this file, i.e. \n b-values under --b0_threshold, will be removed beforehand.)\n - To add b0s to both the SF volume and the --out_bval file, use --in_b0.\n --in_b0 IN_B0 b0 volume to concatenate to the final SF volume.\n --out_bval OUT_BVAL Optional output bval file.\n --out_bvec OUT_BVEC Optional output bvec file.\n --b0_scaling Scale resulting SF by the b0 image (--in_b0 must be given).\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --full_basis If true, use a full basis for the input SH coefficients.\n --b0_threshold thr Threshold under which b-values are considered to be b0s.\n Default if not set is 20.\n This value is used with option --in_bval only: any b0 found in the in_bval will be removed.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "image", - "image" - ], - [ - "signal", - "signal" - ], - [ - "processes", - "processes" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "projection", - "projection" - ], - [ - "true", - "true" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "average", - "average" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_stats_group_comparison", - "docstring": "Run group comparison statistics on metrics from tractometry\n1) Separate the sample given a particular variable (group_by) into groups\n\n2) Does Shapiro-Wilk test of normality for every sample\nhttps://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test\n\n3) Does Levene or Bartlett (depending on normality) test of variance\nhomogeneity Levene:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm\nBartlett:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm\n\n4) Test the group difference for every measure with the correct test depending\n on the sample (Student, Welch, Mannwhitneyu, ANOVA, Kruskal-Wallis)\nStudent :\nhttps://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test\nWelch :\nhttps://en.wikipedia.org/wiki/Welch%27s_t-test\nMann-Whitney U :\nhttps://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test\nANOVA :\nhttp://www.biostathandbook.com/onewayanova.html\nKruskal-Wallis :\nhttps://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variance\n\n\n5) If the group difference test is positive and the number of groups is greater than\n 2, test the group difference two by two.\n\n6) Generate the result for all metrics and bundles\n\nFormerly: scil_group_comparison.py", - "help": "usage: scil_stats_group_comparison.py [-h] [--out_dir OUT_DIR]\n [--out_json OUT_JSON]\n [--bundles BUNDLES [BUNDLES ...]]\n [--metrics METRICS [METRICS ...]]\n [--values VALUES [VALUES ...]]\n [--alpha_error ALPHA_ERROR]\n [--generate_graph] [--indent INDENT]\n [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n IN_JSON IN_PARTICIPANTS GROUP_BY\n\nRun group comparison statistics on metrics from tractometry\n1) Separate the sample given a particular variable (group_by) into groups\n\n2) Does Shapiro-Wilk test of normality for every sample\nhttps://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test\n\n3) Does Levene or Bartlett (depending on normality) test of variance\nhomogeneity Levene:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm\nBartlett:\nhttps://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm\n\n4) Test the group difference for every measure with the correct test depending\n on the sample (Student, Welch, Mannwhitneyu, ANOVA, Kruskal-Wallis)\nStudent :\nhttps://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test\nWelch :\nhttps://en.wikipedia.org/wiki/Welch%27s_t-test\nMann-Whitney U :\nhttps://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test\nANOVA :\nhttp://www.biostathandbook.com/onewayanova.html\nKruskal-Wallis :\nhttps://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variance\n\n5) If the group difference test is positive and the number of groups is greater than\n 2, test the group difference two by two.\n\n6) Generate the result for all metrics and bundles\n\nFormerly: scil_group_comparison.py\n\npositional arguments:\n IN_JSON Input JSON file from tractometry nextflow pipeline or equivalent.\n IN_PARTICIPANTS 
Input tsv participants file. See doc in https://scilpy.readthedocs.io/en/latest/documentation/construct_participants_tsv_file.html.\n GROUP_BY Variable that will be used to compare groups together.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Name of the output folder path. [stats]\n --out_json OUT_JSON The name of the result json output file; otherwise it will be printed.\n --bundles BUNDLES [BUNDLES ...], -b BUNDLES [BUNDLES ...]\n Bundle(s) in which you want to do stats. [all]\n --metrics METRICS [METRICS ...], -m METRICS [METRICS ...]\n Metric(s) on which you want to do stats. [all]\n --values VALUES [VALUES ...], --va VALUES [VALUES ...]\n Value(s) on which you want to do stats (mean, std). [all]\n --alpha_error ALPHA_ERROR, -a ALPHA_ERROR\n Type 1 error for all the tests. [0.05]\n --generate_graph, --gg\n Generate a simple plot of every metric across groups.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "variance", - "variance" - ], - [ - "greater", - "greater" - ], - [ - "participants", - "participants" - ], - [ - "variable", - "variable" - ], - [ - "bundles", - "bundle" - ], - [ - "positive", - "positive" - ], - [ - "result", - "result" - ], - [ - "bundles", - "bundles" - ], - [ - "level", - "level" - ], - [ - "error", - "error" - ], - [ - "difference", - "difference" - ] - ], - "keywords": [] - }, - { - "name": "scil_surface_apply_transform", - "docstring": "Script to apply a transform to a surface (FreeSurfer or VTK supported),\nusing output from ANTs registration tools (i.e. affine.txt, warp.nii.gz).\n\nExample usage from T1 to b0 using ANTs transforms:\n> ConvertTransformFile 3 output0GenericAffine.mat vtk_transfo.txt --hm\n> scil_surface_apply_transform.py lh_white_lps.vtk affine.txt lh_white_b0.vtk\\\n --ants_warp warp.nii.gz\n\nImportant: The input surface needs to be in *T1 world LPS* coordinates\n(aligned over the T1 in MI-Brain).\n\nThe script will use the linear affine first and then the warp image.\nThe resulting surface will be in *b0 world LPS* coordinates\n(aligned over the b0 in MI-Brain).\n\nFormerly: scil_apply_transform_to_surface.py.", - "help": "usage: scil_surface_apply_transform.py [-h] [--ants_warp ANTS_WARP]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface ants_affine out_surface\n\nScript to apply a transform to a surface (FreeSurfer or VTK supported),\nusing output from ANTs registration tools (i.e. 
affine.txt, warp.nii.gz).\n\nExample usage from T1 to b0 using ANTs transforms:\n> ConvertTransformFile 3 output0GenericAffine.mat vtk_transfo.txt --hm\n> scil_surface_apply_transform.py lh_white_lps.vtk affine.txt lh_white_b0.vtk\\\n --ants_warp warp.nii.gz\n\nImportant: The input surface needs to be in *T1 world LPS* coordinates\n(aligned over the T1 in MI-Brain).\n\nThe script will use the linear affine first and then the warp image.\nThe resulting surface will be in *b0 world LPS* coordinates\n(aligned over the b0 in MI-Brain).\n\nFormerly: scil_apply_transform_to_surface.py.\n\npositional arguments:\n in_surface Input surface (.vtk).\n ants_affine Affine transform from ANTs (.txt or .mat).\n out_surface Output surface (.vtk).\n\noptions:\n -h, --help show this help message and exit\n --ants_warp ANTS_WARP\n Warp image from ANTs (Nifti image).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). NeuroImage.\n", - "synonyms": [ - [ - "tool", - "tools" - ], - [ - "supported", - "supported" - ], - [ - "image", - "image" - ], - [ - "applied", - "apply" - ], - [ - "tractography", - "tractography" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_surface_convert", - "docstring": "Script to convert surface formats\n\nSupported formats:\n \".vtk\", \".vtp\", \".ply\", \".stl\", \".xml\", \".obj\"\n and FreeSurfer surfaces\n\n> scil_surface_convert.py surf.vtk converted_surf.ply\n\nFormerly: scil_convert_surface.py", - "help": "usage: scil_surface_convert.py [-h] [--xform XFORM] [--to_lps]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface\n\nScript to convert surface formats\n\nSupported formats:\n \".vtk\", \".vtp\", \".ply\", \".stl\", \".xml\", \".obj\"\n and FreeSurfer surfaces\n\n> scil_surface_convert.py surf.vtk converted_surf.ply\n\nFormerly: scil_convert_surface.py\n\npositional arguments:\n in_surface Input a surface (FreeSurfer or supported by VTK).\n out_surface Output surface (formats supported by VTK).\n\noptions:\n -h, --help show this help message and exit\n --xform XFORM Path of the copy-paste output from mri_info \n Using: mri_info $input >> log.txt, \n The file log.txt would be this parameter\n --to_lps Flip for Surface/MI-Brain LPS\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). 
NeuroImage.\n", - "synonyms": [ - [ - "supported", - "supported" - ], - [ - "parameter", - "parameter" - ], - [ - "tractography", - "tractography" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_surface_flip", - "docstring": "Script to flip a given surface (FreeSurfer or VTK supported).\n\nCan flip surface coordinates around a single or multiple axes\nCan also be used to reverse the orientation of the surface normals.\n\nFormerly: scil_flip_surface.py", - "help": "usage: scil_surface_flip.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface {x,y,z,n} [{x,y,z,n} ...]\n\nScript to flip a given surface (FreeSurfer or VTK supported).\n\nCan flip surface coordinates around a single or multiple axes\nCan also be used to reverse the orientation of the surface normals.\n\nFormerly: scil_flip_surface.py\n\npositional arguments:\n in_surface Input surface (.vtk).\n out_surface Output flipped surface (.vtk).\n {x,y,z,n} The axes you want to flip. eg: to flip the x and y axes use: x y. to reverse the surface normals use: n\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). NeuroImage.\n", - "synonyms": [ - [ - "orientation", - "orientation" - ], - [ - "supported", - "supported" - ], - [ - "tractography", - "tractography" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_surface_smooth", - "docstring": "Script to smooth a surface with a Laplacian blur.\n\nFor a standard FreeSurfer white matter mesh a step_size from 0.1 to 10\nis recommended\n\nSmoothing time = step_size * nb_steps\n small amount of smoothing [step_size 1, nb_steps 10]\n moderate amount of smoothing [step_size 10, nb_steps 100]\n large amount of smoothing [step_size 100, nb_steps 1000]\n\nFormerly: scil_smooth_surface.py", - "help": "usage: scil_surface_smooth.py [-h] [-m VTS_MASK] [-n NB_STEPS] [-s STEP_SIZE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_surface out_surface\n\nScript to smooth a surface with a Laplacian blur.\n\nFor a standard FreeSurfer white matter mesh a step_size from 0.1 to 10\nis recommended\n\nSmoothing time = step_size * nb_steps\n small amount of smoothing [step_size 1, nb_steps 10]\n moderate amount of smoothing [step_size 10, nb_steps 100]\n large amount of smoothing [step_size 100, nb_steps 1000]\n\nFormerly: scil_smooth_surface.py\n\npositional arguments:\n in_surface Input surface (.vtk).\n out_surface Output smoothed surface (.vtk).\n\noptions:\n -h, --help show this help message and exit\n -m VTS_MASK, --vts_mask VTS_MASK\n Vertex mask no smoothing where mask equals 0 (.npy).\n -n NB_STEPS, --nb_steps NB_STEPS\n Number of steps for laplacian smooth [2].\n -s STEP_SIZE, --step_size STEP_SIZE\n Laplacian smooth step size [5.0].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018.\n Surface-enhanced tractography (SET). 
NeuroImage.\n", - "synonyms": [ - [ - "white", - "white" - ], - [ - "large", - "small" - ], - [ - "step", - "step" - ], - [ - "large", - "large" - ], - [ - "tractography", - "tractography" - ], - [ - "matter", - "matter" - ], - [ - "level", - "level" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_tracking_local", - "docstring": "Local streamline HARDI tractography.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\n\nWARNING: This script DOES NOT support asymetric FODF input (aFODF).\n\nAlgo 'eudx': select the peak from the spherical function (SF) most closely\naligned to the previous direction, and follow an average of it and the previous\ndirection [1].\n\nAlgo 'det': select the orientation corresponding to the maximum of the\nspherical function.\n\nAlgo 'prob': select a direction drawn from the empirical distribution function\ndefined from the SF.\n\nAlgo 'ptt': select the propagation direction using Parallel-Transport\nTractography (PTT) framework, see [2] for more details.\n\nNOTE: eudx can be used with pre-computed peaks from fodf as well as\nevecs_v1.nii.gz from scil_dti_metrics.py (experimental).\n\nNOTE: If tracking with PTT, the step-size should be smaller than usual,\ni.e 0.1-0.2mm or lower. The maximum angle between segments (theta) should\nbe between 10 and 20 degrees.\n\nThe local tracking algorithm can also run on the GPU using the --use_gpu\noption (experimental). By default, GPU tracking behaves the same as\nDIPY. Below is a list of known divergences between the CPU and GPU\nimplementations:\n * Backend: The CPU implementation uses DIPY's LocalTracking and the\n GPU implementation uses an in-house OpenCL implementation.\n * Algo: For the GPU implementation, the only available algorithm is\n Algo 'prob'.\n * SH interpolation: For GPU tracking, SH interpolation can be set to either\n nearest neighbour or trilinear (default). With DIPY, the only available\n method is trilinear.\n * Forward tracking: For GPU tracking, the `--forward_only` flag can be used\n to disable backward tracking. This option isn't available for CPU\n tracking.\n\nAll the input nifti files must be in isotropic resolution.\n\n\nReferences\n----------\n[1]: Garyfallidis, E. (2012). Towards an accurate brain tractography\n[PhD thesis]. University of Cambridge. United Kingdom.\n\n[2]: Aydogan, D. B., & Shi, Y. (2020). 
Parallel transport tractography.\nIEEE transactions on medical imaging, 40(2), 635-647.\n\nFormerly: scil_compute_local_tracking.py", - "help": "usage: scil_tracking_local.py [-h] [--step STEP_SIZE] [--min_length m]\n [--max_length M] [--theta THETA]\n [--sfthres sf_th]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--sh_to_pmf]\n [--algo {det,prob,ptt,eudx}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--sub_sphere SUB_SPHERE]\n [--probe_length PROBE_LENGTH]\n [--probe_radius PROBE_RADIUS]\n [--probe_quality PROBE_QUALITY]\n [--probe_count PROBE_COUNT]\n [--support_exponent SUPPORT_EXPONENT]\n [--use_gpu] [--sh_interp {trilinear,nearest}]\n [--forward_only] [--batch_size BATCH_SIZE]\n [--compress [COMPRESS_TH]] [-f] [--save_seeds]\n [--seed SEED] [-v [{DEBUG,INFO,WARNING}]]\n in_odf in_seed in_mask out_tractogram\n\nLocal streamline HARDI tractography.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\n\nWARNING: This script DOES NOT support asymetric FODF input (aFODF).\n\nAlgo 'eudx': select the peak from the spherical function (SF) most closely\naligned to the previous direction, and follow an average of it and the previous\ndirection [1].\n\nAlgo 'det': select the orientation corresponding to the maximum of the\nspherical function.\n\nAlgo 'prob': select a direction drawn from the empirical distribution function\ndefined from the SF.\n\nAlgo 'ptt': select the propagation direction using Parallel-Transport\nTractography (PTT) framework, see [2] for more details.\n\nNOTE: eudx can be used with pre-computed peaks from fodf as well as\nevecs_v1.nii.gz from scil_dti_metrics.py (experimental).\n\nNOTE: If tracking with PTT, the step-size should be smaller than usual,\ni.e 0.1-0.2mm or lower. The maximum angle between segments (theta) should\nbe between 10 and 20 degrees.\n\nThe local tracking algorithm can also run on the GPU using the --use_gpu\noption (experimental). By default, GPU tracking behaves the same as\nDIPY. Below is a list of known divergences between the CPU and GPU\nimplementations:\n * Backend: The CPU implementation uses DIPY's LocalTracking and the\n GPU implementation uses an in-house OpenCL implementation.\n * Algo: For the GPU implementation, the only available algorithm is\n Algo 'prob'.\n * SH interpolation: For GPU tracking, SH interpolation can be set to either\n nearest neighbour or trilinear (default). With DIPY, the only available\n method is trilinear.\n * Forward tracking: For GPU tracking, the `--forward_only` flag can be used\n to disable backward tracking. This option isn't available for CPU\n tracking.\n\nAll the input nifti files must be in isotropic resolution.\n\nReferences\n----------\n[1]: Garyfallidis, E. (2012). Towards an accurate brain tractography\n[PhD thesis]. University of Cambridge. United Kingdom.\n\n[2]: Aydogan, D. B., & Shi, Y. (2020). Parallel transport tractography.\nIEEE transactions on medical imaging, 40(2), 635-647.\n\nFormerly: scil_compute_local_tracking.py\n\npositional arguments:\n in_odf File containing the orientation diffusion function \n as spherical harmonics file (.nii.gz). Ex: ODF or fODF.\n in_seed Seeding mask (.nii.gz).\n in_mask Tracking mask (.nii.gz).\n Tracking will stop outside this mask. 
The last point of each \n streamline (triggering the stopping criteria) IS added to the streamline.\n out_tractogram Tractogram output file (must be .trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n\nTracking options:\n --step STEP_SIZE Step size in mm. [0.5]\n --min_length m Minimum length of a streamline in mm. [10.0]\n --max_length M Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. If the angle is too big, streamline is \n stopped and the following point is NOT included.\n [\"eudx\"=60, \"det\"=45, \"prob\"=20, \"ptt\"=20]\n --sfthres sf_th Spherical function relative threshold. [0.1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sh_to_pmf If set, map sherical harmonics to spherical function (pmf) before \n tracking (faster, requires more memory)\n --algo {det,prob,ptt,eudx}\n Algorithm to use. [prob]\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Dipy sphere; set of possible directions.\n Default: [repulsion724]\n --sub_sphere SUB_SPHERE\n Subdivides each face of the sphere into 4^s new faces. [0]\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nPTT options:\n --probe_length PROBE_LENGTH\n The length of the probes. Smaller value\n yields more dispersed fibers. [1.0]\n --probe_radius PROBE_RADIUS\n The radius of the probe. A large probe_radius\n helps mitigate noise in the pmf but it might\n make it harder to sample thin and intricate\n connections, also the boundary of fiber\n bundles might be eroded. [0]\n --probe_quality PROBE_QUALITY\n The quality of the probe. This parameter sets\n the number of segments to split the cylinder\n along the length of the probe (minimum=2) [3]\n --probe_count PROBE_COUNT\n The number of probes. This parameter sets the\n number of parallel lines used to model the\n cylinder (minimum=1). [1]\n --support_exponent SUPPORT_EXPONENT\n Data support exponent, used for rejection\n sampling. [3]\n\nGPU options:\n --use_gpu Enable GPU tracking (experimental).\n --sh_interp {trilinear,nearest}\n SH image interpolation method. [trilinear]\n --forward_only Perform forward tracking only.\n --batch_size BATCH_SIZE\n Approximate size of GPU batches (number\n of streamlines to track in parallel). [10000]\n\nOutput options:\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. 
Value is the maximum \n compression distance in mm.\n A rule of thumb is to set it to 0.1mm for deterministic \n streamlines and to 0.2mm for probabilistic streamlines. [0.1]\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n Hint: you can then use scil_tractogram_seed_density_map.\n --seed SEED Random number generator seed.\n\nLogging options:\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "seeding", - "seeding" - ], - [ - "methods", - "method" - ], - [ - "direction", - "direction" - ], - [ - "diffusion", - "diffusion" - ], - [ - "Data", - "data", - "data" - ], - [ - "memory", - "memory" - ], - [ - "streamlines", - "streamlines" - ], - [ - "maps", - "map" - ], - [ - "image", - "image" - ], - [ - "divergence", - "divergences" - ], - [ - "step", - "step" - ], - [ - "large", - "large" - ], - [ - "tractography", - "tractography" - ], - [ - "bundles", - "bundles" - ], - [ - "level", - "level" - ], - [ - "algorithm", - "algorithm" - ], - [ - "random", - "random" - ], - [ - "connections", - "connections" - ], - [ - "function", - "functions", - "function" - ], - [ - "average", - "average" - ], - [ - "tracking", - "tracking" - ], - [ - "total", - "total" - ], - [ - "brain", - "brain" - ], - [ - "size", - "size" - ], - [ - "streamline", - "streamline" - ], - [ - "higher", - "lower" - ], - [ - "orientation", - "orientation" - ], - [ - "imaging", - "imaging" - ], - [ - "larger", - "smaller" - ], - [ - "parameter", - "parameter" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "voxel", - "voxel" - ], - [ - "defined", - "defined" - ] - ], - "keywords": [] - }, - { - "name": "scil_tracking_local_dev", - "docstring": "Local streamline HARDI tractography using scilpy-only methods -- no dipy (i.e.\nno cython). The goal of this is to have a python-only version that can be\nmodified more easily by our team when testing new algorithms and parameters,\nand that can be used as parent classes in sub-projects of our lab such as in\ndwi_ml.\n\nWARNING. MUCH SLOWER THAN scil_tracking_local.py. We recommend using multi-\nprocessing with option --processes.\n\nSimilar to scil_tracking_local:\n The tracking direction is chosen in the aperture cone defined by the\n previous tracking direction and the angular constraint.\n - Algo 'det': the maxima of the spherical function (SF) most closely\n aligned to the previous direction.\n - Algo 'prob': a direction drawn from the empirical distribution function\n defined from the SF.\n\nContrary to scil_tracking_local:\n - Algo 'eudx' is not yet available!\n - Input nifti files do not necessarily need to be in isotropic resolution.\n - The script works with asymmetric input ODF.\n - The interpolation for the tracking mask and spherical function can be\n one of 'nearest' or 'trilinear'.\n - Runge-Kutta integration is supported for the step function.\n\nA few notes on Runge-Kutta integration.\n 1. Runge-Kutta integration is used to approximate the next tracking\n direction by estimating directions from future tracking steps. This\n works well for deterministic tracking. However, in the context of\n probabilistic tracking, the next tracking directions cannot be estimated\n in advance, because they are picked randomly from a distribution. It is\n therefore recommended to keep the rk_order to 1 for probabilistic\n tracking.\n 2. 
As a rule of thumb, doubling the rk_order will double the computation\n time in the worst case.\n\nReferences: [1] Girard, G., Whittingstall K., Deriche, R., and\n Descoteaux, M. (2014). Towards quantitative connectivity analysis:\n reducing tractography biases. Neuroimage, 98, 266-278.\n\nFormerly: scil_compute_local_tracking_dev.py", - "help": "usage: scil_tracking_local_dev.py [-h] [--step STEP_SIZE] [--min_length m]\n [--max_length M] [--theta THETA]\n [--sfthres sf_th]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--algo {det,prob}]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--sub_sphere SUB_SPHERE]\n [--sfthres_init sf_th] [--rk_order K]\n [--max_invalid_nb_points MAX]\n [--forward_only]\n [--sh_interp {nearest,trilinear}]\n [--mask_interp {nearest,trilinear}]\n [--keep_last_out_point]\n [--n_repeats_per_seed N_REPEATS_PER_SEED]\n [--rng_seed RNG_SEED] [--skip SKIP]\n [--processes NBR] [--compress [COMPRESS_TH]]\n [-f] [--save_seeds]\n [-v [{DEBUG,INFO,WARNING}]]\n in_odf in_seed in_mask out_tractogram\n\nLocal streamline HARDI tractography using scilpy-only methods -- no dipy (i.e.\nno cython). The goal of this is to have a python-only version that can be\nmodified more easily by our team when testing new algorithms and parameters,\nand that can be used as parent classes in sub-projects of our lab such as in\ndwi_ml.\n\nWARNING. MUCH SLOWER THAN scil_tracking_local.py. We recommend using multi-\nprocessing with option --processes.\n\nSimilar to scil_tracking_local:\n The tracking direction is chosen in the aperture cone defined by the\n previous tracking direction and the angular constraint.\n - Algo 'det': the maxima of the spherical function (SF) most closely\n aligned to the previous direction.\n - Algo 'prob': a direction drawn from the empirical distribution function\n defined from the SF.\n\nContrary to scil_tracking_local:\n - Algo 'eudx' is not yet available!\n - Input nifti files do not necessarily need to be in isotropic resolution.\n - The script works with asymmetric input ODF.\n - The interpolation for the tracking mask and spherical function can be\n one of 'nearest' or 'trilinear'.\n - Runge-Kutta integration is supported for the step function.\n\nA few notes on Runge-Kutta integration.\n 1. Runge-Kutta integration is used to approximate the next tracking\n direction by estimating directions from future tracking steps. This\n works well for deterministic tracking. However, in the context of\n probabilistic tracking, the next tracking directions cannot be estimated\n in advance, because they are picked randomly from a distribution. It is\n therefore recommended to keep the rk_order to 1 for probabilistic\n tracking.\n 2. As a rule of thumb, doubling the rk_order will double the computation\n time in the worst case.\n\nReferences: [1] Girard, G., Whittingstall K., Deriche, R., and\n Descoteaux, M. (2014). Towards quantitative connectivity analysis:\n reducing tractography biases. Neuroimage, 98, 266-278.\n\nFormerly: scil_compute_local_tracking_dev.py\n\npositional arguments:\n in_odf File containing the orientation distribution function \n as spherical harmonics file (.nii.gz). Ex: ODF or fODF.\n in_seed Seeding mask (.nii.gz).\n in_mask Tracking mask (.nii.gz).\n Tracking will stop outside this mask. 
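Example (a minimal sketch of a deterministic run with Runge-Kutta order 2 and multiprocessing, per the notes above; filenames are hypothetical):
scil_tracking_local_dev.py fodf.nii.gz seed_mask.nii.gz wm_mask.nii.gz tracto.trk --algo det --rk_order 2 --processes 4
For probabilistic tracking, keep --rk_order 1, as recommended above.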
The last point of each \n streamline (triggering the stopping criteria) IS added to the streamline.\n out_tractogram Tractogram output file (must be .trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nTracking options:\n --step STEP_SIZE Step size in mm. [0.5]\n --min_length m Minimum length of a streamline in mm. [10.0]\n --max_length M Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. If the angle is too big, streamline is \n stopped and the following point is NOT included.\n [\"eudx\"=60, \"det\"=45, \"prob\"=20, \"ptt\"=20]\n --sfthres sf_th Spherical function relative threshold. [0.1]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --algo {det,prob} Algorithm to use. [prob]\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Dipy sphere; set of possible directions.\n Default: [repulsion724]\n --sub_sphere SUB_SPHERE\n Subdivides each face of the sphere into 4^s new faces. [0]\n --sfthres_init sf_th Spherical function relative threshold value for the \n initial direction. [0.5]\n --rk_order K The order of the Runge-Kutta integration used for the step function.\n For more information, refer to the note in the script description. [1]\n --max_invalid_nb_points MAX\n Maximum number of steps without valid direction, \n ex: if threshold on ODF or max angles are reached.\n Default: 0, i.e. do not add points following an invalid direction.\n --forward_only If set, tracks in one direction only (forward) given the \n initial seed. The direction is randomly drawn from the ODF.\n --sh_interp {nearest,trilinear}\n Spherical harmonic interpolation: nearest-neighbor \n or trilinear. [trilinear]\n --mask_interp {nearest,trilinear}\n Mask interpolation: nearest-neighbor or trilinear. [nearest]\n --keep_last_out_point\n If set, keep the last point (once out of the tracking mask) of \n the streamline. Default: discard them. This is the default in \n Dipy too. Note that points obtained after an invalid direction \n (ex when angle is too sharp or sh_threshold not reached) are \n never added.\n --n_repeats_per_seed N_REPEATS_PER_SEED\n By default, each seed position is used only once. This option\n allows for tracking from the exact same seed n_repeats_per_seed\n times. [1]\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nRandom seeding options:\n --rng_seed RNG_SEED Initial value for the random number generator. [0]\n --skip SKIP Skip the first N random number. 
\n Useful if you want to create new streamlines to add to \n a previously created tractogram with a fixed --rng_seed.\n Ex: If tractogram_1 was created with --nt 1,000,000, \n you can create tractogram_2 with \n --skip 1,000,000.\n\nMemory options:\n --processes NBR Number of sub-processes to start. \n Default: [1]\n\nOutput options:\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm.\n A rule of thumb is to set it to 0.1mm for deterministic \n streamlines and to 0.2mm for probabilistic streamlines. [0.1]\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n Hint: you can then use scil_tractogram_seed_density_map.\n", - "synonyms": [ - [ - "seeding", - "seeding" - ], - [ - "order", - "order" - ], - [ - "direction", - "direction" - ], - [ - "connectivity", - "connectivity" - ], - [ - "create", - "create" - ], - [ - "future", - "future" - ], - [ - "diffusion", - "diffusion" - ], - [ - "processes", - "processes" - ], - [ - "memory", - "memory" - ], - [ - "processing", - "processing" - ], - [ - "random", - "randomly" - ], - [ - "streamlines", - "streamlines" - ], - [ - "probabilistic", - "probabilistic" - ], - [ - "step", - "step" - ], - [ - "tractography", - "tractography" - ], - [ - "level", - "level" - ], - [ - "analysis", - "analysis" - ], - [ - "invalid", - "invalid" - ], - [ - "supported", - "supported" - ], - [ - "algorithm", - "algorithm" - ], - [ - "random", - "random" - ], - [ - "function", - "functions", - "function" - ], - [ - "integrating", - "integration" - ], - [ - "algorithm", - "algorithms" - ], - [ - "valid", - "valid" - ], - [ - "tracking", - "tracking" - ], - [ - "total", - "total" - ], - [ - "size", - "size" - ], - [ - "streamline", - "streamline" - ], - [ - "orientation", - "orientation" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "project", - "projects" - ], - [ - "methods", - "methods" - ], - [ - "voxel", - "voxel" - ], - [ - "parameters", - "parameters" - ], - [ - "defined", - "defined" - ] - ], - "keywords": [] - }, - { - "name": "scil_tracking_pft", - "docstring": "Local streamline HARDI tractography including Particle Filtering tracking.\n\nWARNING: This script DOES NOT support asymmetric FODF input (aFODF).\n\nThe tracking is done inside partial volume estimation maps and uses the\nparticle filtering tractography (PFT) algorithm. 
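Example (a minimal sketch; filenames are hypothetical, with the include/exclude maps and interface mask produced by scil_tracking_pft_maps.py as described below):
scil_tracking_pft.py fodf.nii.gz interface.nii.gz map_include.nii.gz map_exclude.nii.gz tracto.trk --algo prob --npv 5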
See\nscil_tracking_pft_maps.py to generate PFT required maps.\n\nStreamlines longer than min_length and shorter than max_length are kept.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\nDefault parameters as suggested in [1].\n\nAlgo 'det': the maxima of the spherical function (SF) most closely aligned\nto the previous direction.\nAlgo 'prob': a direction drawn from the empirical distribution function defined\nfrom the SF.\n\nFor streamline compression, a rule of thumb is to set it to 0.1mm for the\ndeterministic algorithm and 0.2mm for the probabilistic algorithm.\n\nAll the input nifti files must be in isotropic resolution.\n\nFormerly: scil_compute_pft.py", - "help": "usage: scil_tracking_pft.py [-h] [--algo {det,prob}] [--step STEP_SIZE]\n [--min_length MIN_LENGTH]\n [--max_length MAX_LENGTH] [--theta THETA] [--act]\n [--sfthres SF_THRESHOLD]\n [--sfthres_init SF_THRESHOLD_INIT]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--npv NPV | --nt NT] [--particles PARTICLES]\n [--back BACK_TRACKING]\n [--forward FORWARD_TRACKING] [--all] [--seed SEED]\n [-f] [--save_seeds] [--compress [COMPRESS_TH]]\n [-v [{DEBUG,INFO,WARNING}]]\n in_sh in_seed in_map_include map_exclude_file\n out_tractogram\n\nLocal streamline HARDI tractography including Particle Filtering tracking.\n\nWARNING: This script DOES NOT support asymmetric FODF input (aFODF).\n\nThe tracking is done inside partial volume estimation maps and uses the\nparticle filtering tractography (PFT) algorithm. See\nscil_tracking_pft_maps.py to generate PFT required maps.\n\nStreamlines longer than min_length and shorter than max_length are kept.\nThe tracking direction is chosen in the aperture cone defined by the\nprevious tracking direction and the angular constraint.\nDefault parameters as suggested in [1].\n\nAlgo 'det': the maxima of the spherical function (SF) most closely aligned\nto the previous direction.\nAlgo 'prob': a direction drawn from the empirical distribution function defined\nfrom the SF.\n\nFor streamline compression, a rule of thumb is to set it to 0.1mm for the\ndeterministic algorithm and 0.2mm for the probabilistic algorithm.\n\nAll the input nifti files must be in isotropic resolution.\n\nFormerly: scil_compute_pft.py\n\npositional arguments:\n in_sh Spherical harmonic file (.nii.gz).\n in_seed Seeding mask (.nii.gz).\n in_map_include The probability map (.nii.gz) of ending the\n streamline and including it in the output (CMC, PFT [1])\n map_exclude_file The probability map (.nii.gz) of ending the\n streamline and excluding it in the output (CMC, PFT [1]).\n out_tractogram Tractogram output file (must be .trk or .tck).\n\nGeneric options:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nTracking options:\n --algo {det,prob} Algorithm to use (must be \"det\" or \"prob\"). [prob]\n --step STEP_SIZE Step size in mm. [0.2]\n --min_length MIN_LENGTH\n Minimum length of a streamline in mm. [10.0]\n --max_length MAX_LENGTH\n Maximum length of a streamline in mm. [300.0]\n --theta THETA Maximum angle between 2 steps. [\"det\"=45, \"prob\"=20]\n --act If set, uses anatomically-constrained tractography (ACT) \n instead of continuous map criterion (CMC).\n --sfthres SF_THRESHOLD\n Spherical function relative threshold. 
[0.1]\n --sfthres_init SF_THRESHOLD_INIT\n Spherical function relative threshold value for the \n initial direction. [0.5]\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n\nSeeding options:\n When no option is provided, uses --npv 1.\n\n --npv NPV Number of seeds per voxel.\n --nt NT Total number of seeds to use.\n\nPFT options:\n --particles PARTICLES\n Number of particles to use for PFT. [15]\n --back BACK_TRACKING Length of PFT back tracking (mm). [2.0]\n --forward FORWARD_TRACKING\n Length of PFT forward tracking (mm). [1.0]\n\nOutput options:\n --all If set, keeps \"excluded\" streamlines.\n NOT RECOMMENDED, except for debugging.\n --seed SEED Random number generator seed.\n -f Force overwriting of the output files.\n --save_seeds If set, save the seeds used for the tracking \n in the data_per_streamline property.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm.[0.1]\n\nReferences: [1] Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M. (2014). Towards quantitative connectivity analysis: reducing tractography biases. Neuroimage, 98, 266-278.\n", - "synonyms": [ - [ - "seeding", - "seeding" - ], - [ - "anatomically", - "anatomically" - ], - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "maps", - "map" - ], - [ - "direction", - "direction" - ], - [ - "connectivity", - "connectivity" - ], - [ - "algorithm", - "algorithm" - ], - [ - "maps", - "maps" - ], - [ - "probability", - "probability" - ], - [ - "random", - "random" - ], - [ - "step", - "step" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "function", - "functions", - "function" - ], - [ - "tractography", - "tractography" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "voxel", - "voxel" - ], - [ - "tracking", - "tracking" - ], - [ - "level", - "level" - ], - [ - "total", - "total" - ], - [ - "parameters", - "parameters" - ], - [ - "analysis", - "analysis" - ], - [ - "defined", - "defined" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_tracking_pft_maps", - "docstring": "Compute include and exclude maps, and the seeding interface mask from partial\nvolume estimation (PVE) maps. Maps should have values in [0,1], gm+wm+csf=1 in\nall voxels of the brain, gm+wm+csf=0 elsewhere.\n\nReferences: Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M.\n(2014). Towards quantitative connectivity analysis: reducing tractography\nbiases. Neuroimage.\n\nFormerly: scil_compute_maps_for_particle_filter_tracking.py", - "help": "usage: scil_tracking_pft_maps.py [-h] [--include filename]\n [--exclude filename] [--interface filename]\n [-t THRESHOLD] [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n in_wm in_gm in_csf\n\nCompute include and exclude maps, and the seeding interface mask from partial\nvolume estimation (PVE) maps. 
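Example (a minimal sketch; a typical chain from hypothetical FSL FAST PVE outputs, feeding scil_tracking_pft.py as documented above):
scil_tracking_pft_maps.py sub01_pve_2.nii.gz sub01_pve_1.nii.gz sub01_pve_0.nii.gz --include map_include.nii.gz --exclude map_exclude.nii.gz --interface interface.nii.gz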
Maps should have values in [0,1], gm+wm+csf=1 in\nall voxels of the brain, gm+wm+csf=0 elsewhere.\n\nReferences: Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M.\n(2014). Towards quantitative connectivity analysis: reducing tractography\nbiases. Neuroimage.\n\nFormerly: scil_compute_maps_for_particle_filter_tracking.py\n\npositional arguments:\n in_wm White matter PVE map (nifti). From normal FAST output, has a PVE_2 name suffix.\n in_gm Grey matter PVE map (nifti). From normal FAST output, has a PVE_1 name suffix.\n in_csf Cerebrospinal fluid PVE map (nifti). From normal FAST output, has a PVE_0 name suffix.\n\noptions:\n -h, --help show this help message and exit\n --include filename Output include map (nifti). [map_include.nii.gz]\n --exclude filename Output exclude map (nifti). [map_exclude.nii.gz]\n --interface filename Output interface seeding mask (nifti). [interface.nii.gz]\n -t THRESHOLD Minimum gm and wm PVE values in a voxel to be included in the interface. [0.1]\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "seeding", - "seeding" - ], - [ - "maps", - "map" - ], - [ - "connectivity", - "connectivity" - ], - [ - "white", - "white" - ], - [ - "maps", - "maps" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "voxel", - "voxels" - ], - [ - "tractography", - "tractography" - ], - [ - "matter", - "matter" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "analysis", - "analysis" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_tracking_pft_maps_edit", - "docstring": "Modify PFT maps to allow PFT tracking in a given mask (e.g. edema).\n\nFormerly: scil_add_tracking_mask_to_pft_maps.py.", - "help": "usage: scil_tracking_pft_maps_edit.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n map_include map_exclude additional_mask\n map_include_corr map_exclude_corr\n\nModify PFT maps to allow PFT tracking in a given mask (e.g. edema).\n\nFormerly: scil_add_tracking_mask_to_pft_maps.py.\n\npositional arguments:\n map_include PFT map include.\n map_exclude PFT map exclude.\n additional_mask Allow PFT tracking in this mask.\n map_include_corr Corrected PFT map include output file name.\n map_exclude_corr Corrected PFT map exclude output file name.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "tracking", - "tracking" - ], - [ - "level", - "level" - ], - [ - "maps", - "map" - ], - [ - "maps", - "maps" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_apply_transform", - "docstring": "Transform a tractogram using an affine/rigid transformation and nonlinear\ndeformation (optional).\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nApplying transformation to a tractogram can lead to invalid streamlines (out of\nthe bounding box), and thus four strategies are available:\n1) Do nothing, may crash at saving if invalid streamlines are present.\n [This is the default]\n2) --keep_invalid, save invalid streamlines. 
Leave it to the user to run\n scil_tractogram_remove_invalid.py if needed.\n3) --remove_invalid, automatically remove invalid streamlines before saving.\n Should not remove more than a few streamlines. Typically, the streamlines\n that are rejected are the ones reaching the limits of the brain, e.g. near\n the pons.\n4) --cut_invalid, automatically cut invalid streamlines before saving, i.e. the\n streamlines are kept but the points out of the bounding box are cut.\n\nExample:\nTo apply a transformation from ANTs to a tractogram, if the ANTs command was\nMOVING->REFERENCE...\n1) To apply the original transformation:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --inverse\n --in_deformation 1InverseWarp.nii.gz\n\n2) To apply the inverse transformation, i.e. REFERENCE->MOVING:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --in_deformation 1Warp.nii.gz\n --reverse_operation\n\nFormerly: scil_apply_transform_to_tractogram.py", - "help": "usage: scil_tractogram_apply_transform.py [-h] [--inverse]\n [--in_deformation file]\n [--reverse_operation]\n [--cut_invalid | --remove_invalid | --keep_invalid]\n [--no_empty] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_moving_tractogram in_target_file\n in_transfo out_tractogram\n\nTransform a tractogram using an affine/rigid transformation and nonlinear\ndeformation (optional).\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nApplying transformation to a tractogram can lead to invalid streamlines (out of\nthe bounding box), and thus four strategies are available:\n1) Do nothing, may crash at saving if invalid streamlines are present.\n [This is the default]\n2) --keep_invalid, save invalid streamlines. Leave it to the user to run\n scil_tractogram_remove_invalid.py if needed.\n3) --remove_invalid, automatically remove invalid streamlines before saving.\n Should not remove more than a few streamlines. Typically, the streamlines\n that are rejected are the ones reaching the limits of the brain, e.g. near\n the pons.\n4) --cut_invalid, automatically cut invalid streamlines before saving, i.e. the\n streamlines are kept but the points out of the bounding box are cut.\n\nExample:\nTo apply a transformation from ANTs to a tractogram, if the ANTs command was\nMOVING->REFERENCE...\n1) To apply the original transformation:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --inverse\n --in_deformation 1InverseWarp.nii.gz\n\n2) To apply the inverse transformation, i.e. 
REFERENCE->MOVING:\nscil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE}\n 0GenericAffine.mat ${OUTPUT_NAME}\n --in_deformation 1Warp.nii.gz\n --reverse_operation\n\nFormerly: scil_apply_transform_to_tractogram.py\n\npositional arguments:\n in_moving_tractogram Path of the tractogram to be transformed.\n Bounding box validity will not be checked (could \n contain invalid streamlines).\n in_target_file Path of the reference target file (trk or nii).\n in_transfo Path of the file containing the 4x4 \n transformation, matrix (.txt, .npy or .mat).\n out_tractogram Output tractogram filename (transformed data).\n\noptions:\n -h, --help show this help message and exit\n --no_empty Do not write file if there is no streamline.\n You may save an empty file if you use remove_invalid.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nTransformation options:\n --inverse Apply the inverse linear transformation.\n --in_deformation file\n Path to the file containing a deformation field.\n --reverse_operation Apply the transformation in reverse (see doc), warp\n first, then linear.\n\nManagement of invalid streamlines:\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --keep_invalid Keep the streamlines landing out of the bounding box.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "invalid", - "invalid" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "applied", - "apply" - ], - [ - "Data", - "data", - "data" - ], - [ - "level", - "level" - ], - [ - "applied", - "applying" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_apply_transform_to_hdf5", - "docstring": "Transform tractogram(s) contained in the hdf5 output from a connectivity\nscript, using an affine/rigid transformation and nonlinear deformation\n(optional).\n\nSee scil_tractogram_apply_transform.py to apply directly to a tractogram.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nOr use >> scil_tractogram_apply_transform.py --help\n\nFormerly: scil_apply_transform_to_hdf5.py", - "help": "usage: scil_tractogram_apply_transform_to_hdf5.py [-h] [--inverse]\n [--in_deformation file]\n [--reverse_operation]\n [--cut_invalid | --remove_invalid | --keep_invalid]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_hdf5 in_target_file\n in_transfo out_hdf5\n\nTransform tractogram(s) contained in the hdf5 output from a connectivity\nscript, using an affine/rigid transformation and nonlinear deformation\n(optional).\n\nSee scil_tractogram_apply_transform.py to apply directly to a tractogram.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nOr use >> scil_tractogram_apply_transform.py --help\n\nFormerly: scil_apply_transform_to_hdf5.py\n\npositional arguments:\n in_hdf5 Path of the hdf5 containing the moving tractogram, to be transformed. 
(.h5 extension).\n in_target_file Path of the reference target file (.trk or .nii).\n in_transfo Path of the file containing the 4x4 \n transformation, matrix (.txt, .npy or .mat).\n out_hdf5 Output tractogram filename (transformed data).\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nTransformation options:\n --inverse Apply the inverse linear transformation.\n --in_deformation file\n Path to the file containing a deformation field.\n --reverse_operation Apply the transformation in reverse (see doc), warp\n first, then linear.\n\nManagement of invalid streamlines:\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --keep_invalid Keep the streamlines landing out of the bounding box.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "connectivity", - "connectivity" - ], - [ - "invalid", - "invalid" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "applied", - "apply" - ], - [ - "Data", - "data", - "data" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_assign_custom_color", - "docstring": "The script uses scalars from an anatomy, data_per_point or data_per_streamline\n(e.g. commit_weights) to visualize them on the streamlines.\nSaves the RGB values in the data_per_point 'color' with 3 values per point:\n(color_x, color_y, color_z).\n\nIf called with .tck, the output will always be .trk, because data_per_point has\nno equivalent in tck file.\n\nIf used with a visualization software like MI-Brain\n(https://github.com/imeka/mi-brain), the 'color' dps is applied by default at\nloading time.\n\nCOLORING METHOD\nThis script maps the raw values from these sources to RGB using a colormap.\n --use_dpp: The data from each point is converted to a color.\n --use_dps: The same color is applied to all points of the streamline.\n --from_anatomy: The voxel's color is used for the points of the streamlines\n crossing it. See also scil_tractogram_project_map_to_streamlines.py. You\n can have more options to project maps to dpp, and then use --use_dpp here.\n --along_profile: The data used here is each point's position in the\n streamline. To have nice results, you should first uniformize head/tail.\n See scil_tractogram_uniformize_endpoints.py.\n --local_angle.\n\nCOLORING OPTIONS\nA minimum and a maximum range can be provided to clip values. If the range of\nvalues is too large for intuitive visualization, a log transform can be\napplied.\n\nIf the data provided from --use_dps, --use_dpp and --from_anatomy are integer\nlabels, they can be mapped using a LookUp Table (--LUT).\nThe file provided as a LUT should be either .txt or .npy and if the size is\nN=20, then the data provided should be between 1-20.\n\nA custom colormap can be provided using --colormap. 
It should be a string\ncontaining a colormap name OR multiple Matplotlib named colors separated by -.\nThe colormap used for mapping values to colors can be saved to a png/jpg image\nusing the --out_colorbar option.\n\nSee also: scil_tractogram_assign_uniform_color.py, for simplified options.\n\nFormerly: scil_assign_custom_color_to_tractogram.py", - "help": "", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "methods", - "method" - ], - [ - "streamline", - "streamline" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "image", - "image" - ], - [ - "maps", - "maps" - ], - [ - "project", - "project" - ], - [ - "Data", - "data", - "data" - ], - [ - "large", - "large" - ], - [ - "applied", - "applied" - ], - [ - "voxel", - "voxel" - ], - [ - "brain", - "brain" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_assign_uniform_color", - "docstring": "Assign a hexadecimal RGB color to one or more Trackvis (.trk) tractogram.\n(If called with .tck, the output will always be .trk, because data_per_point\nhas no equivalent in tck file.)\n\nSaves the RGB values in the data_per_point 'color' with values\n(color_x, color_y, color_z).\n\nThe hexadecimal RGB color should be formatted as 0xRRGGBB or \"#RRGGBB\".\n\nSee also: scil_tractogram_assign_custom_color.py\n\nFormerly: scil_assign_uniform_color_to_tractograms.py", - "help": "usage: scil_tractogram_assign_uniform_color.py [-h]\n (--fill_color str | --dict_colors file.json)\n (--out_suffix [suffix] | --out_tractogram file.trk)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractograms\n [in_tractograms ...]\n\nAssign a hexadecimal RGB color to one or more Trackvis (.trk) tractogram.\n(If called with .tck, the output will always be .trk, because data_per_point\nhas no equivalent in tck file.)\n\nSaves the RGB values in the data_per_point 'color' with values\n(color_x, color_y, color_z).\n\nThe hexadecimal RGB color should be formatted as 0xRRGGBB or \"#RRGGBB\".\n\nSee also: scil_tractogram_assign_custom_color.py\n\nFormerly: scil_assign_uniform_color_to_tractograms.py\n\npositional arguments:\n in_tractograms Input tractograms (.trk or .tck).\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nColoring Methods:\n --fill_color str Can be hexadecimal (i.e. either \"#RRGGBB\" or 0xRRGGBB).\n --dict_colors file.json\n Json file: dictionary mapping each tractogram's basename to a color.\n Do not put your file's extension in your dict.\n Same convention as --fill_color.\n\nOutput options:\n --out_suffix [suffix]\n Specify suffix to append to input basename.\n Mandatory choice if you run this script on multiple tractograms.\n Mandatory choice with --dict_colors.\n [None]\n --out_tractogram file.trk\n Output filename of colored tractogram (.trk).\n", - "synonyms": [ - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "methods", - "methods" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_commit", - "docstring": "Convex Optimization Modeling for Microstructure Informed Tractography (COMMIT)\nestimates, globally, how a given tractogram explains the DWI in terms of signal\nfit, assuming a certain forward microstructure model. 
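Example (a minimal sketch; filenames are hypothetical, with the default stick-zeppelin-ball model, which requires --in_peaks as explained below):
scil_tractogram_commit.py tracto.trk dwi.nii.gz dwi.bval dwi.bvec commit_out/ --in_peaks peaks.nii.gz --processes 4
For single-shell data, a sketch of the ball-and-stick variant would add --ball_stick and omit --in_peaks.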
It assigns a weight to\neach streamline, which represents how well it explains the DWI signal globally.\nThe default forward microstructure model is stick-zeppelin-ball, which requires\nmulti-shell data and a peak file (principal fiber directions in each voxel,\ntypically from a field of fODFs).\n\nIt is possible to use the ball-and-stick model for single-shell and multi-shell\ndata. In this case, the peak file is not mandatory. Multi-shell should follow a\n\"NODDI protocol\" (low and high b-values); multiple shells with similar b-values\nshould not be used with COMMIT.\n\nThe output from COMMIT is:\n- fit_NRMSE.nii.gz\n fitting error (Normalized Root Mean Square Error)\n- fit_RMSE.nii.gz\n fitting error (Root Mean Square Error)\n- results.pickle\n Dictionary containing the experiment parameters and final weights\n- compartment_EC.nii.gz\n (est. Extra-Cellular signal fraction)\n- compartment_IC.nii.gz\n (est. Intra-Cellular signal fraction)\n- compartment_ISO.nii.gz\n (est. isotropic signal fraction (freewater compartment)):\n Each of COMMIT compartments\n- streamline_weights.txt\n Text file containing the commit weights for each streamline of the\n input tractogram.\n- streamlines_length.txt\n Text file containing the length (mm) of each streamline.\n- streamline_weights_by_length.txt\n Text file containing the commit weights for each streamline of the\n input tractogram, ordered by their length.\n- tot_streamline_weights\n Text file containing the total commit weights of each streamline.\n Equal to commit_weights * streamlines_length (W_i * L_i)\n- essential.trk / non_essential.trk\n Tractograms containing the streamlines below or equal (essential) and\n above (non_essential) a threshold_weights of 0.\n- decompose_commit.h5\n In the case where the input is a hdf5 file only, we will save an output\n hdf5 with the following information separated into each bundle's dps:\n - streamlines_weights\n - streamline_weights_by_length\n For each bundle, only the essential streamlines are kept.\n\nThis script can divide the input tractogram in two using a threshold to apply\non the streamlines' weight. The threshold used is 0.0, keeping only streamlines\nthat have non-zero weight and that contribute to explain the DWI signal.\nStreamlines with 0 weight are essentially not necessary according to COMMIT.\n\nCOMMIT2 is available only for HDF5 data from\nscil_tractogram_segment_bundles_for_connectivity.py and\nwith the --ball_stick option. Use the --commit2 option to activate it; slightly\nlonger computation time. This wrapper offers a simplified way to call COMMIT,\nbut does not allow to use (or fine-tune) every parameter. 
If you want to use\nCOMMIT with full access to all parameters,\nvisit: https://github.com/daducci/COMMIT\n\nWhen tuning parameters, such as --iso_diff, --para_diff, --perp_diff or\n--lambda_commit_2, you should evaluate the quality of results by:\n - Looking at the 'density' (GTM) of the connectome (essential tractogram)\n - Confirm the quality of WM bundle reconstruction (essential tractogram)\n - Inspect the (N)RMSE map and look for peaks or anomalies\n - Compare the density map before and after (essential tractogram)\n\nFormerly: scil_run_commit.py", - "help": "usage: scil_tractogram_commit.py [-h] [--nbr_dir NBR_DIR]\n [--nbr_iter NBR_ITER] [--in_peaks IN_PEAKS]\n [--in_tracking_mask IN_TRACKING_MASK]\n [--commit2]\n [--lambda_commit_2 LAMBDA_COMMIT_2]\n [--ball_stick] [--para_diff PARA_DIFF]\n [--perp_diff PERP_DIFF [PERP_DIFF ...]]\n [--iso_diff ISO_DIFF [ISO_DIFF ...]]\n [--keep_whole_tractogram]\n [--save_kernels DIRECTORY | --load_kernels DIRECTORY]\n [--compute_only] [--tolerance tol]\n [--skip_b0_check] [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram in_dwi in_bval in_bvec out_dir\n\nConvex Optimization Modeling for Microstructure Informed Tractography (COMMIT)\nestimates, globally, how a given tractogram explains the DWI in terms of signal\nfit, assuming a certain forward microstructure model. It assigns a weight to\neach streamline, which represents how well it explains the DWI signal globally.\nThe default forward microstructure model is stick-zeppelin-ball, which requires\nmulti-shell data and a peak file (principal fiber directions in each voxel,\ntypically from a field of fODFs).\n\nIt is possible to use the ball-and-stick model for single-shell and multi-shell\ndata. In this case, the peak file is not mandatory. Multi-shell should follow a\n\"NODDI protocol\" (low and high b-values); multiple shells with similar b-values\nshould not be used with COMMIT.\n\nThe output from COMMIT is:\n- fit_NRMSE.nii.gz\n fitting error (Normalized Root Mean Square Error)\n- fit_RMSE.nii.gz\n fitting error (Root Mean Square Error)\n- results.pickle\n Dictionary containing the experiment parameters and final weights\n- compartment_EC.nii.gz\n (est. Extra-Cellular signal fraction)\n- compartment_IC.nii.gz\n (est. Intra-Cellular signal fraction)\n- compartment_ISO.nii.gz\n (est. isotropic signal fraction (freewater compartment)):\n Each of COMMIT compartments\n- streamline_weights.txt\n Text file containing the commit weights for each streamline of the\n input tractogram.\n- streamlines_length.txt\n Text file containing the length (mm) of each streamline.\n- streamline_weights_by_length.txt\n Text file containing the commit weights for each streamline of the\n input tractogram, ordered by their length.\n- tot_streamline_weights\n Text file containing the total commit weights of each streamline.\n Equal to commit_weights * streamlines_length (W_i * L_i)\n- essential.trk / non_essential.trk\n Tractograms containing the streamlines below or equal (essential) and\n above (non_essential) a threshold_weights of 0.\n- decompose_commit.h5\n In the case where the input is a hdf5 file only, we will save an output\n hdf5 with the following information separated into each bundle's dps:\n - streamlines_weights\n - streamline_weights_by_length\n For each bundle, only the essential streamlines are kept.\n\nThis script can divide the input tractogram in two using a threshold to apply\non the streamlines' weight. 
The threshold used is 0.0, keeping only streamlines\nthat have non-zero weight and that contribute to explain the DWI signal.\nStreamlines with 0 weight are essentially not necessary according to COMMIT.\n\nCOMMIT2 is available only for HDF5 data from\nscil_tractogram_segment_bundles_for_connectivity.py and\nwith the --ball_stick option. Use the --commit2 option to activate it; slightly\nlonger computation time. This wrapper offers a simplified way to call COMMIT,\nbut does not allow to use (or fine-tune) every parameter. If you want to use\nCOMMIT with full access to all parameters,\nvisit: https://github.com/daducci/COMMIT\n\nWhen tuning parameters, such as --iso_diff, --para_diff, --perp_diff or\n--lambda_commit_2, you should evaluate the quality of results by:\n - Looking at the 'density' (GTM) of the connectome (essential tractogram)\n - Confirm the quality of WM bundle reconstruction (essential tractogram)\n - Inspect the (N)RMSE map and look for peaks or anomalies\n - Compare the density map before and after (essential tractogram)\n\nFormerly: scil_run_commit.py\n\npositional arguments:\n in_tractogram Input tractogram (.trk or .tck or .h5).\n in_dwi Diffusion-weighted image used by COMMIT (.nii.gz).\n in_bval b-values in the FSL format (.bval).\n in_bvec b-vectors in the FSL format (.bvec).\n out_dir Output directory for the COMMIT maps.\n\noptions:\n -h, --help show this help message and exit\n --nbr_dir NBR_DIR Number of directions, on the half of the sphere,\n representing the possible orientations of the response functions [500].\n --nbr_iter NBR_ITER Maximum number of iterations [1000].\n --in_peaks IN_PEAKS Peaks file representing principal direction(s) locally,\n typically coming from fODFs. This file is mandatory for the default \n stick-zeppelin-ball model.\n --in_tracking_mask IN_TRACKING_MASK\n Binary mask where tractography was allowed.\n If not set, uses a binary mask computed from the streamlines.\n --tolerance tol The tolerated gap between the b-values to extract and the current b-value.\n [Default: 20]\n * Note. We would expect to find at least one b-value in the \n range [0, tolerance]. To skip this check, use --skip_b0_check.\n --skip_b0_check By default, we supervise that at least one b0 exists in your data\n (i.e. b-values below the default --b0_threshold). Use this option to \n allow continuing even if the minimum b-value is suspiciously high.\n If no b-value is found below the threshold, the script will continue \n with your minimal b-value as new --b0_threshold.\n Use with care, and only if you understand your data.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nCOMMIT2 options:\n --commit2 Run commit2, requires .h5 as input and will force\n ball&stick model.\n --lambda_commit_2 LAMBDA_COMMIT_2\n Specify the clustering prior strength [0.001].\n\nModel options:\n --ball_stick Use the ball&stick model, disable the zeppelin compartment.\n Only model suitable for single-shell data.\n --para_diff PARA_DIFF\n Parallel diffusivity in mm^2/s.\n Default for both ball_stick and stick_zeppelin_ball: 1.7E-3.\n --perp_diff PERP_DIFF [PERP_DIFF ...]\n Perpendicular diffusivity in mm^2/s.\n Default for ball_stick: None\n Default for stick_zeppelin_ball: [0.51E-3]\n --iso_diff ISO_DIFF [ISO_DIFF ...]\n Isotropic diffusivity in mm^2/s.\n Default for ball_stick: [2.0E-3]\n Default for stick_zeppelin_ball: [1.7E-3, 3.0E-3]\n\nTractogram options:\n --keep_whole_tractogram\n Save a tractogram copy with streamline weights in the data_per_streamline\n [False].\n --compute_only Compute kernels only, --save_kernels must be used.\n\nKernels options:\n --save_kernels DIRECTORY\n Output directory for the COMMIT kernels.\n --load_kernels DIRECTORY\n Input directory where the COMMIT kernels are located.\n\nReferences:\n[1] Daducci, Alessandro, et al. \"COMMIT: convex optimization modeling for\n microstructure informed tractography.\" IEEE transactions on medical\n imaging 34.1 (2014): 246-257.\n[2] Schiavi, Simona, et al. \"A new method for accurate in vivo mapping of\n human brain connections using microstructural and anatomical information.\"\n Science advances 6.31 (2020): eaba8245.\n", - "synonyms": [ - [ - "methods", - "method" - ], - [ - "principal", - "principal" - ], - [ - "direction", - "direction" - ], - [ - "diffusion", - "diffusion" - ], - [ - "Data", - "data", - "data" - ], - [ - "processes", - "processes" - ], - [ - "vivo", - "vivo" - ], - [ - "experiment", - "experiment" - ], - [ - "error", - "error" - ], - [ - "streamlines", - "streamlines" - ], - [ - "maps", - "map" - ], - [ - "examine", - "evaluate" - ], - [ - "bundles", - "bundle" - ], - [ - "image", - "image" - ], - [ - "tractography", - "tractography" - ], - [ - "bundles", - "bundles" - ], - [ - "anatomical", - "anatomy", - "anatomical" - ], - [ - "level", - "level" - ], - [ - "streamline", - "simplify" - ], - [ - "weighted", - "weighted" - ], - [ - "high", - "high" - ], - [ - "applied", - "apply" - ], - [ - "orientation", - "orientations" - ], - [ - "signal", - "signal" - ], - [ - "connections", - "connections" - ], - [ - "total", - "total" - ], - [ - "false", - "false" - ], - [ - "brain", - "brain" - ], - [ - "human", - "human" - ], - [ - "streamline", - "streamline" - ], - [ - "imaging", - "imaging" - ], - [ - "high", - "low" - ], - [ - "maps", - "maps" - ], - [ - "parameter", - "parameter" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "binary", - "binary" - ], - [ - "voxel", - "voxel" - ], - [ - "parameters", - "parameters" - ], - [ - "function", - "functions", - "functions" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_compress", - "docstring": "Compress tractogram by removing collinear (or almost collinear) points.\n\nThe compression threshold represents the maximum distance (in mm) to the\noriginal position of the point.\n\nFormerly: scil_compress_streamlines.py", - "help": "usage: scil_tractogram_compress.py [-h] [-e ERROR_RATE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nCompress tractogram by removing collinear (or almost collinear) points.\n\nThe 
compression threshold represents the maximum distance (in mm) to the\noriginal position of the point.\n\nFormerly: scil_compress_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file (trk or tck).\n out_tractogram Path of the output tractogram file (trk or tck).\n\noptions:\n -h, --help show this help message and exit\n -e ERROR_RATE Maximum compression distance in mm [0.1].\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_compute_TODI", - "docstring": "Compute a Track Orientation Density Image (TODI).\n\nEach segment of the streamlines is weighted by its length (to support\ncompressed streamlines).\n\nThis script can afterwards output a Track Density Image (TDI) or a TODI with SF\nor SH representation, based on streamlines' segments.\n\nFormerly: scil_compute_todi.py", - "help": "usage: scil_tractogram_compute_TODI.py [-h] [--sphere SPHERE] [--mask MASK]\n [--sh_order SH_ORDER]\n [--normalize_per_voxel]\n [--smooth_todi | --asymmetric]\n [--n_steps N_STEPS]\n [--out_mask OUT_MASK]\n [--out_tdi OUT_TDI]\n [--out_todi_sf OUT_TODI_SF]\n [--out_todi_sh OUT_TODI_SH]\n [--reference REFERENCE]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram\n\nCompute a Track Orientation Density Image (TODI).\n\nEach segment of the streamlines is weighted by its length (to support\ncompressed streamlines).\n\nThis script can afterwards output a Track Density Image (TDI) or a TODI with SF\nor SH representation, based on streamlines' segments.\n\nFormerly: scil_compute_todi.py\n\npositional arguments:\n in_tractogram Input streamlines file.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nComputing options:\n --sphere SPHERE Sphere used for the angular discretization. [repulsion724]\n --mask MASK If set, use the given mask.\n --sh_order SH_ORDER Order of the original SH. [8]\n --normalize_per_voxel\n If set, normalize each SF/SH at each voxel.\n --smooth_todi If set, smooth TODI (angular and spatial).\n --asymmetric If set, compute asymmetric TODI.\n Cannot be used with --smooth_todi.\n --n_steps N_STEPS Number of steps for streamline segments subdivision prior to binning [1].\n\nOutput files. 
Saves only when filename is set:\n --out_mask OUT_MASK Mask showing where TDI > 0.\n --out_tdi OUT_TDI Output Track Density Image (TDI).\n --out_todi_sf OUT_TODI_SF\n Output TODI, with SF (each direction\n on the sphere; requires a lot of memory)\n --out_todi_sh OUT_TODI_SH\n Output TODI, with SH coefficients.\n\nReferences:\n [1] Dhollander T, Emsell L, Van Hecke W, Maes F, Sunaert S, Suetens P.\n Track orientation density imaging (TODI) and\n track orientation distribution (TOD) based tractography.\n NeuroImage. 2014 Jul 1;94:312-36.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "spatial", - "spatial" - ], - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "weighted", - "weighted" - ], - [ - "orientation", - "orientation" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "imaging", - "imaging" - ], - [ - "image", - "image" - ], - [ - "tractography", - "tractography" - ], - [ - "based", - "based" - ], - [ - "memory", - "memory" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_compute_density_map", - "docstring": "Compute a density map from a streamlines file. Can be binary.\n\nThis script correctly handles compressed streamlines.\n\nFormerly: scil_compute_streamlines_density_map.py", - "help": "usage: scil_tractogram_compute_density_map.py [-h] [--binary [FIXED_VALUE]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle out_img\n\nCompute a density map from a streamlines file. Can be binary.\n\nThis script correctly handles compressed streamlines.\n\nFormerly: scil_compute_streamlines_density_map.py\n\npositional arguments:\n in_bundle Tractogram filename.\n out_img path of the output image file.\n\noptions:\n -h, --help show this help message and exit\n --binary [FIXED_VALUE]\n If set, will store the same value for all intersected voxels, \n creating a binary map. When set without a value, 1 is used (and dtype \n uint8). If a value is given, will be used as the stored value.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "maps", - "map" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "image", - "image" - ], - [ - "create", - "creating" - ], - [ - "voxel", - "voxels" - ], - [ - "binary", - "binary" - ], - [ - "level", - "level" - ], - [ - "intersected", - "intersected" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_convert", - "docstring": "Conversion of '.tck', '.trk', '.fib', '.vtk' and 'dpy' files using updated file\nformat standard. TRK file always needs a reference file, a NIFTI, for\nconversion. The FIB file format is in fact VTK; MITK Diffusion supports it.\n\nFormerly: scil_convert_tractogram.py", - "help": "usage: scil_tractogram_convert.py [-h] [--no_bbox_check]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram output_name\n\nConversion of '.tck', '.trk', '.fib', '.vtk' and 'dpy' files using updated file\nformat standard. TRK file always needs a reference file, a NIFTI, for\nconversion. The FIB file format is in fact VTK; MITK Diffusion supports it.\n\nFormerly: scil_convert_tractogram.py\n\npositional arguments:\n in_tractogram Tractogram filename. 
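Example (a minimal sketch; filenames are hypothetical, and as stated above a NIFTI reference is needed when converting from tck):
scil_tractogram_convert.py tracto.tck tracto.trk --reference t1.nii.gz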
Format must be one of \n trk, tck, vtk, fib, dpy\n output_name Output filename. Format must be one of \n trk, tck, vtk, fib, dpy\n\noptions:\n -h, --help show this help message and exit\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "invalid", - "invalid" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "diffusion", - "diffusion" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_convert_hdf5_to_trk", - "docstring": "Save connections of a hdf5 created with\n>> scil_tractogram_segment_bundles_for_connectivity.py.\n\nUseful for quality control and visual inspections.\n\nIt can either save all connections (default), individual connections specified\nwith --edge_keys or connections from specific nodes specified with --node_keys.\n\nWith the option --save_empty, a label_lists, as a txt file, must be provided.\nThis option saves existing connections and empty connections.\n\nThe output is a directory containing the thousands of connections:\nout_dir/\n |-- LABEL1_LABEL1.trk\n |-- LABEL1_LABEL2.trk\n |-- [...]\n |-- LABEL90_LABEL90.trk\n\nFormerly: scil_save_connections_from_hdf5.py", - "help": "usage: scil_tractogram_convert_hdf5_to_trk.py [-h] [--include_dps]\n [--edge_keys LABEL1_LABEL2 [LABEL1_LABEL2 ...]\n | --node_keys NODE [NODE ...]]\n [--save_empty labels_list]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_hdf5 out_dir\n\nSave connections of a hdf5 created with\n>> scil_tractogram_segment_bundles_for_connectivity.py.\n\nUseful for quality control and visual inspections.\n\nIt can either save all connections (default), individual connections specified\nwith --edge_keys or connections from specific nodes specified with --node_keys.\n\nWith the option --save_empty, a label_lists, as a txt file, must be provided.\nThis option saves existing connections and empty connections.\n\nThe output is a directory containing the thousands of connections:\nout_dir/\n |-- LABEL1_LABEL1.trk\n |-- LABEL1_LABEL2.trk\n |-- [...]\n |-- LABEL90_LABEL90.trk\n\nFormerly: scil_save_connections_from_hdf5.py\n\npositional arguments:\n in_hdf5 HDF5 filename (.h5) containing decomposed connections.\n out_dir Path of the output directory.\n\noptions:\n -h, --help show this help message and exit\n --include_dps Include the data_per_streamline the metadata.\n --edge_keys LABEL1_LABEL2 [LABEL1_LABEL2 ...]\n Keys to identify the edges (connections) of interest.\n --node_keys NODE [NODE ...]\n Node keys to identify the sub-networks of interest.\n Equivalent to adding any --edge_keys node_LABEL2 or LABEL2_node.\n --save_empty labels_list\n Save empty connections. Then, the list of possible connections is \n not found from the hdf5 but inferred from labels_list, a txt file \n containing a list of nodes saved by the decomposition script.\n *If used together with edge_keys or node_keys, the provided nodes must \n exist in labels_list.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n CAREFUL. The whole output directory will be deleted if it exists.\n", - "synonyms": [ - [ - "visual", - "visual" - ], - [ - "network", - "networks", - "networks" - ], - [ - "individual", - "individual" - ], - [ - "connections", - "connections" - ], - [ - "exist", - "exist" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_count_streamlines", - "docstring": "Return the number of streamlines in a tractogram. Only supports trk and tck in\norder to support the lazy loading from nibabel.\n\nFormerly: scil_count_streamlines.py", - "help": "usage: scil_tractogram_count_streamlines.py [-h] [--print_count_alone]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]]\n in_tractogram\n\nReturn the number of streamlines in a tractogram. Only supports trk and tck in\norder to support the lazy loading from nibabel.\n\nFormerly: scil_count_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --print_count_alone If true, prints the result only. \n Else, prints the bundle name and count formatted as a json dict. (default)\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "streamlines", - "streamlines" - ], - [ - "true", - "true" - ], - [ - "level", - "level" - ], - [ - "bundles", - "bundle" - ], - [ - "result", - "result" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_cut_streamlines", - "docstring": "Filters streamlines and only keeps the parts of streamlines within or\nbetween the ROIs. Two options are available.\n\nInput mask:\n\nThe mask has either 1 entity/blob or\n2 entities/blobs (does not support disconnected voxels).\nThe option --biggest_blob can help if you have such a scenario.\n\nThe 1 entity scenario will 'trim' the streamlines so their longest segment is\nwithin the bounding box or a binary mask.\n\nThe 2 entities scenario will cut streamlines so their segments are within the\nbounding box or going from binary mask #1 to binary mask #2.\n\nInput label:\n\nThe label MUST contain 2 labels different from zero.\nLabel values could be anything.\nThe script will cut streamlines going from label 1 to label 2.\n\nBoth inputs and scenarios will erase data_per_point and data_per_streamline.\n\nFormerly: scil_cut_streamlines.py", - "help": "usage: scil_tractogram_cut_streamlines.py [-h] (--mask MASK | --label LABEL)\n [--label_ids LABEL_IDS LABEL_IDS]\n [--resample STEP_SIZE]\n [--biggest_blob]\n [--compress [COMPRESS_TH]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nFilters streamlines and only keeps the parts of streamlines within or\nbetween the ROIs. 
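For scil_tractogram_count_streamlines above, the lazy-loading mechanism it relies on can be sketched with nibabel; the header field is format-dependent, so the fallback iterates.

    # Sketch: count streamlines without loading them all in memory.
    import nibabel as nib

    tfile = nib.streamlines.load("tractogram.trk", lazy_load=True)
    count = tfile.header.get("nb_streamlines")    # present for TRK headers
    if count is None:                             # otherwise, iterate lazily
        count = sum(1 for _ in tfile.tractogram.streamlines)
    print(count)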
Two options are available.\n\nInput mask:\n\nThe mask has either 1 entity/blob or\n2 entities/blobs (does not support disconnected voxels).\nThe option --biggest_blob can help if you have such a scenario.\n\nThe 1 entity scenario will 'trim' the streamlines so their longest segment is\nwithin the bounding box or a binary mask.\n\nThe 2 entities scenario will cut streamlines so their segments are within the\nbounding box or going from binary mask #1 to binary mask #2.\n\nInput label:\n\nThe label MUST contain 2 labels different from zero.\nLabel values could be anything.\nThe script will cut streamlines going from label 1 to label 2.\n\nBoth inputs and scenarios will erase data_per_point and data_per_streamline.\n\nFormerly: scil_cut_streamlines.py\n\npositional arguments:\n in_tractogram Input tractogram file.\n out_tractogram Output tractogram file. Note: data_per_point will be discarded, if any!\n\noptions:\n -h, --help show this help message and exit\n --label_ids LABEL_IDS LABEL_IDS\n List of label indices to use to cut streamlines (2 values).\n --resample STEP_SIZE Resample streamlines to a specific step-size in mm [None].\n --biggest_blob Use the biggest entity and force the 1 ROI scenario.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm. [0.1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMandatory mask options:\n Choose between mask or label input.\n\n --mask MASK Binary mask containing either 1 or 2 blobs.\n --label LABEL Label containing 2 blobs.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "step", - "step" - ], - [ - "voxel", - "voxels" - ], - [ - "binary", - "binary" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_detect_loops", - "docstring": "This script can be used to remove loops in two types of streamline datasets:\n\n - Whole brain: For this type, the script removes streamlines if they\n make a loop with an angle of more than 360 degrees. It's possible to change\n this angle with the --angle option. Warning: Don't use --qb option for a\n whole brain tractography.\n\n - Bundle dataset: For this type, it is possible to remove loops and\n streamlines outside the bundle. For the sharp angle turn, use --qb option.\n\nSee also:\n scil_tractogram_filter_by_anatomy.py\n\nFormerly: scil_detect_streamlines_loops.py", - "help": "usage: scil_tractogram_detect_loops.py [-h]\n [--looping_tractogram out_filename]\n [--qb [threshold]] [--angle ANGLE]\n [--display_counts] [--no_empty]\n [--indent INDENT] [--sort_keys]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script can be used to remove loops in two types of streamline datasets:\n\n - Whole brain: For this type, the script removes streamlines if they\n make a loop with an angle of more than 360 degrees. It's possible to change\n this angle with the --angle option. 
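The 360-degree loop criterion of scil_tractogram_detect_loops (introduced above) can be approximated with dipy's winding, which returns a streamline's total turning angle in degrees; the QuickBundles-based --qb outlier step is not shown.

    # Sketch: drop streamlines whose total turning angle exceeds 360 degrees.
    from dipy.io.stateful_tractogram import StatefulTractogram
    from dipy.io.streamline import load_tractogram, save_tractogram
    from dipy.tracking.metrics import winding

    sft = load_tractogram("whole_brain.trk", "same")  # hypothetical input
    keep = [s for s in sft.streamlines if winding(s) <= 360.0]
    save_tractogram(StatefulTractogram.from_sft(keep, sft), "no_loops.trk")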
Warning: Don't use --qb option for a\n whole brain tractography.\n\n - Bundle dataset: For this type, it is possible to remove loops and\n streamlines outside the bundle. For the sharp angle turn, use --qb option.\n\nSee also:\n scil_tractogram_filter_by_anatomy.py\n\nFormerly: scil_detect_streamlines_loops.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_tractogram Output tractogram without loops.\n\noptions:\n -h, --help show this help message and exit\n --looping_tractogram out_filename\n If set, saves detected looping streamlines.\n --qb [threshold] If set, uses QuickBundles to detect outliers (loops, sharp angle \n turns). Given threshold is the maximal streamline to bundle \n distance for a streamline to be considered as a tracking error.\n Default if set: [8.0]\n --angle ANGLE Maximum looping (or turning) angle of\n a streamline in degrees. [360]\n --display_counts Print streamline count before and after filtering\n --no_empty If set, will not save outputs if they are empty.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\nReferences:\n QuickBundles, based on [Garyfallidis12] Frontiers in Neuroscience, 2012.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "processes", - "processes" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "tractography", - "tractography" - ], - [ - "based", - "based" - ], - [ - "considered", - "considered" - ], - [ - "tracking", - "tracking" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ], - [ - "error", - "error" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_dpp_math", - "docstring": "Performs an operation on data per point (dpp) from input streamlines.\n\nAlthough the input data always comes from the dpp, the output can be either\na dpp or a data_per_streamline (dps), depending on the chosen options.\nTwo modes of operation are supported: dpp and dps.\n - In dps mode, the operation is performed on dpp across the dimension of\n the streamlines resulting in a single value (or array in the 4D case)\n per streamline, stored as dps.\n - In dpp mode, the operation is performed on each point separately,\n resulting in a new dpp.\n\nIf endpoints_only and dpp mode are set, the operation will only be calculated at\nthe streamline endpoints; the rest of the values along the streamline will be\nNaN.\n\nIf endpoints_only and dps mode are set, the operation will be calculated across the\ndata at the endpoints and stored as a single value (or array in the 4D case)\nper streamline.\n\nEndpoint only operation:\ncorrelation: correlation calculated between arrays extracted from streamline\nendpoints (data must be multivalued per point) and dps mode must be set.", - "help": "usage: scil_tractogram_dpp_math.py [-h] --mode {dpp,dps} --in_dpp_name key\n [key ...] 
--out_keys key [key ...]\n [--endpoints_only] [--keep_all_dpp_dps]\n [--overwrite_dpp_dps]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--no_bbox_check]\n OPERATION INPUT_FILE OUTPUT_FILE\n\nPerforms an operation on data per point (dpp) from input streamlines.\n\nAlthough the input data always comes from the dpp, the output can be either\na dpp or a data_per_streamline (dps), depending on the chosen options.\nTwo modes of operation are supported: dpp and dps.\n - In dps mode, the operation is performed on dpp across the dimension of\n the streamlines resulting in a single value (or array in the 4D case)\n per streamline, stored as dps.\n - In dpp mode, the operation is performed on each point separately,\n resulting in a new dpp.\n\nIf endpoints_only and dpp mode are set, the operation will only be calculated at\nthe streamline endpoints; the rest of the values along the streamline will be\nNaN.\n\nIf endpoints_only and dps mode are set, the operation will be calculated across the\ndata at the endpoints and stored as a single value (or array in the 4D case)\nper streamline.\n\nEndpoint only operation:\ncorrelation: correlation calculated between arrays extracted from streamline\nendpoints (data must be multivalued per point) and dps mode must be set.\n\npositional arguments:\n OPERATION The type of operation to be performed on the \n streamlines. Must be one of the following: \n [mean, sum, min, max, correlation.]\n INPUT_FILE Input tractogram containing streamlines and metadata.\n OUTPUT_FILE The file where the remaining streamlines \n are saved.\n\noptions:\n -h, --help show this help message and exit\n --mode {dpp,dps} Set to dps if the operation is to be performed \n across all dimensions resulting in a single value per \n streamline. Set to dpp if the operation is to be \n performed on each point separately resulting in a \n single value per point.\n --in_dpp_name key [key ...]\n Name or list of names of the data_per_point for \n operation to be performed on. If more than one dpp \n is selected, the same operation will be applied \n separately to each one.\n --out_keys key [key ...]\n Name of the resulting data_per_point or \n data_per_streamline to be saved in the output \n tractogram. If more than one --in_dpp_name was used, \n enter the same number of --out_keys values.\n --endpoints_only If set, will only perform the operation on endpoints. \n If not set, will perform the operation on all streamline \n points.\n --keep_all_dpp_dps If set, previous data_per_point will be preserved \n in the output tractogram. Else, only --out_dpp_name \n keys will be saved.\n --overwrite_dpp_dps If set, if --keep_all_dpp_dps is set and some \n --out_keys keys already existed in your \n data_per_point or data_per_streamline, allow \n overwriting old data_per_point.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
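A sketch of the "mean in dps mode" case described above, using dipy's StatefulTractogram metadata containers; the dpp key "fa" is hypothetical.

    # Sketch: average a data_per_point key over each streamline -> dps.
    import numpy as np
    from dipy.io.streamline import load_tractogram, save_tractogram

    sft = load_tractogram("bundle.trk", "same")   # must carry dpp "fa"
    means = np.array([np.mean(v) for v in sft.data_per_point["fa"]])
    sft.data_per_streamline["fa_mean"] = means[:, None]
    save_tractogram(sft, "bundle_with_dps.trk")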
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "key", - "key" - ], - [ - "invalid", - "invalid" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "supported", - "supported" - ], - [ - "exist", - "existed" - ], - [ - "Data", - "data", - "data" - ], - [ - "applied", - "applied" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_extract_ushape", - "docstring": "This script extracts streamlines depending on their U-shapeness.\nThis script is a replica of Trackvis method.\n\nWhen ufactor is close to:\n* 0 it defines straight streamlines\n* 1 it defines U-fibers\n* -1 it defines S-fibers\n\nFormerly: scil_extract_ushape.py", - "help": "usage: scil_tractogram_extract_ushape.py [-h] [--minU MINU] [--maxU MAXU]\n [--remaining_tractogram filename]\n [--no_empty] [--display_counts]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script extracts streamlines depending on their U-shapeness.\nThis script is a replica of Trackvis method.\n\nWhen ufactor is close to:\n* 0 it defines straight streamlines\n* 1 it defines U-fibers\n* -1 it defines S-fibers\n\nFormerly: scil_extract_ushape.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_tractogram Output tractogram file name.\n\noptions:\n -h, --help show this help message and exit\n --minU MINU Min ufactor value. [0.5]\n --maxU MAXU Max ufactor value. [1.0]\n --remaining_tractogram filename\n If set, saves remaining streamlines.\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "methods", - "method" - ], - [ - "streamline", - "streamline" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_filter_by_anatomy", - "docstring": "This script filters streamlines in a tractogram according to their geometrical\nproperties (i.e. limiting their length and looping angle) and their anatomical\nending properties (i.e. the anatomical tissue or region their endpoints lie\nin).\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nThe filtering is performed sequentially in four steps, each step processing the\ndata on the output of the previous step:\n\n Step 1 - Remove streamlines below the minimum length and above the\n maximum length. These thresholds must be set with the ``--minL``\n and ``--maxL`` options.\n Step 2 - Ensure that no streamlines end in the cerebrospinal fluid\n according to the provided parcellation. 
A binary mask can be used\n alternatively through the ``--csf_bin`` option.\n Step 3 - Ensure that no streamlines end in white matter by ensuring that\n they reach the cortical regions according to the provided\n parcellation. The cortical regions of the parcellation can be\n dilated using the ``--ctx_dilation_radius``.\n Step 4 - Remove streamlines if they make a loop with an angle above a\n certain threshold. It's possible to change this angle with the\n ``-a`` option.\n\nLength and loop-based filtering (steps 1 and 2) will not have practical effects\nif no specific thresholds are provided (but will be still executed), since\ndefault values are 0 for the minimum allowed length and infinite for the\nmaximum allowed length and angle.\n\nThe anatomical region endings filtering requires a parcellation or label image\nfile including the cerebrospinal fluid and gray matter (cortical) regions\naccording to the Desikan-Killiany atlas. Intermediate tractograms (results of\neach step and outliers) and volumes can be saved throughout the process.\n\nExample usages:\n\n# Filter length, looping angle and anatomical ending region\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --minL 20 --maxL 200 -a 300\n# Filter only anatomical ending region, with WM dilation and provided csf mask\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --csf_bin csf_bin.nii.gz --ctx_dilation_radius 2\n\nFormerly: scil_filter_streamlines_anatomically.py\n\nNOTE: As of version 2.0.0, the dilation of the cortical mask has changed; from\nan in-house process to scipy's dilation. Results may differ from previous\nversions.", - "help": "usage: scil_tractogram_filter_by_anatomy.py [-h] [--minL MINL] [--maxL MAXL]\n [--angle ANGLE]\n [--csf_bin CSF_BIN]\n [--dilate_ctx value]\n [--save_intermediate_tractograms]\n [--save_volumes] [--save_counts]\n [--save_rejected] [--no_empty]\n [--indent INDENT] [--sort_keys]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram in_wmparc out_path\n\nThis script filters streamlines in a tractogram according to their geometrical\nproperties (i.e. limiting their length and looping angle) and their anatomical\nending properties (i.e. the anatomical tissue or region their endpoints lie\nin).\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nThe filtering is performed sequentially in four steps, each step processing the\ndata on the output of the previous step:\n\n Step 1 - Remove streamlines below the minimum length and above the\n maximum length. These thresholds must be set with the ``--minL``\n and ``--maxL`` options.\n Step 2 - Ensure that no streamlines end in the cerebrospinal fluid\n according to the provided parcellation. A binary mask can be used\n alternatively through the ``--csf_bin`` option.\n Step 3 - Ensure that no streamlines end in white matter by ensuring that\n they reach the cortical regions according to the provided\n parcellation. The cortical regions of the parcellation can be\n dilated using the ``--ctx_dilation_radius``.\n Step 4 - Remove streamlines if they make a loop with an angle above a\n certain threshold. 
It's possible to change this angle with the\n ``-a`` option.\n\nLength and loop-based filtering (steps 1 and 2) will not have practical effects\nif no specific thresholds are provided (but will be still executed), since\ndefault values are 0 for the minimum allowed length and infinite for the\nmaximum allowed length and angle.\n\nThe anatomical region endings filtering requires a parcellation or label image\nfile including the cerebrospinal fluid and gray matter (cortical) regions\naccording to the Desikan-Killiany atlas. Intermediate tractograms (results of\neach step and outliers) and volumes can be saved throughout the process.\n\nExample usages:\n\n# Filter length, looping angle and anatomical ending region\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --minL 20 --maxL 200 -a 300\n# Filter only anatomical ending region, with WM dilation and provided csf mask\n>>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz\n path/to/output/directory --csf_bin csf_bin.nii.gz --ctx_dilation_radius 2\n\nFormerly: scil_filter_streamlines_anatomically.py\n\nNOTE: As of version 2.0.0, the dilation of the cortical mask has changed; from\nan in-house process to scipy's dilation. Results may differ from previous\nversions.\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n in_wmparc Path of the white matter parcellation atlas (.nii or .nii.gz)\n out_path Path to the output files.\n\noptions:\n -h, --help show this help message and exit\n --minL MINL Minimum length of streamlines, in mm. [0.0]\n --maxL MAXL Maximum length of streamlines, in mm. [inf]\n --angle ANGLE Maximum looping (or turning) angle of a streamline, \n in degrees. [inf]\n --csf_bin CSF_BIN Allow CSF endings filtering with this binary\n mask instead of using the atlas (.nii or .nii.gz)\n --dilate_ctx value If set, dilate the cortical labels. Value is the dilation \n radius, in voxels (an integer > 0)\n --save_intermediate_tractograms\n Save accepted and discarded streamlines after each step.\n --save_volumes Save volumetric images (e.g. binarised label \n images, etc) in the filtering process.\n --save_counts Save the streamline counts to a file (.json)\n --save_rejected Save rejected streamlines to output tractogram.\n --no_empty Do not write file if there is no streamlines.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n\n References:\n [1] J\u00f6rgens, D., Descoteaux, M., Moreno, R., 2021. Challenges for\n tractogram \ufb01ltering. In: \u00d6zarslan, E., Schultz, T., Zhang, E., Fuster,\n A. (Eds.), Anisotropy Across Fields and Scales. Springer. Mathematics\n and Visualization.\n [2] Legarreta, J., Petit, L., Rheault, F., Theaud, G., Lemaire, C.,\n Descoteaux, M., Jodoin, P.M. Filtering in tractography using\n autoencoders (FINTA). Medical Image Analysis. 
2021\n \n", - "synonyms": [ - [ - "process", - "process" - ], - [ - "limiting", - "limiting" - ], - [ - "region", - "regions", - "region" - ], - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "white", - "white" - ], - [ - "region", - "regions", - "regions" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "image", - "image" - ], - [ - "volume", - "volumes", - "volumes" - ], - [ - "Data", - "data", - "data" - ], - [ - "processes", - "processes" - ], - [ - "step", - "step" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "voxel", - "voxels" - ], - [ - "atlas", - "atlas" - ], - [ - "threshold", - "thresholds", - "thresholds" - ], - [ - "tractography", - "tractography" - ], - [ - "based", - "based" - ], - [ - "matter", - "matter" - ], - [ - "cortex", - "cortical", - "parietal", - "cortical" - ], - [ - "binary", - "binary" - ], - [ - "anatomical", - "anatomy", - "anatomical" - ], - [ - "processing", - "processing" - ], - [ - "specific", - "specific" - ], - [ - "level", - "level" - ], - [ - "analysis", - "analysis" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_filter_by_length", - "docstring": "Script to filter streamlines based on their lengths.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Filtering by length is its step1)\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_length.py", - "help": "usage: scil_tractogram_filter_by_length.py [-h] [--minL MINL] [--maxL MAXL]\n [--no_empty] [--display_counts]\n [--save_rejected] [--indent INDENT]\n [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nScript to filter streamlines based on their lengths.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Filtering by length is its step1)\n - scil_tractogram_filter_by_orientation.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_length.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --minL MINL Minimum length of streamlines, in mm. [0.0]\n --maxL MAXL Maximum length of streamlines, in mm. [inf]\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering\n --save_rejected Save rejected streamlines to output tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
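The core of the length filter above fits in a few lines with dipy; the bounds and filenames are hypothetical.

    # Sketch: keep streamlines whose length is within [minL, maxL] mm.
    from dipy.io.stateful_tractogram import StatefulTractogram
    from dipy.io.streamline import load_tractogram, save_tractogram
    from dipy.tracking.streamline import length

    sft = load_tractogram("in.trk", "same")
    keep = [s for s, l in zip(sft.streamlines, length(sft.streamlines))
            if 20.0 <= l <= 200.0]
    save_tractogram(StatefulTractogram.from_sft(keep, sft), "filtered.trk")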
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "based", - "based" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_filter_by_orientation", - "docstring": "Script to filter streamlines based on their distance traveled in a specific\ndimension (x, y, or z).\n\nUseful to help differentiate bundles.\n\nExamples: In a brain aligned with x coordinates in left - right axis and y\ncoordinates in anterior-posterior axis, a streamline from the ...\n - corpus callosum will likely travel a very short distance in the y axis.\n - cingulum will likely travel a very short distance in the x axis.\n\nNote: we consider that x, y, z are the coordinates of the streamlines; we\ndo not verify if they are aligned with the brain's orientation.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_orientation.py", - "help": "usage: scil_tractogram_filter_by_orientation.py [-h] [--min_x MIN_X]\n [--max_x MAX_X]\n [--min_y MIN_Y]\n [--max_y MAX_Y]\n [--min_z MIN_Z]\n [--max_z MAX_Z] [--use_abs]\n [--no_empty]\n [--display_counts]\n [--save_rejected filename]\n [--indent INDENT]\n [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractogram out_tractogram\n\nScript to filter streamlines based on their distance traveled in a specific\ndimension (x, y, or z).\n\nUseful to help differentiate bundles.\n\nExamples: In a brain aligned with x coordinates in left - right axis and y\ncoordinates in anterior-posterior axis, a streamline from the ...\n - corpus callosum will likely travel a very short distance in the y axis.\n - cingulum will likely travel a very short distance in the x axis.\n\nNote: we consider that x, y, z are the coordinates of the streamlines; we\ndo not verify if they are aligned with the brain's orientation.\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_roi.py\n\nFormerly: scil_filter_streamlines_by_orientation.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --min_x MIN_X Minimum distance in the first dimension, in mm.[0.0]\n --max_x MAX_X Maximum distance in the first dimension, in mm.[inf]\n --min_y MIN_Y Minimum distance in the second dimension, in mm.[0.0]\n --max_y MAX_Y Maximum distance in the second dimension, in mm.[inf]\n --min_z MIN_Z Minimum distance in the third dimension, in mm.[0.0]\n --max_z MAX_Z Maximum distance in the third dimension, in mm.[inf]\n --use_abs If set, will use the total of distances in absolute value (ex, coming back on yourself will contribute to the total distance instead of cancelling it).\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering.\n --save_rejected filename\n Save the SFT of rejected streamlines.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v 
[{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "anterior", - "posterior", - "anterior" - ], - [ - "anterior", - "posterior", - "posterior" - ], - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "corpus", - "corpus" - ], - [ - "orientation", - "orientation" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "left", - "left" - ], - [ - "based", - "based" - ], - [ - "bundles", - "bundles" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ], - [ - "total", - "total" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_filter_by_roi", - "docstring": "Filtering of a tractogram based on any combination of conditions involving a\nROI (e.g. keep streamlines whose endpoints are inside the ROI, exclude\nstreamlines not entirely included in a ROI, etc.)\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Can reject streamlines with endpoints in the WM or the CSF based on\n labels)\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n\nCondition\n---------\nFor every type of ROI, two or three values are expected: MODE CRITERIA DISTANCE\n(DISTANCE is always optional)\n- MODE must be one of these values: ['any', 'all', 'either_end', 'both_ends']\n - any: any part of the streamline must be in the mask\n - all: all parts of the streamline must be in the mask.\n - either_end: at least one end of the streamline must be in the mask.\n - both_ends: both ends of the streamline must be in the mask.\n- CRITERIA must be one of these values: ['include', 'exclude']\n - Include: If condition from MODE is respected, streamline is included.\n - Exclude: If condition from MODE is respected, streamline is excluded.\n- DISTANCE must be an int and is optional.\n\n\nType of ROI\n-----------\n- Drawn ROI: Directly loaded from a binary file.\n- Atlas ROI: Selected label from an atlas.\n - ID is one or multiple integer values in the atlas. If multiple values,\n ID needs to be between quotes.\n Example: \"1:6 9 10:15\" will use values between 1 and 6 and values\n between 10 and 15 included as well as value 9.\n- BDO: The ROI is the interior of a bounding box.\n- Planes: The ROI is the equivalent of a one-voxel plane.\n * Using mode 'all' with x/y/z plane works but makes very little sense.\n\nNote: `--drawn_roi MASK.nii.gz all include` is equivalent to\n `--drawn_roi INVERSE_MASK.nii.gz any exclude`\n\nFor example, this allows finding all streamlines entirely in the WM in one\ncommand (without manually inverting the mask first) or to remove any streamline\nstaying in the GM without getting out.\n\nSupports multiple filtering conditions\n--------------------------------------\nMultiple filtering conditions can be used, with varied ROI types if necessary.\nCombining two conditions is equivalent to a logical AND between the conditions.\nOrder of application does not matter for the final result, but may change the\nintermediate files, if any.\n\nDistance management\n-------------------\nDISTANCE is optional, and it should be used carefully with large voxel size\n(e.g. > 2.5mm). 
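The per-axis "distance traveled" measure used by scil_tractogram_filter_by_orientation (above) reduces to a one-liner per streamline; this is a sketch of the two behaviours, not the script's exact code.

    # Sketch: distance traveled along one axis, with/without --use_abs.
    import numpy as np

    def axis_travel(streamline, axis, use_abs=False):
        steps = np.diff(streamline[:, axis])      # per-segment displacement
        # --use_abs sums absolute displacements (backtracking adds up);
        # the default uses the net displacement (backtracking cancels).
        return np.sum(np.abs(steps)) if use_abs else np.abs(np.sum(steps))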
The value is in voxel for ROIs and in mm for bounding boxes.\nAnisotropic data will affect each direction differently.\n When using --overwrite_distance, any filtering option with given criteria\nwill have its DISTANCE value replaced.\n\nFormerly: scil_filter_tractogram.py", - "help": "usage: scil_tractogram_filter_by_roi.py [-h]\n [--drawn_roi DRAWN_ROI [DRAWN_ROI ...]]\n [--atlas_roi ATLAS_ROI [ATLAS_ROI ...]]\n [--bdo BDO [BDO ...]]\n [--x_plane X_PLANE [X_PLANE ...]]\n [--y_plane Y_PLANE [Y_PLANE ...]]\n [--z_plane Z_PLANE [Z_PLANE ...]]\n [--filtering_list FILTERING_LIST]\n [--overwrite_distance OVERWRITE_DISTANCE [OVERWRITE_DISTANCE ...]]\n [--save_masks_atlas_roi SAVE_MASKS_ATLAS_ROI]\n [--no_empty] [--display_counts]\n [--save_rejected FILENAME]\n [--indent INDENT] [--sort_keys]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nFiltering of a tractogram based on any combination of conditions involving a\nROI (e.g. keep streamlines whose endpoints are inside the ROI, exclude\nstreamlines not entirely included in a ROI, etc.)\n\nSee also:\n - scil_tractogram_detect_loops.py\n - scil_tractogram_filter_by_anatomy.py\n (Can reject streamlines with endpoints in the WM or the CSF based on\n labels)\n - scil_tractogram_filter_by_length.py\n - scil_tractogram_filter_by_orientation.py\n\nCondition\n---------\nFor every type of ROI, two or three values are expected: MODE CRITERIA DISTANCE\n(DISTANCE is always optional)\n- MODE must be one of these values: ['any', 'all', 'either_end', 'both_ends']\n - any: any part of the streamline must be in the mask\n - all: all parts of the streamline must be in the mask.\n - either_end: at least one end of the streamline must be in the mask.\n - both_ends: both ends of the streamline must be in the mask.\n- CRITERIA must be one of these values: ['include', 'exclude']\n - Include: If condition from MODE is respected, streamline is included.\n - Exclude: If condition from MODE is respected, streamline is excluded.\n- DISTANCE must be an int and is optional.\n\nType of ROI\n-----------\n- Drawn ROI: Directly loaded from a binary file.\n- Atlas ROI: Selected label from an atlas.\n - ID is one or multiple integer values in the atlas. If multiple values,\n ID needs to be between quotes.\n Example: \"1:6 9 10:15\" will use values between 1 and 6 and values\n between 10 and 15 included as well as value 9.\n- BDO: The ROI is the interior of a bounding box.\n- Planes: The ROI is the equivalent of a one-voxel plane.\n * Using mode 'all' with x/y/z plane works but makes very little sense.\n\nNote: `--drawn_roi MASK.nii.gz all include` is equivalent to\n `--drawn_roi INVERSE_MASK.nii.gz any exclude`\n\nFor example, this allows finding all streamlines entirely in the WM in one\ncommand (without manually inverting the mask first) or to remove any streamline\nstaying in the GM without getting out.\n\nSupports multiple filtering conditions\n--------------------------------------\nMultiple filtering conditions can be used, with varied ROI types if necessary.\nCombining two conditions is equivalent to a logical AND between the conditions.\nOrder of application does not matter for the final result, but may change the\nintermediate files, if any.\n\nDistance management\n-------------------\nDISTANCE is optional, and it should be used carefully with large voxel size\n(e.g. > 2.5mm). 
The value is in voxel for ROIs and in mm for bounding boxes.\nAnisotropic data will affect each direction differently.\n When using --overwrite_distance, any filtering option with given criteria\nwill have its DISTANCE value replaced.\n\nFormerly: scil_filter_tractogram.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n out_tractogram Path of the output tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --drawn_roi DRAWN_ROI [DRAWN_ROI ...]\n ROI_NAME MODE CRITERIA DISTANCE (distance in voxel is optional)\n Filename of a hand drawn ROI (.nii or .nii.gz).\n --atlas_roi ATLAS_ROI [ATLAS_ROI ...]\n ATLAS_NAME ID MODE CRITERIA DISTANCE (distance in voxel is optional)\n Filename of an atlas (.nii or .nii.gz).\n --bdo BDO [BDO ...] BDO_NAME MODE CRITERIA DISTANCE (distance in mm is optional)\n Filename of a bounding box (bdo) file from MI-Brain.\n --x_plane X_PLANE [X_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in X, in voxel space.\n --y_plane Y_PLANE [Y_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in Y, in voxel space.\n --z_plane Z_PLANE [Z_PLANE ...]\n PLANE MODE CRITERIA DISTANCE (distance in voxel is optional)\n Slice number in Z, in voxel space.\n --filtering_list FILTERING_LIST\n Text file containing one rule per line\n (i.e. drawn_roi mask.nii.gz both_ends include 1).\n --overwrite_distance OVERWRITE_DISTANCE [OVERWRITE_DISTANCE ...]\n MODE CRITERIA DISTANCE (distance in voxel for ROIs and in mm for bounding box).\n If set, it will overwrite the distance associated to a specific mode/criteria.\n --save_masks_atlas_roi SAVE_MASKS_ATLAS_ROI\n If set, will save the atlas roi masks. The value to provide is the \n prefix, ex: my_path/atlas_roi_. Whole filename will be \n my_path/atlas_roi_{id}.nii.gz\n --no_empty Do not write file if there is no streamline.\n --display_counts Print streamline count before and after filtering\n --save_rejected FILENAME\n Save rejected streamlines to output tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
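One of the filter_by_roi conditions above, MODE 'either_end' with CRITERIA 'include', can be sketched as follows; mask and tractogram names are hypothetical, and out-of-grid endpoints are not handled.

    # Sketch: keep streamlines with at least one endpoint inside a mask.
    import nibabel as nib
    import numpy as np
    from dipy.io.streamline import load_tractogram

    mask = nib.load("roi.nii.gz").get_fdata() > 0
    sft = load_tractogram("in.trk", "roi.nii.gz")
    sft.to_vox()
    sft.to_corner()                               # voxel indices via floor()
    keep = [i for i, s in enumerate(sft.streamlines)
            if any(mask[tuple(np.floor(p).astype(int))]
                   for p in (s[0], s[-1]))]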
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "streamlines", - "streamlines" - ], - [ - "planes", - "plane" - ], - [ - "streamline", - "streamline" - ], - [ - "direction", - "direction" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "planes", - "planes" - ], - [ - "space", - "space" - ], - [ - "Data", - "data", - "data" - ], - [ - "application", - "application" - ], - [ - "conditions", - "conditions" - ], - [ - "large", - "large" - ], - [ - "atlas", - "atlas" - ], - [ - "result", - "result" - ], - [ - "based", - "based" - ], - [ - "matter", - "matter" - ], - [ - "binary", - "binary" - ], - [ - "voxel", - "voxel" - ], - [ - "specific", - "specific" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_fix_trk", - "docstring": "This script is made to fix a DSI-Studio or Startrack TRK file\n(unknown space/convention) to make it compatible with TrackVis,\nMI-Brain, Dipy Horizon (Stateful Tractogram).\n\nDSI-Studio\n==========\n\nThe script either makes it match with an anatomy from DSI-Studio (AC-PC aligned,\nsometimes flipped) or if --in_native_fa is provided it moves it back to native\nDWI space (this involves registration).\n\nSince DSI-Studio sometimes leaves some skull around the brain, the --auto_crop\naims to stabilize registration. If this option fails, manually BET both FA.\nRegistration is more robust at resolution above 2mm (iso); be careful.\n\nIf you are fixing bundles, use this script once with --save_transfo and verify\nresults. Once satisfied, call the scripts on bundles using a bash for loop with\n--load_transfo to save computation.\n\nWe recommend the --cut_invalid option to remove invalid points of streamlines rather\nthan removing entire streamlines.\n\nThis script was tested on various datasets and worked on all of them. However,\nalways verify the results, and if a specific case does not work, open an issue\non the Scilpy GitHub repository.\n\nStartrack\n==========\n\nThe script will create a new stateful tractogram using the reference in\norder to fix the missing information in the header of the trk.\n\n\nWARNING: This script is still experimental; DSI-Studio and Startrack\nevolve quickly and results may vary depending on the data itself\nas well as DSI-studio/Startrack version.\n\nFormerly: scil_fix_dsi_studio_trk.py", - "help": "usage: scil_tractogram_fix_trk.py [-h] [--software string]\n [--cut_invalid | --remove_invalid]\n [--in_dsi_fa IN_DSI_FA]\n [--in_native_fa IN_NATIVE_FA] [--auto_crop]\n [--save_transfo FILE | --load_transfo FILE]\n [--reference REFERENCE] [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script is made to fix a DSI-Studio or Startrack TRK file\n(unknown space/convention) to make it compatible with TrackVis,\nMI-Brain, Dipy Horizon (Stateful Tractogram).\n\nDSI-Studio\n==========\n\nThe script either makes it match with an anatomy from DSI-Studio (AC-PC aligned,\nsometimes flipped) or if --in_native_fa is provided it moves it back to native\nDWI space (this involves registration).\n\nSince DSI-Studio sometimes leaves some skull around the brain, the --auto_crop\naims to stabilize registration. 
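The --save_transfo / --load_transfo round trip mentioned above amounts to applying a saved 4x4 matrix to other tractograms; a hedged sketch with dipy (the script's full registration pipeline is not reproduced here).

    # Sketch: reuse a transformation saved by --save_transfo on another bundle.
    import numpy as np
    from dipy.io.stateful_tractogram import StatefulTractogram
    from dipy.io.streamline import load_tractogram, save_tractogram
    from dipy.tracking.streamline import transform_streamlines

    affine = np.loadtxt("transfo.txt")            # 4x4, saved earlier
    sft = load_tractogram("bundle.trk", "same", bbox_valid_check=False)
    moved = transform_streamlines(sft.streamlines, affine)
    save_tractogram(StatefulTractogram.from_sft(moved, sft),
                    "bundle_fixed.trk", bbox_valid_check=False)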
If this option fails, manually BET both FA.\nRegistration is more robust at resolution above 2mm (iso); be careful.\n\nIf you are fixing bundles, use this script once with --save_transfo and verify\nresults. Once satisfied, call the scripts on bundles using a bash for loop with\n--load_transfo to save computation.\n\nWe recommend the --cut_invalid option to remove invalid points of streamlines rather\nthan removing entire streamlines.\n\nThis script was tested on various datasets and worked on all of them. However,\nalways verify the results, and if a specific case does not work, open an issue\non the Scilpy GitHub repository.\n\nStartrack\n==========\n\nThe script will create a new stateful tractogram using the reference in\norder to fix the missing information in the header of the trk.\n\nWARNING: This script is still experimental; DSI-Studio and Startrack\nevolve quickly and results may vary depending on the data itself\nas well as DSI-studio/Startrack version.\n\nFormerly: scil_fix_dsi_studio_trk.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file from DSI studio (.trk).\n out_tractogram Path of the output tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --software string Software used to create in_tractogram.\n Choices: ['dsi_studio', 'startrack']\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_invalid Remove the streamlines landing out of the bounding box.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nDSI options:\n --in_dsi_fa IN_DSI_FA\n Path of the input FA from DSI Studio (.nii.gz).\n --in_native_fa IN_NATIVE_FA\n Path of the input FA from Dipy/MRtrix (.nii.gz).\n Move the tractogram back to a \"proper\" space, includes registration.\n --auto_crop If both FA are not already BET, perform registration \n using a centered-cube crop to ignore the skull.\n A good BET for both is more robust.\n --save_transfo FILE Save estimated transformation to avoid recomputing (.txt).\n --load_transfo FILE Load estimated transformation to apply to other files (.txt).\n\nStarTrack options:\n --reference REFERENCE\n Reference anatomy (.nii or .nii.gz).\n", - "synonyms": [ - [ - "order", - "order" - ], - [ - "variety", - "various" - ], - [ - "streamlines", - "streamlines" - ], - [ - "invalid", - "invalid" - ], - [ - "create", - "create" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "work", - "work" - ], - [ - "unknown", - "unknown" - ], - [ - "applied", - "apply" - ], - [ - "space", - "space" - ], - [ - "involved", - "involved" - ], - [ - "Data", - "data", - "data" - ], - [ - "working", - "worked" - ], - [ - "bundles", - "bundles" - ], - [ - "specific", - "specific" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_flip", - "docstring": "Flip streamlines locally around specific axes.\n\nIMPORTANT: this script should only be used in case of absolute necessity.\nIt's better to fix the real tools than to force flipping streamlines to\nhave them fit in the tools.\n\nFormerly: scil_flip_streamlines.py", - "help": "usage: scil_tractogram_flip.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram 
out_tractogram {x,y,z}\n [{x,y,z} ...]\n\nFlip streamlines locally around specific axes.\n\nIMPORTANT: this script should only be used in case of absolute necessity.\nIt's better to fix the real tools than to force flipping streamlines to\nhave them fit in the tools.\n\nFormerly: scil_flip_streamlines.py\n\npositional arguments:\n in_tractogram Path of the input tractogram file.\n out_tractogram Path of the output tractogram file.\n {x,y,z} The axes you want to flip. e.g., to flip the x and y axes use: x y.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "tool", - "tools" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_math", - "docstring": "Performs an operation on a list of streamline files. The supported\noperations are:\n\ndifference: Keep the streamlines from the first file that are not in\n any of the following files.\n\nintersection: Keep the streamlines that are present in all files.\n\nunion: Keep all streamlines while removing duplicates.\n\nconcatenate: Keep all streamlines with duplicates.\n\nlazy_concatenate: Keep all streamlines with duplicates, never load the whole\n tractograms in memory. Only works with trk/tck files,\n metadata will be lost and invalid streamlines are kept.\n\nIf a file 'duplicate.trk' has identical streamlines, calling the script using\nthe difference/intersection/union with a single input will remove these\nduplicated streamlines.\n\nTo allow a soft match, use the --precision option to increase the allowed\nthreshold for similarity. A precision of 1 represents 10**(-1), so a\nmaximum distance of 0.1mm is allowed. If the streamlines are identical, the\ndefault value of 3 (or 0.001mm distance) should work.\n\nIf there is a 0.5mm shift, use a precision of 0 (or 1mm distance) and the\n--robust option. This should make it work, but slightly slower. It will merge all\nstreamlines that are similar when rounded to that precision level.\n\nThe metadata (data per point, data per streamline) of the streamlines that\nare kept in the output will be preserved. This requires that all input files\nshare the same type of metadata. If this is not the case, use the option\n--no_metadata to strip the metadata from the output. Or --fake_metadata to\ninitialize dummy metadata in the files missing them.\n\nFormerly: scil_streamlines_math.py", - "help": "usage: scil_tractogram_math.py [-h] [--precision NBR_OF_DECIMALS] [--robust]\n [--no_metadata] [--fake_metadata]\n [--save_indices OUT_INDEX_FILE] [--save_empty]\n [--no_bbox_check] [--indent INDENT]\n [--sort_keys] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n OPERATION INPUT_FILES [INPUT_FILES ...]\n OUTPUT_FILE\n\nPerforms an operation on a list of streamline files. 
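The soft-match behaviour described above (precision p tolerates roughly 10**(-p) mm) can be sketched by hashing rounded coordinates; the real script also offers a --robust mode for small shifts, which is not shown.

    # Sketch: set operations on streamlines via rounded-coordinate keys.
    import numpy as np

    def key(s, precision=4):
        return np.round(np.asarray(s), precision).tobytes()

    def intersection(bundle_a, bundle_b, precision=4):
        keys_b = {key(s, precision) for s in bundle_b}
        return [s for s in bundle_a if key(s, precision) in keys_b]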
The supported\noperations are:\n\ndifference: Keep the streamlines from the first file that are not in\n any of the following files.\n\nintersection: Keep the streamlines that are present in all files.\n\nunion: Keep all streamlines while removing duplicates.\n\nconcatenate: Keep all streamlines with duplicates.\n\nlazy_concatenate: Keep all streamlines with duplicates, never load the whole\n tractograms in memory. Only works with trk/tck files,\n metadata will be lost and invalid streamlines are kept.\n\nIf a file 'duplicate.trk' has identical streamlines, calling the script using\nthe difference/intersection/union with a single input will remove these\nduplicated streamlines.\n\nTo allow a soft match, use the --precision option to increase the allowed\nthreshold for similarity. A precision of 1 represents 10**(-1), so a\nmaximum distance of 0.1mm is allowed. If the streamlines are identical, the\ndefault value of 3 (or 0.001mm distance) should work.\n\nIf there is a 0.5mm shift, use a precision of 0 (or 1mm distance) and the\n--robust option. This should make it work, but slightly slower. It will merge all\nstreamlines that are similar when rounded to that precision level.\n\nThe metadata (data per point, data per streamline) of the streamlines that\nare kept in the output will be preserved. This requires that all input files\nshare the same type of metadata. If this is not the case, use the option\n--no_metadata to strip the metadata from the output. Or --fake_metadata to\ninitialize dummy metadata in the files missing them.\n\nFormerly: scil_streamlines_math.py\n\npositional arguments:\n OPERATION The type of operation to be performed on the streamlines. Must\n be one of the following: difference, intersection, union, concatenate, lazy_concatenate.\n INPUT_FILES The list of files that contain the streamlines to operate on.\n OUTPUT_FILE The file where the remaining streamlines are saved.\n\noptions:\n -h, --help show this help message and exit\n --precision NBR_OF_DECIMALS, -p NBR_OF_DECIMALS\n Precision used to compare streamlines [4].\n --robust, -r Use version robust to small translation/rotation.\n --no_metadata, -n Strip the streamline metadata from the output.\n --fake_metadata Skip the metadata verification, create fake metadata if missing, can lead to unexpected behavior.\n --save_indices OUT_INDEX_FILE, -s OUT_INDEX_FILE\n Save the streamline indices to the supplied json file.\n --save_empty If set, we will save all results, even if the tractogram is empty.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "precision", - "precision" - ], - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "increase", - "increase" - ], - [ - "similarity", - "similarity" - ], - [ - "invalid", - "invalid" - ], - [ - "create", - "create" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "supported", - "supported" - ], - [ - "work", - "work" - ], - [ - "Data", - "data", - "data" - ], - [ - "large", - "small" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "memory", - "memory" - ], - [ - "level", - "level" - ], - [ - "difference", - "difference" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_pairwise_comparison", - "docstring": "This script is designed to compare and help visualize differences between two\ntractograms. This can be especially useful in studies where multiple\ntractograms from different algorithms or parameters need to be compared.\n\nA similar script (scil_bundle_pairwise_comparison.py) is available for bundles,\nwith metrics more adapted to bundles (and spatial agreement).\n\nThe difference is computed in terms of\n- A voxel-wise spatial distance between streamlines crossing each voxel.\n This can help to see if both tractography reconstructions at each voxel\n look similar (out_diff.nii.gz)\n- An angular correlation (ACC) between streamline orientation from TODI.\n This compares the local orientation of streamlines at each voxel\n (out_acc.nii.gz)\n- A patch-wise correlation between streamline density maps from both\n tractograms. This compares where the high/low density regions agree or not\n (out_corr.nii.gz)\n- A heatmap combining all the previous metrics using the harmonic mean of the\n normalized metrics to summarize general agreement (out_heatmap.nii.gz)", - "help": "usage: scil_tractogram_pairwise_comparison.py [-h] [--out_dir OUT_DIR]\n [--out_prefix OUT_PREFIX]\n [--in_mask IN_FILE]\n [--skip_streamlines_distance]\n [--processes NBR]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram_1 in_tractogram_2\n\nThis script is designed to compare and help visualize differences between two\ntractograms. This can be especially useful in studies where multiple\ntractograms from different algorithms or parameters need to be compared.\n\nA similar script (scil_bundle_pairwise_comparison.py) is available for bundles,\nwith metrics more adapted to bundles (and spatial agreement).\n\nThe difference is computed in terms of\n- A voxel-wise spatial distance between streamlines crossing each voxel.\n This can help to see if both tractography reconstructions at each voxel\n look similar (out_diff.nii.gz)\n- An angular correlation (ACC) between streamline orientation from TODI.\n This compares the local orientation of streamlines at each voxel\n (out_acc.nii.gz)\n- A patch-wise correlation between streamline density maps from both\n tractograms. 
This compares where the high/low density regions agree or not\n (out_corr.nii.gz)\n- A heatmap combining all the previous metrics using the harmonic mean of the\n normalized metrics to summarize general agreement (out_heatmap.nii.gz)\n\npositional arguments:\n in_tractogram_1 Input tractogram 1.\n in_tractogram_2 Input tractogram 2.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Directory where all output files will be saved.\n If not specified, outputs will be saved in the current directory.\n --out_prefix OUT_PREFIX\n Prefix for output files. Useful for distinguishing between different runs [out].\n --in_mask IN_FILE Optional input mask.\n --skip_streamlines_distance\n Skip computation of the spatial distance between streamlines. Slowest part of the computation.\n --processes NBR Number of sub-processes to start. \n Default: [1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "spatial", - "spatial" - ], - [ - "streamlines", - "streamlines" - ], - [ - "studies", - "study", - "studies" - ], - [ - "streamline", - "streamline" - ], - [ - "differences", - "differences" - ], - [ - "orientation", - "orientation" - ], - [ - "region", - "regions", - "regions" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "high", - "low" - ], - [ - "high", - "high" - ], - [ - "maps", - "maps" - ], - [ - "processes", - "processes" - ], - [ - "algorithm", - "algorithms" - ], - [ - "tractography", - "tractography" - ], - [ - "bundles", - "bundles" - ], - [ - "general", - "general" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "parameters", - "parameters" - ], - [ - "difference", - "difference" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_print_info", - "docstring": "Prints information on a loaded tractogram: number of streamlines, and\nmean / min / max / std of\n - length in number of points\n - length in mm\n - step size.\n\nFor trk files: also prints the data_per_point and data_per_streamline keys.\n\nSee also:\n - scil_header_print_info.py to see the header, affine, volume dimension.\n - scil_bundle_shape_measures.py to see bundle-specific information.", - "help": "usage: scil_tractogram_print_info.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [--indent INDENT] [--sort_keys]\n in_tractogram\n\nPrints information on a loaded tractogram: number of streamlines, and\nmean / min / max / std of\n - length in number of points\n - length in mm\n - step size.\n\nFor trk files: also prints the data_per_point and data_per_streamline keys.\n\nSee also:\n - scil_header_print_info.py to see the header, affine, volume dimension.\n - scil_bundle_shape_measures.py to see bundle-specific information.\n\npositional arguments:\n in_tractogram Tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
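Per the description above, the final heatmap combines the three normalized agreement maps with a harmonic mean; a sketch of that combination (the exact normalization used by the script is an assumption and not shown).

    # Sketch: voxel-wise harmonic mean of agreement maps normalized to [0, 1].
    import numpy as np

    def heatmap(acc, corr, diff_norm):
        stack = np.stack([acc, corr, diff_norm])
        with np.errstate(divide="ignore"):
            # Any zero metric drives the harmonic mean to zero at that voxel.
            return stack.shape[0] / np.sum(1.0 / stack, axis=0)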
- { - "name": "scil_tractogram_print_info", - "docstring": "Prints information on a loaded tractogram: number of streamlines, and\nmean / min / max / std of\n - length in number of points\n - length in mm\n - step size.\n\nFor trk files: also prints the data_per_point and data_per_streamline keys.\n\nSee also:\n - scil_header_print_info.py to see the header, affine, volume dimension.\n - scil_bundle_shape_measures.py to see bundle-specific information.", - "help": "usage: scil_tractogram_print_info.py [-h] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [--indent INDENT] [--sort_keys]\n in_tractogram\n\nPrints information on a loaded tractogram: number of streamlines, and\nmean / min / max / std of\n - length in number of points\n - length in mm\n - step size.\n\nFor trk files: also prints the data_per_point and data_per_streamline keys.\n\nSee also:\n - scil_header_print_info.py to see the header, affine, volume dimension.\n - scil_bundle_shape_measures.py to see bundle-specific information.\n\npositional arguments:\n in_tractogram Tractogram file.\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "step", - "step" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_project_map_to_streamlines", - "docstring": "Projects data extracted from a map onto the points of streamlines.\n\nThe default options will take data from a nifti image (3D or 4D) and project it\nonto the points of streamlines. If the image is 4D, the data is stored as a\nlist of 1D arrays per streamline. If the image is 3D, the data is stored as a\nlist of values per streamline.\n\nSee also scil_tractogram_project_streamlines_to_map.py for the reverse action.\n\n* Note that the data from your maps will be projected only on the coordinates\nof the points of your streamlines. Data underlying the whole segments between\ntwo consecutive points is not used. If your streamlines are strongly\ncompressed, or if they have a very big step size, the result will possibly\nreflect your map poorly. You may use scil_tractogram_resample.py to upsample\nyour streamlines first.\n\n* Hint: The streamlines themselves are not modified here, only their dpp. To\navoid multiplying data on disk, you could use the following arguments to save\nthe new dpp in your current tractogram:\n>> scil_tractogram_project_map_to_streamlines.py $in_bundle $in_bundle\n --keep_all_dpp -f", - "help": "usage: scil_tractogram_project_map_to_streamlines.py [-h] --in_maps IN_MAPS\n [IN_MAPS ...]\n --out_dpp_name\n OUT_DPP_NAME\n [OUT_DPP_NAME ...]\n [--trilinear]\n [--endpoints_only]\n [--keep_all_dpp]\n [--overwrite_dpp]\n [--reference REFERENCE]\n [-f]\n [-v [{DEBUG,INFO,WARNING}]]\n in_tractogram\n out_tractogram\n\nProjects data extracted from a map onto the points of streamlines.\n\nThe default options will take data from a nifti image (3D or 4D) and project it\nonto the points of streamlines. If the image is 4D, the data is stored as a\nlist of 1D arrays per streamline. If the image is 3D, the data is stored as a\nlist of values per streamline.\n\nSee also scil_tractogram_project_streamlines_to_map.py for the reverse action.\n\n* Note that the data from your maps will be projected only on the coordinates\nof the points of your streamlines. Data underlying the whole segments between\ntwo consecutive points is not used. If your streamlines are strongly\ncompressed, or if they have a very big step size, the result will possibly\nreflect your map poorly. You may use scil_tractogram_resample.py to upsample\nyour streamlines first.\n\n* Hint: The streamlines themselves are not modified here, only their dpp. 
To\navoid multiplying data on disk, you could use the following arguments to save\nthe new dpp in your current tractogram:\n>> scil_tractogram_project_map_to_streamlines.py $in_bundle $in_bundle\n --keep_all_dpp -f\n\npositional arguments:\n in_tractogram Fiber bundle file.\n out_tractogram Output file.\n\noptions:\n -h, --help show this help message and exit\n --in_maps IN_MAPS [IN_MAPS ...]\n Nifti map to project onto streamlines.\n --out_dpp_name OUT_DPP_NAME [OUT_DPP_NAME ...]\n Name of the data_per_point to be saved in the \n output tractogram.\n --trilinear If set, will use trilinear interpolation \n else will use nearest neighbor interpolation \n by default.\n --endpoints_only If set, will only project the map onto the \n endpoints of the streamlines (all other values along \n streamlines will be NaN). If not set, will project \n the map onto all points of the streamlines.\n --keep_all_dpp If set, previous data_per_point will be preserved \n in the output tractogram. Else, only --out_dpp_name \n keys will be saved.\n --overwrite_dpp If set, if --keep_all_dpp is set and some \n --out_dpp_name keys already existed in your \n data_per_point, allow overwriting old data_per_point.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -f Force overwriting of the output files.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "maps", - "map" - ], - [ - "action", - "action" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "image", - "image" - ], - [ - "exist", - "existed" - ], - [ - "maps", - "maps" - ], - [ - "project", - "project" - ], - [ - "Data", - "data", - "data" - ], - [ - "step", - "step" - ], - [ - "result", - "result" - ], - [ - "project", - "projects" - ], - [ - "level", - "level" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - },
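As a sketch of a typical call for the script above (hypothetical filenames; --in_maps and --out_dpp_name are the documented required options):
$ scil_tractogram_project_map_to_streamlines.py af_left.trk af_left_fa.trk --in_maps fa.nii.gz --out_dpp_name fa
With a 3D map such as FA, this stores one value per streamline point under the 'fa' data_per_point key of the output tractogram.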
- { - "name": "scil_tractogram_project_streamlines_to_map", - "docstring": "Projects metrics onto the underlying voxels of the streamlines. This script can\nproject data from data_per_point (dpp) or data_per_streamline (dps) to maps.\n\nYou choose to project data from all points of the streamlines, or from the\nendpoints only. The idea then is to visualize the cortical areas affected by\nmetrics (assuming streamlines start/end in the cortex).\n\nSee also scil_tractogram_project_map_to_streamlines.py for the reverse action.\n\nHow the data is loaded:\n - From dps: uses the same value for each point of the streamline.\n - From dpp: one value per point.\n\nHow the data is used:\n 1. Average all points of the streamline to get a mean value, set this value\n to all points.\n 2. Average the two endpoints and get their mean value, set this value to\n all points.\n 3. Keep each point individually.\n\nHow the data is projected to a map:\n A. Using each point.\n B. Using the endpoints only.\n\nFor more complex operations than the average per streamline, see\nscil_tractogram_dpp_math.py.", - "help": "usage: scil_tractogram_project_streamlines_to_map.py [-h]\n (--use_dps key [key ...] | --use_dpp key [key ...] | --load_dps file [file ...] | --load_dpp file [file ...])\n (--mean_endpoints | --mean_streamline | --point_by_point)\n (--to_endpoints | --to_wm)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_bundle out_prefix\n\nProjects metrics onto the underlying voxels of the streamlines. This script can\nproject data from data_per_point (dpp) or data_per_streamline (dps) to maps.\n\nYou choose to project data from all points of the streamlines, or from the\nendpoints only. The idea then is to visualize the cortical areas affected by\nmetrics (assuming streamlines start/end in the cortex).\n\nSee also scil_tractogram_project_map_to_streamlines.py for the reverse action.\n\nHow the data is loaded:\n - From dps: uses the same value for each point of the streamline.\n - From dpp: one value per point.\n\nHow the data is used:\n 1. Average all points of the streamline to get a mean value, set this value\n to all points.\n 2. Average the two endpoints and get their mean value, set this value to\n all points.\n 3. Keep each point individually.\n\nHow the data is projected to a map:\n A. Using each point.\n B. Using the endpoints only.\n\nFor more complex operations than the average per streamline, see\nscil_tractogram_dpp_math.py.\n\npositional arguments:\n in_bundle Fiber bundle file.\n out_prefix Folder + prefix to save endpoints metric(s). We will save \n one nifti file per dpp/dps key given.\n Ex: my_path/subjX_bundleY_ with --use_dpp key1 will output \n my_path/subjX_bundleY_key1.nii.gz\n\noptions:\n -h, --help show this help message and exit\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nWhere to get the statistics from. (Choose one):\n --use_dps key [key ...]\n Use the data_per_streamline from the tractogram.\n It must be a .trk\n --use_dpp key [key ...]\n Use the data_per_point from the tractogram. \n It must be a trk.\n --load_dps file [file ...]\n Load data per streamline (scalar) .txt or .npy.\n Must load an array with the right shape.\n --load_dpp file [file ...]\n Load data per point (scalar) from .txt or .npy.\n Must load an array with the right shape.\n\nProcessing choices. (Choose one):\n --mean_endpoints Uses one single value per streamline: the mean of the two \n endpoints.\n --mean_streamline Use one single value per streamline: the mean of all \n points of the streamline.\n --point_by_point Directly project the streamlines values onto the map.\n\nWhere to send the statistics. 
(Choose one):\n --to_endpoints Project metrics onto a mask of the endpoints.\n --to_wm Project metrics into streamlines coverage.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "areas", - "areas" - ], - [ - "streamline", - "streamline" - ], - [ - "maps", - "map" - ], - [ - "key", - "key" - ], - [ - "action", - "action" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "cortex", - "cortical", - "cortex" - ], - [ - "maps", - "maps" - ], - [ - "project", - "project" - ], - [ - "Data", - "data", - "data" - ], - [ - "voxel", - "voxels" - ], - [ - "shape", - "shape" - ], - [ - "complex", - "complex" - ], - [ - "cortex", - "cortical", - "parietal", - "cortical" - ], - [ - "project", - "projects" - ], - [ - "average", - "average" - ], - [ - "processing", - "processing" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_qbx", - "docstring": "Compute clusters using QuickBundlesX and save them separately.\nWe cannot know the number of clusters in advance.\n\nQuickbundles:\nGaryfallidis, E. et al. (2012). Quickbundles, a method for tractography\nsimplification. Frontiers in neuroscience, 6, 175.\n\nQuickbundlesX:\nGaryfallidis, E. et al. (2016) QuickBundlesX: sequential clustering of millions\nof streamlines in multiple levels of detail at record execution time. 24th\nInternational Society of Magnetic Resonance in Medicine (ISMRM).\n\n\"QuickBundlesX shows a remarkable 20+X speedup over its predecessor\"\n\nFormerly: scil_compute_qbx.py", - "help": "usage: scil_tractogram_qbx.py [-h] [--nb_points NB_POINTS]\n [--out_centroids OUT_CENTROIDS]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram dist_thresh out_clusters_dir\n\nCompute clusters using QuickBundlesX and save them separately.\nWe cannot know the number of clusters in advance.\n\nQuickbundles:\nGaryfallidis, E. et al. (2012). Quickbundles, a method for tractography\nsimplification. Frontiers in neuroscience, 6, 175.\n\nQuickbundlesX:\nGaryfallidis, E. et al. (2016) QuickBundlesX: sequential clustering of millions\nof streamlines in multiple levels of detail at record execution time. 24th\nInternational Society of Magnetic Resonance in Medicine (ISMRM).\n\n\"QuickBundlesX shows a remarkable 20+X speedup over its predecessor\"\n\nFormerly: scil_compute_qbx.py\n\npositional arguments:\n in_tractogram Tractogram filename.\n Path of the input tractogram or bundle.\n dist_thresh Last QuickBundlesX threshold in mm. Typically \n the values are between 10-20mm.\n out_clusters_dir Path where to save the clusters directory.\n\noptions:\n -h, --help show this help message and exit\n --nb_points NB_POINTS\n Streamlines will be resampled to have this number of points [20].\n --out_centroids OUT_CENTROIDS\n Output tractogram filename.\n Format must be readable by the Nibabel API.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "methods", - "method" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "level", - "levels" - ], - [ - "bundles", - "bundle" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "tractography", - "tractography" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_register", - "docstring": "Generate a linear transformation matrix from the registration of 2 tractograms.\nTypically, this script is run before scil_tractogram_apply_transform.py.\n\nFor more information on how to use the various registration scripts, see the\ndoc at:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_register_tractogram.py", - "help": "usage: scil_tractogram_register.py [-h] [--out_name OUT_NAME] [--only_rigid]\n [--moving_tractogram_ref MOVING_TRACTOGRAM_REF]\n [--static_tractogram_ref STATIC_TRACTOGRAM_REF]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n moving_tractogram static_tractogram\n\nGenerate a linear transformation matrix from the registration of 2 tractograms.\nTypically, this script is run before scil_tractogram_apply_transform.py.\n\nFor more information on how to use the various registration scripts, see the\ndoc at:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_register_tractogram.py\n\npositional arguments:\n moving_tractogram Path of the moving tractogram.\n static_tractogram Path of the target tractogram.\n\noptions:\n -h, --help show this help message and exit\n --out_name OUT_NAME Filename of the transformation matrix. \n The registration type will be appended as a suffix,\n [_.txt]. \n Default: [transformation.txt]\n --only_rigid If set, will only use a rigid transformation (uses affine by default).\n --moving_tractogram_ref MOVING_TRACTOGRAM_REF\n Reference anatomy for moving_tractogram (if tck/vtk/fib/dpy) file\n support (.nii or .nii.gz).\n --static_tractogram_ref STATIC_TRACTOGRAM_REF\n Reference anatomy for static_tractogram (if tck/vtk/fib/dpy) file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nReferences:\n[1] E. Garyfallidis, O. Ocegueda, D. Wassermann, M. Descoteaux\nRobust and efficient linear registration of white-matter fascicles in the\nspace of streamlines, NeuroImage, Volume 117, 15 August 2015, Pages 124-140\n(http://www.sciencedirect.com/science/article/pii/S1053811915003961)\n", - "synonyms": [ - [ - "variety", - "various" - ], - [ - "streamlines", - "streamlines" - ], - [ - "white", - "white" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "space", - "space" - ], - [ - "matter", - "matter" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_remove_invalid", - "docstring": "Removal of streamlines that are out of the volume bounding box. 
In voxel space,\nno negative coordinates and no coordinates above the volume dimensions are possible.\nAny streamline that does not respect these two conditions is removed.\n\nThe --cut_invalid option will cut streamlines so that their longest segment is\nwithin the bounding box.\n\nFormerly: scil_remove_invalid_streamlines.py", - "help": "usage: scil_tractogram_remove_invalid.py [-h] [--cut_invalid]\n [--remove_single_point]\n [--remove_overlapping_points]\n [--threshold THRESHOLD] [--no_empty]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nRemoval of streamlines that are out of the volume bounding box. In voxel space,\nno negative coordinates and no coordinates above the volume dimensions are possible.\nAny streamline that does not respect these two conditions is removed.\n\nThe --cut_invalid option will cut streamlines so that their longest segment is\nwithin the bounding box.\n\nFormerly: scil_remove_invalid_streamlines.py\n\npositional arguments:\n in_tractogram Tractogram filename. Format must be one of \n trk, tck, vtk, fib, dpy.\n out_tractogram Output filename. Format must be one of \n trk, tck, vtk, fib, dpy.\n\noptions:\n -h, --help show this help message and exit\n --cut_invalid Cut invalid streamlines rather than removing them.\n Keep the longest segment only.\n --remove_single_point\n Consider single point streamlines invalid.\n --remove_overlapping_points\n Consider streamlines with overlapping points invalid.\n --threshold THRESHOLD\n Maximum distance between two points to be considered overlapping [0.001 mm].\n --no_empty Do not save empty tractogram.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "invalid", - "invalid" - ], - [ - "positive", - "negative" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "space", - "space" - ], - [ - "conditions", - "conditions" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "considered", - "considered" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - },
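For instance (hypothetical filenames; flags as documented above), to trim out-of-box streamlines to their longest valid segment instead of discarding them, and to drop single-point streamlines:
$ scil_tractogram_remove_invalid.py in.trk out.trk --cut_invalid --remove_single_point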
- { - "name": "scil_tractogram_resample", - "docstring": "Script to resample a tractogram to a set number of streamlines.\nDefault behavior:\n- IF number of requested streamlines is lower than streamline count: DOWNSAMPLE\n- IF number of requested streamlines is higher than streamline count: UPSAMPLE\nTo prevent upsampling when it is not desired, use --never_upsample.\n\nCan be useful to build training sets for machine learning algorithms, to\nupsample under-represented bundles or downsample over-represented bundles.\n\nWorks by either selecting a subset of streamlines or by generating new\nstreamlines by adding gaussian noise to existing ones.\n\nUpsampling:\n Includes smoothing to compensate for the noisiness of new streamlines\n generated by the process.\nDownsampling:\n Includes the possibility of choosing randomly *per Quickbundle cluster* to\n ensure that all clusters are represented in the final tractogram.\n\nExample usage:\n$ scil_tractogram_resample.py input.trk 1000 output.trk --point_wise_std 0.5 --gaussian 5 --keep_invalid_streamlines\n$ scil_visualize_bundles.py output.trk --local_coloring --width=0.1", - "help": "usage: scil_tractogram_resample.py [-h] [--never_upsample]\n [--point_wise_std POINT_WISE_STD]\n [--tube_radius TUBE_RADIUS]\n [--gaussian SIGMA] [-e ERROR_RATE]\n [--keep_invalid_streamlines]\n [--downsample_per_cluster]\n [--qbx_thresholds QBX_THRESHOLDS [QBX_THRESHOLDS ...]]\n [--seed SEED] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram nb_streamlines out_tractogram\n\nScript to resample a tractogram to a set number of streamlines.\nDefault behavior:\n- IF number of requested streamlines is lower than streamline count: DOWNSAMPLE\n- IF number of requested streamlines is higher than streamline count: UPSAMPLE\nTo prevent upsampling when it is not desired, use --never_upsample.\n\nCan be useful to build training sets for machine learning algorithms, to\nupsample under-represented bundles or downsample over-represented bundles.\n\nWorks by either selecting a subset of streamlines or by generating new\nstreamlines by adding gaussian noise to existing ones.\n\nUpsampling:\n Includes smoothing to compensate for the noisiness of new streamlines\n generated by the process.\nDownsampling:\n Includes the possibility of choosing randomly *per Quickbundle cluster* to\n ensure that all clusters are represented in the final tractogram.\n\nExample usage:\n$ scil_tractogram_resample.py input.trk 1000 output.trk --point_wise_std 0.5 --gaussian 5 --keep_invalid_streamlines\n$ scil_visualize_bundles.py output.trk --local_coloring --width=0.1\n\npositional arguments:\n in_tractogram Input tractography file.\n nb_streamlines Number of streamlines to resample the tractogram to.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --never_upsample Make sure to never upsample a tractogram.\n Useful when downsampling batches of 
files using bash.\n --seed SEED Use a specific random seed for the resampling.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nUpsampling params:\n --point_wise_std POINT_WISE_STD\n Noise to add to existing streamlines points to generate new ones [1].\n --tube_radius TUBE_RADIUS\n Maximum distance to generate streamlines around the original ones [1].\n --gaussian SIGMA Sigma for smoothing. Use the value of surrounding X,Y,Z points on the streamline to blur the streamlines.\n A good sigma choice would be around 5.\n -e ERROR_RATE Maximum compression distance in mm [0.1].\n --keep_invalid_streamlines\n Keep invalid newly generated streamlines that may go out of the \n bounding box.\n\nDownsampling params:\n --downsample_per_cluster\n If set, downsampling will be done per cluster (computed with \n Quickbundles) to ensure that at least some streamlines are \n kept per bundle. Else, random downsampling is performed (default).\n --qbx_thresholds QBX_THRESHOLDS [QBX_THRESHOLDS ...]\n If you chose option '--downsample_per_cluster', you may set \n the QBx threshold value(s) here. Default: [40, 30, 20]\n", - "synonyms": [ - [ - "process", - "process" - ], - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "higher", - "lower" - ], - [ - "invalid", - "invalid" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "learning", - "learning" - ], - [ - "bundles", - "bundle" - ], - [ - "possibility", - "possibility" - ], - [ - "random", - "random" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "algorithm", - "algorithms" - ], - [ - "tractography", - "tractography" - ], - [ - "bundles", - "bundles" - ], - [ - "specific", - "specific" - ], - [ - "level", - "level" - ], - [ - "higher", - "higher" - ], - [ - "random", - "randomly" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_resample_nb_points", - "docstring": "Script to resample a set of streamlines to either a new number of points per\nstreamline or to a fixed step size. WARNING: data_per_point is not carried.\n\nFormerly: scil_resample_streamlines.py", - "help": "usage: scil_tractogram_resample_nb_points.py [-h]\n (--nb_pts_per_streamline NB_PTS_PER_STREAMLINE | --step_size STEP_SIZE)\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nScript to resample a set of streamlines to either a new number of points per\nstreamline or to a fixed step size. WARNING: data_per_point is not carried.\n\nFormerly: scil_resample_streamlines.py\n\npositional arguments:\n in_tractogram Streamlines input file name.\n out_tractogram Streamlines output file name.\n\noptions:\n -h, --help show this help message and exit\n --nb_pts_per_streamline NB_PTS_PER_STREAMLINE\n Number of points per streamline in the output.\n --step_size STEP_SIZE\n Step size in the output (in mm).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "step", - "step" - ], - [ - "streamlines", - "streamlines" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "level", - "level" - ], - [ - "streamline", - "streamline" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_seed_density_map", - "docstring": "Compute a density map of seeds saved in .trk file.\n\nFormerly: scil_compute_seed_density_map.py", - "help": "usage: scil_tractogram_seed_density_map.py [-h] [--binary [FIXED_VALUE]]\n [--no_bbox_check]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n tractogram_filename\n seed_density_filename\n\nCompute a density map of seeds saved in .trk file.\n\nFormerly: scil_compute_seed_density_map.py\n\npositional arguments:\n tractogram_filename Tracts filename. Format must be .trk. \n File should contain a \"seeds\" value in the data_per_streamline.\n These seeds must be in space: voxel, origin: corner.\n seed_density_filename\n Output seed density filename. Format must be Nifti.\n\noptions:\n -h, --help show this help message and exit\n --binary [FIXED_VALUE]\n If set, will store the same value for all intersected voxels, creating a binary map.\n When set without a value, 1 is used (and dtype uint8).\n If a value is given, will be used as the stored value.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "maps", - "map" - ], - [ - "invalid", - "invalid" - ], - [ - "space", - "space" - ], - [ - "create", - "creating" - ], - [ - "voxel", - "voxels" - ], - [ - "binary", - "binary" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "tract", - "tracts", - "tracts" - ], - [ - "intersected", - "intersected" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_segment_and_score", - "docstring": "Scores input tractogram overall and bundlewise.\n\nOutputs\n-------\n\n - results.json: Contains a full tractometry report.\n - processing_stats.json: Contains information on the segmentation of\n bundles (ex: the number of wpc per criteria).\n - Splits the input tractogram into\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (if args.compute_ic)\n segmented_WPC/*_wpc.trk (if args.save_wpc_separately)\n IS.trk OR NC.trk (if args.compute_ic)\n\nBy default, if a streamline fits in many bundles, it will be included in every\none. This means a streamline may be a VS for a bundle and an IS for\n(potentially many) others. If you want to assign each streamline to at most one\nbundle, use the `--unique` flag.\n\nConfig file\n-----------\n\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\nMandatory:\n - endpoints OR [head AND tail]: filename for the endpoints ROI.\n If 'endpoints' is used, we will automatically separate the mask into two\n ROIs, acting as head and tail. Quality check is strongly recommended.\n\nOptional:\n Concerning metrics:\n - gt_mask: expected result. 
OL and OR metrics will be computed from this.*\n\n Concerning inclusion criteria (other streamlines will be WPC):\n - all_mask: ROI serving as \"all\" criteria: to be included in the bundle,\n ALL points of a streamline must be inside the mask.*\n - any_mask: ROI serving as \"any\" criteria: streamlines\n must touch that mask in at least one point (\"any\" point) to be included\n in the bundle.\n - angle: angle criteria. Streamlines containing loops and sharp turns above\n given angle will be rejected from the bundle.\n - length: maximum and minimum lengths per bundle.\n - length_x / length_x_abs: maximum and minimum total distance in the x\n direction (i.e. first coordinate).**\n - length_y / length_y_abs: maximum and minimum total distance in the y\n direction (i.e. second coordinate).**\n - length_z / length_z_abs: maximum and minimum total distance in the z\n direction (i.e. third coordinate).**\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n** With absolute values: coming back on yourself will contribute to the total\ndistance instead of cancelling it.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n \"angle\": 300,\n \"length\": [140, 150],\n \"endpoints\": \"PATH/file1.nii.gz\"\n }\n}", - "help": "usage: scil_tractogram_segment_and_score.py [-h] [--json_prefix p]\n [--gt_dir DIR]\n [--use_gt_masks_as_all_masks]\n [--dilate_endpoints NB_PASS]\n [--remove_invalid]\n [--save_wpc_separately]\n [--compute_ic] [--unique]\n [--remove_wpc_belonging_to_another_bundle]\n [--no_empty] [--indent INDENT]\n [--sort_keys] [--no_bbox_check]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram gt_config out_dir\n\nScores input tractogram overall and bundlewise.\n\nOutputs\n-------\n\n - results.json: Contains a full tractometry report.\n - processing_stats.json: Contains information on the segmentation of\n bundles (ex: the number of wpc per criteria).\n - Splits the input tractogram into\n segmented_VB/*_VS.trk.\n segmented_IB/*_*_IC.trk (if args.compute_ic)\n segmented_WPC/*_wpc.trk (if args.save_wpc_separately)\n IS.trk OR NC.trk (if args.compute_ic)\n\nBy default, if a streamline fits in many bundles, it will be included in every\none. This means a streamline may be a VS for a bundle and an IS for\n(potentially many) others. If you want to assign each streamline to at most one\nbundle, use the `--unique` flag.\n\nConfig file\n-----------\n\nThe config file needs to be a json containing a dict of the ground-truth\nbundles as keys. The value for each bundle is itself a dictionary with:\n\nMandatory:\n - endpoints OR [head AND tail]: filename for the endpoints ROI.\n If 'endpoints' is used, we will automatically separate the mask into two\n ROIs, acting as head and tail. Quality check is strongly recommended.\n\nOptional:\n Concerning metrics:\n - gt_mask: expected result. OL and OR metrics will be computed from this.*\n\n Concerning inclusion criteria (other streamlines will be WPC):\n - all_mask: ROI serving as \"all\" criteria: to be included in the bundle,\n ALL points of a streamline must be inside the mask.*\n - any_mask: ROI serving as \"any\" criteria: streamlines\n must touch that mask in at least one point (\"any\" point) to be included\n in the bundle.\n - angle: angle criteria. 
Streamlines containing loops and sharp turns above\n given angle will be rejected from the bundle.\n - length: maximum and minimum lengths per bundle.\n - length_x / length_x_abs: maximum and minimum total distance in the x\n direction (i.e. first coordinate).**\n - length_y / length_y_abs: maximum and minimum total distance in the y\n direction (i.e. second coordinate).**\n - length_z / length_z_abs: maximum and minimum total distance in the z\n direction (i.e. third coordinate).**\n\n* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will\nbe created. If it is a nifti file, it will be considered to be a mask.\n** With absolute values: coming back on yourself will contribute to the total\ndistance instead of cancelling it.\n\nExample config file:\n{\n \"Ground_truth_bundle_0\": {\n \"gt_mask\": \"PATH/bundle0.nii.gz\",\n \"angle\": 300,\n \"length\": [140, 150],\n \"endpoints\": \"PATH/file1.nii.gz\"\n }\n}\n\nTractometry\n-----------\nGlobal connectivity metrics:\n\n- Computed by default:\n - VS: valid streamlines, belonging to a bundle (i.e. respecting all the\n criteria for that bundle; endpoints, limit_mask, gt_mask.).\n - IS: invalid streamlines. All other streamlines. IS = IC + NC.\n\n- Optional:\n - WPC: wrong path connections, streamlines connecting correct ROIs but not\n respecting the other criteria for that bundle. Such streamlines always\n exist but they are only saved separately if specified in the options.\n Else, they are merged back with the IS.\n By definition, WPC are only computed if \"limits masks\" are provided.\n - IC: invalid connections, streamlines joining an incorrect combination of\n ROIs. Use carefully, quality depends on the quality of your ROIs and no\n analysis is done on the shape of the streamlines.\n - NC: no connections. Invalid streamlines minus invalid connections.\n\n- Fidelity metrics:\n - OL: Overlap. Percentage of ground truth voxels containing streamline(s)\n for a given bundle.\n - OR: Overreach. Number of voxels containing streamline(s) when they\n shouldn't, for a given bundle. We compute two versions:\n OR_pct_vs = divided by the total number of voxels covered by the bundle.\n (percentage of the voxels touched by VS).\n Values range between 0 and 100%. Values are not defined when we\n recovered no streamline for a bundle, but we set the OR_pct_vs to 0\n in that case.\n OR_pct_gt = divided by the total size of the ground truth bundle mask.\n Values could be higher than 100%.\n - f1 score: which is the same as the Dice score.\n\npositional arguments:\n in_tractogram Input tractogram to score\n gt_config .json dict configured as specified above.\n out_dir Output directory for the resulting segmented bundles.\n\noptions:\n -h, --help show this help message and exit\n --json_prefix p Prefix of the two output json files. Ex: 'study_x_'. Files will be saved inside out_dir.\n Suffixes will be 'processing_stats.json' and 'results.json'.\n --no_empty Do not write file if there is no streamline.\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nAdditions to gt_config:\n --gt_dir DIR Root path of the ground truth files listed in the gt_config. 
\n If not set, filenames in the config file are considered \n as absolute paths.\n --use_gt_masks_as_all_masks\n If set, the gt_config's 'gt_mask' will also be used as\n 'all_mask' for each bundle. Note that this means the\n OR will necessarily be 0.\n\nPreprocessing:\n --dilate_endpoints NB_PASS\n Dilate endpoint masks n-times. Default: 0.\n --remove_invalid Remove invalid streamlines before scoring.\n\nTractometry choices:\n --save_wpc_separately\n If set, streamlines rejected from VC based on the config\n file criteria will be saved separately from IS (and IC)\n in one file *_wpc.tck per bundle.\n --compute_ic If set, IS are split into NC + IC, where IC are computed as one bundle per\n pair of ROIs not belonging to a true connection, named\n *_*_IC.tck.\n --unique If set, streamlines are assigned to the first bundle they fit in and not to all.\n --remove_wpc_belonging_to_another_bundle\n If set, WPC actually belonging to any VB (in the \n case of overlapping ROIs) will be removed\n from the WPC classification.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "connect", - "connecting", - "connects", - "connecting" - ], - [ - "direction", - "direction" - ], - [ - "connectivity", - "connectivity" - ], - [ - "unique", - "unique" - ], - [ - "based", - "based" - ], - [ - "shape", - "shape" - ], - [ - "considered", - "considered" - ], - [ - "higher", - "higher" - ], - [ - "streamlines", - "streamlines" - ], - [ - "assigned", - "assigned" - ], - [ - "bundles", - "bundle" - ], - [ - "bundles", - "bundles" - ], - [ - "level", - "level" - ], - [ - "analysis", - "analysis" - ], - [ - "invalid", - "invalid" - ], - [ - "connections", - "connections" - ], - [ - "true", - "true" - ], - [ - "result", - "result" - ], - [ - "valid", - "valid" - ], - [ - "connection", - "connection" - ], - [ - "total", - "total" - ], - [ - "size", - "size" - ], - [ - "streamline", - "streamline" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "global", - "global" - ], - [ - "voxel", - "voxels" - ], - [ - "exist", - "exist" - ], - [ - "voxel", - "voxel" - ], - [ - "defined", - "defined" - ] - ], - "keywords": [] - },
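A plausible end-to-end call for the scoring script above (hypothetical paths; config.json structured as in the example shown):
$ scil_tractogram_segment_and_score.py whole_brain.trk config.json scoring/ --gt_dir ground_truth/ --compute_ic --unique
results.json and processing_stats.json are then written inside scoring/.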
- { - "name": "scil_tractogram_segment_bundles", - "docstring": "Compute BundleSeg & supports multi-atlas & multi-parameters (RBx-like).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are not sure about the transformation 'direction' you can try\nscil_tractogram_segment_bundles.py (with the -v option), a warning will pop up\nif the provided transformation is not used correctly.\n\nThe number of folders inside 'models_directories' will increase the number of\nruns. Each folder is considered as an atlas and bundles inside will initiate\nmore BundleSeg executions. The more atlases you have, the more robust the\nrecognition will be.\n\n--minimal_vote_ratio is a value between 0 and 1. If you have 5 input model\ndirectories and a minimal_vote_ratio of 0.5, you will need at least 3 votes.\n\nExample data and usage available at: https://zenodo.org/record/7950602\n\nFor RAM usage, it is recommended to use this heuristic:\n (size of inputs tractogram (GB) * number of processes) < RAM (GB)\nThis is important because many instances of data structures are initialized\nin parallel and can lead to a RAM overflow.\n\nFormerly: scil_recognize_multi_bundles.py", - "help": "usage: scil_tractogram_segment_bundles.py [-h] [--out_dir OUT_DIR]\n [--minimal_vote_ratio MINIMAL_VOTE_RATIO]\n [--seed SEED] [--inverse]\n [--reference REFERENCE]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractograms [in_tractograms ...]\n in_config_file in_directory\n in_transfo\n\nCompute BundleSeg & supports multi-atlas & multi-parameters (RBx-like).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are not sure about the transformation 'direction' you can try\nscil_tractogram_segment_bundles.py (with the -v option), a warning will pop up\nif the provided transformation is not used correctly.\n\nThe number of folders inside 'models_directories' will increase the number of\nruns. Each folder is considered as an atlas and bundles inside will initiate\nmore BundleSeg executions. The more atlases you have, the more robust the\nrecognition will be.\n\n--minimal_vote_ratio is a value between 0 and 1. If you have 5 input model\ndirectories and a minimal_vote_ratio of 0.5, you will need at least 3 votes.\n\nExample data and usage available at: https://zenodo.org/record/7950602\n\nFor RAM usage, it is recommended to use this heuristic:\n (size of inputs tractogram (GB) * number of processes) < RAM (GB)\nThis is important because many instances of data structures are initialized\nin parallel and can lead to a RAM overflow.\n\nFormerly: scil_recognize_multi_bundles.py\n\npositional arguments:\n in_tractograms Input tractogram filename (.trk or .tck).\n in_config_file Path of the config file (.json)\n in_directory Path of parent folder of models directories.\n Each folder inside will be considered as a different atlas.\n in_transfo Path for the transformation to model space (.txt, .npy or .mat).\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Path for the output directory [voting_results].\n --minimal_vote_ratio MINIMAL_VOTE_RATIO\n Streamlines will only be considered for saving if\n recognized often enough [0.5].\n --seed SEED Random number generator seed 0.\n --inverse Use the inverse transformation.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] St-Onge, Etienne, Kurt G. Schilling, and Francois Rheault.\n\"BundleSeg: A versatile, reliable and reproducible approach to white\nmatter bundle segmentation.\" International Workshop on Computational\nDiffusion MRI. 
Cham: Springer Nature Switzerland (2023)\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "structure", - "structures", - "structures" - ], - [ - "increase", - "increase" - ], - [ - "reliable", - "reliable" - ], - [ - "direction", - "direction" - ], - [ - "white", - "white" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "diffusion", - "diffusion" - ], - [ - "space", - "space" - ], - [ - "random", - "random" - ], - [ - "Data", - "data", - "data" - ], - [ - "processes", - "processes" - ], - [ - "atlas", - "atlas" - ], - [ - "bundles", - "bundles" - ], - [ - "matter", - "matter" - ], - [ - "considered", - "considered" - ], - [ - "level", - "level" - ], - [ - "parameters", - "parameters" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_segment_bundles_for_connectivity", - "docstring": "Compute a connectivity matrix from a tractogram and a parcellation.\n\nCurrent strategy is to keep the longest streamline segment connecting 2\nregions. If the streamline crosses other gray matter regions before reaching\nits final connected region, the kept connection is still the longest. This is\nrobust to compressed streamlines.\n\nThe output file is a hdf5 (.h5) where the keys are 'LABEL1_LABEL2' and each\ngroup is composed of 'data', 'offsets' and 'lengths' from the array_sequence.\nThe 'data' is stored in VOX/CORNER for simplicity and efficiency. See script\nscil_tractogram_convert_hdf5_to_trk.py to convert to a list of .trk bundles.\n\nFor the --outlier_threshold option the default is a recommended good trade-off\nfor a freesurfer parcellation. With smaller parcels (brainnetome, glasser) the\nthreshold should most likely be reduced.\n\nGood candidate connections to QC are the brainstem to precentral gyrus\nconnection and precentral left to precentral right connection, or equivalent\nin your parcellation.\n\nNOTE: this script can take a while to run. Please be patient.\nExample: on a tractogram with 1.8M streamlines, running on a SSD:\n- 15 minutes without post-processing, only saving final bundles.\n- 30 minutes with full post-processing, only saving final bundles.\n- 60 minutes with full post-processing, saving all possible files.\n\nFormerly: scil_decompose_connectivity.py", - "help": "usage: scil_tractogram_segment_bundles_for_connectivity.py [-h] [--no_pruning]\n [--no_remove_loops]\n [--no_remove_outliers]\n [--no_remove_curv_dev]\n [--min_length MIN_LENGTH]\n [--max_length MAX_LENGTH]\n [--outlier_threshold OUTLIER_THRESHOLD]\n [--loop_max_angle LOOP_MAX_ANGLE]\n [--curv_qb_distance CURV_QB_DISTANCE]\n [--out_dir OUT_DIR]\n [--save_raw_connections]\n [--save_intermediate]\n [--save_discarded]\n [--out_labels_list OUT_FILE]\n [--reference REFERENCE]\n [--no_bbox_check]\n [--processes NBR]\n [-v [{DEBUG,INFO,WARNING}]]\n [-f]\n in_tractograms\n [in_tractograms ...]\n in_labels out_hdf5\n\nCompute a connectivity matrix from a tractogram and a parcellation.\n\nCurrent strategy is to keep the longest streamline segment connecting 2\nregions. If the streamline crosses other gray matter regions before reaching\nits final connected region, the kept connection is still the longest. This is\nrobust to compressed streamlines.\n\nThe output file is a hdf5 (.h5) where the keys are 'LABEL1_LABEL2' and each\ngroup is composed of 'data', 'offsets' and 'lengths' from the array_sequence.\nThe 'data' is stored in VOX/CORNER for simplicity and efficiency. 
See script\nscil_tractogram_convert_hdf5_to_trk.py to convert to a list of .trk bundles.\n\nFor the --outlier_threshold option the default is a recommended good trade-off\nfor a freesurfer parcellation. With smaller parcels (brainnetome, glasser) the\nthreshold should most likely be reduced.\n\nGood candidate connections to QC are the brainstem to precentral gyrus\nconnection and precentral left to precentral right connection, or equivalent\nin your parcellation.\n\nNOTE: this script can take a while to run. Please be patient.\nExample: on a tractogram with 1.8M streamlines, running on a SSD:\n- 15 minutes without post-processing, only saving final bundles.\n- 30 minutes with full post-processing, only saving final bundles.\n- 60 minutes with full post-processing, saving all possible files.\n\nFormerly: scil_decompose_connectivity.py\n\npositional arguments:\n in_tractograms Tractogram filenames. Format must be one of \n trk, tck, vtk, fib, dpy.\n in_labels Labels file name (nifti). Labels must have 0 as background.\n out_hdf5 Output hdf5 file (.h5).\n\noptions:\n -h, --help show this help message and exit\n --out_labels_list OUT_FILE\n Save the labels list as text file.\n Needed for scil_connectivity_compute_matrices.py and others.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of \n tractograms (ignores the presence of invalid streamlines).\n --processes NBR Number of sub-processes to start. \n Default: [1]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nPost-processing options:\n --no_pruning If set, will NOT prune on length.\n Length criteria in --min_length, --max_length.\n --no_remove_loops If set, will NOT remove streamlines making loops.\n Angle criteria based on --loop_max_angle.\n --no_remove_outliers If set, will NOT remove outliers using QB.\n Criteria based on --outlier_threshold.\n --no_remove_curv_dev If set, will NOT remove streamlines that deviate from the mean curvature.\n Threshold based on --curv_qb_distance.\n\nPruning options:\n --min_length MIN_LENGTH\n Pruning minimal segment length. [20.0]\n --max_length MAX_LENGTH\n Pruning maximal segment length. [200.0]\n\nOutliers and loops options:\n --outlier_threshold OUTLIER_THRESHOLD\n Outlier removal threshold when using hierarchical QB. [0.6]\n --loop_max_angle LOOP_MAX_ANGLE\n Maximal winding angle over which a streamline is considered as looping. [330.0]\n --curv_qb_distance CURV_QB_DISTANCE\n Clustering threshold for centroids curvature filtering with QB. 
[10.0]\n\nSaving options:\n --out_dir OUT_DIR Output directory for each connection as a separate file (.trk).\n --save_raw_connections\n If set, will save all raw cut connections in a subdirectory.\n --save_intermediate If set, will save the intermediate results of filtering.\n --save_discarded If set, will save discarded streamlines in subdirectories.\n Includes loops, outliers and qb_loops.\n", - "synonyms": [ - [ - "region", - "regions", - "region" - ], - [ - "dorsolateral", - "gyrus", - "occipital", - "parietal", - "gyrus" - ], - [ - "streamlines", - "streamlines" - ], - [ - "comprised", - "composed" - ], - [ - "connect", - "connecting", - "connects", - "connecting" - ], - [ - "streamline", - "streamline" - ], - [ - "connectivity", - "connectivity" - ], - [ - "invalid", - "invalid" - ], - [ - "post", - "post" - ], - [ - "region", - "regions", - "regions" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "larger", - "smaller" - ], - [ - "parcels", - "parcels" - ], - [ - "Data", - "data", - "data" - ], - [ - "connections", - "connections" - ], - [ - "processes", - "processes" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "left", - "left" - ], - [ - "based", - "based" - ], - [ - "bundles", - "bundles" - ], - [ - "matter", - "matter" - ], - [ - "connection", - "connection" - ], - [ - "considered", - "considered" - ], - [ - "processing", - "processing" - ], - [ - "level", - "level" - ], - [ - "connected", - "connected" - ] - ], - "keywords": [] - },
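A sketch of a typical run of the script above (hypothetical filenames; options as documented):
$ scil_tractogram_segment_bundles_for_connectivity.py tracto.trk atlas_labels.nii.gz out.h5 --out_labels_list labels.txt --processes 8
The saved labels list is the one later expected by scil_connectivity_compute_matrices.py.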
- { - "name": "scil_tractogram_segment_one_bundle", - "docstring": "Compute a simple Recobundles (single-atlas & single-parameters).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are unsure about the transformation 'direction' use the verbose\noption (-v) and try with and without the --inverse flag. If you are not using\nthe right transformation 'direction' a warning will pop up. If there is no\nwarning in both cases it means the transformation is very close to identity and\nboth 'direction' will work.\n\nFormerly: scil_recognize_single_bundles.py", - "help": "usage: scil_tractogram_segment_one_bundle.py [-h]\n [--tractogram_clustering_thr TRACTOGRAM_CLUSTERING_THR]\n [--model_clustering_thr MODEL_CLUSTERING_THR]\n [--pruning_thr PRUNING_THR]\n [--slr_threads SLR_THREADS]\n [--seed SEED] [--inverse]\n [--no_empty]\n [--in_pickle IN_PICKLE | --out_pickle OUT_PICKLE]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram in_model in_transfo\n out_tractogram\n\nCompute a simple Recobundles (single-atlas & single-parameters).\nThe model needs to be cleaned and lightweight.\nTransform should come from ANTs: (using the --inverse flag)\nAntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF\n\nIf you are unsure about the transformation 'direction' use the verbose\noption (-v) and try with and without the --inverse flag. If you are not using\nthe right transformation 'direction' a warning will pop up. If there is no\nwarning in both cases it means the transformation is very close to identity and\nboth 'direction' will work.\n\nFormerly: scil_recognize_single_bundles.py\n\npositional arguments:\n in_tractogram Input tractogram filename.\n in_model Model to use for recognition.\n in_transfo Path for the transformation to model space (.txt, .npy or .mat).\n out_tractogram Output tractogram filename.\n\noptions:\n -h, --help show this help message and exit\n --tractogram_clustering_thr TRACTOGRAM_CLUSTERING_THR\n Clustering threshold used for the whole brain [8mm].\n --model_clustering_thr MODEL_CLUSTERING_THR\n Clustering threshold used for the model [4mm].\n --pruning_thr PRUNING_THR\n MDF threshold used for final streamlines selection [6mm].\n --slr_threads SLR_THREADS\n Number of threads for SLR [1].\n --seed SEED Random number generator seed [None].\n --inverse Use the inverse transformation.\n --no_empty Do not write file if there is no streamline.\n --in_pickle IN_PICKLE\n Input pickle clusters map file.\n Will override the tractogram_clustering_thr parameter.\n --out_pickle OUT_PICKLE\n Output pickle clusters map file.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nGaryfallidis, E., Cote, M. A., Rheault, F., ... &\nDescoteaux, M. (2018). Recognition of white matter\nbundles using local and global streamline-based registration and\nclustering. NeuroImage, 170, 283-295.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "maps", - "map" - ], - [ - "direction", - "direction" - ], - [ - "white", - "white" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "global", - "global" - ], - [ - "work", - "work" - ], - [ - "space", - "space" - ], - [ - "parameter", - "parameter" - ], - [ - "random", - "random" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "atlas", - "atlas" - ], - [ - "based", - "based" - ], - [ - "bundles", - "bundles" - ], - [ - "matter", - "matter" - ], - [ - "level", - "level" - ], - [ - "parameters", - "parameters" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - },
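For example (hypothetical filenames; the .mat file is assumed to come from the ANTs registration call shown above):
$ scil_tractogram_segment_one_bundle.py sub01.trk model_AF_left.trk output0GenericAffine.mat AF_left.trk --inverse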
- { - "name": "scil_tractogram_shuffle", - "docstring": "Shuffle the ordering of streamlines.\n\nFormerly: scil_shuffle_streamlines.py", - "help": "usage: scil_tractogram_shuffle.py [-h] [--seed SEED] [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nShuffle the ordering of streamlines.\n\nFormerly: scil_shuffle_streamlines.py\n\npositional arguments:\n in_tractogram Input tractography file.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --seed SEED Random number generator seed [None].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "random", - "random" - ], - [ - "streamlines", - "streamlines" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "level", - "level" - ], - [ - "tractography", - "tractography" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_smooth", - "docstring": "This script will smooth the streamlines, usually to remove the 'wiggles' in\nprobabilistic tracking.\n\nTwo choices of methods are available:\n- Gaussian will use the surrounding coordinates for smoothing. Streamlines are\nresampled to 1mm step-size and the smoothing is performed on the coordinate\narray. The sigma will be indicative of the number of points surrounding the\ncenter points to be used for blurring.\n- Spline will fit a spline curve to every streamline using a sigma and the\nnumber of control points. The sigma represents the allowed distance from the\ncontrol points. The final streamlines are obtained by evaluating the spline at\nconstant intervals so that it will have the same number of points as initially.\n\nThis script enforces endpoints to remain the same.\n\nWARNING:\n- too low of a sigma (e.g. 1) with a lot of control points (e.g. 15)\nwill create crazy streamlines that could end up out of the bounding box.\n- data_per_point will be lost.\n\nFormerly: scil_smooth_streamlines.py", - "help": "usage: scil_tractogram_smooth.py [-h]\n (--gaussian SIGMA | --spline SIGMA NB_CTRL_POINT)\n [--compress [COMPRESS_TH]]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_tractogram\n\nThis script will smooth the streamlines, usually to remove the 'wiggles' in\nprobabilistic tracking.\n\nTwo choices of methods are available:\n- Gaussian will use the surrounding coordinates for smoothing. Streamlines are\nresampled to 1mm step-size and the smoothing is performed on the coordinate\narray. The sigma will be indicative of the number of points surrounding the\ncenter points to be used for blurring.\n- Spline will fit a spline curve to every streamline using a sigma and the\nnumber of control points. The sigma represents the allowed distance from the\ncontrol points. The final streamlines are obtained by evaluating the spline at\nconstant intervals so that it will have the same number of points as initially.\n\nThis script enforces endpoints to remain the same.\n\nWARNING:\n- too low of a sigma (e.g. 1) with a lot of control points (e.g. 15)\nwill create crazy streamlines that could end up out of the bounding box.\n- data_per_point will be lost.\n\nFormerly: scil_smooth_streamlines.py\n\npositional arguments:\n in_tractogram Input tractography file.\n out_tractogram Output tractography file.\n\noptions:\n -h, --help show this help message and exit\n --gaussian SIGMA Sigma for smoothing. Use the value of surrounding\n X,Y,Z points on the streamline to blur the streamlines.\n A good sigma choice would be around 5.\n --spline SIGMA NB_CTRL_POINT\n Sigma for smoothing. Model each streamline as a spline.\n A good sigma choice would be around 5 and control points around 10.\n --compress [COMPRESS_TH]\n If set, compress the resulting streamline. Value is the maximum \n compression distance in mm. [0.1]\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "probabilistic", - "probabilistic" - ], - [ - "streamline", - "streamline" - ], - [ - "create", - "create" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "high", - "low" - ], - [ - "step", - "step" - ], - [ - "tractography", - "tractography" - ], - [ - "methods", - "methods" - ], - [ - "tracking", - "tracking" - ], - [ - "level", - "level" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_tractogram_split", - "docstring": "Split a tractogram into multiple files, 2 options available:\nSplit into X files, or split into files of Y streamlines.\n\nBy default, streamlines to add to each chunk will be chosen randomly.\nOptionally, you can split streamlines...\n - sequentially (the first n/nb_chunks streamlines in the first chunk and so\n on).\n - randomly, but per Quickbundles clusters.\n\nFormerly: scil_split_tractogram.py", - "help": "usage: scil_tractogram_split.py [-h] [--out_dir OUT_DIR]\n (--chunk_size CHUNK_SIZE | --nb_chunks NB_CHUNKS)\n [--split_per_cluster | --do_not_randomize]\n [--qbx_thresholds t [t ...]] [--seed SEED]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_tractogram out_prefix\n\nSplit a tractogram into multiple files, 2 options available:\nSplit into X files, or split into files of Y streamlines.\n\nBy default, streamlines to add to each chunk will be chosen randomly.\nOptionally, you can split streamlines...\n - sequentially (the first n/nb_chunks streamlines in the first chunk and so\n on).\n - randomly, but per Quickbundles clusters.\n\nFormerly: scil_split_tractogram.py\n\npositional arguments:\n in_tractogram Tractogram input file name.\n out_prefix Prefix for the output tractogram, index will be appended \n automatically (ex, _0.trk), based on input type.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Put all output tractograms in a specific directory.\n --chunk_size CHUNK_SIZE\n The maximum number of streamlines per file.\n --nb_chunks NB_CHUNKS\n Divide the file in equal parts.\n --split_per_cluster If set, splitting will be done per cluster (computed with \n Quickbundles) to ensure that at least some streamlines are \n kept from each bundle in each chunk. Else, random splitting is\n performed (default).\n --do_not_randomize If set, splitting is done sequentially through the original \n sft instead of using random indices.\n --qbx_thresholds t [t ...]\n If you chose option '--split_per_cluster', you may set the \n QBx threshold value(s) here. Default: [40, 30, 20]\n --seed SEED Use a specific random seed for the subsampling.\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "random", - "random" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "based", - "based" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ], - [ - "random", - "randomly" - ] - ], - "keywords": [] - },
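A short example for the split script above (hypothetical filenames): splitting into four equal chunks, per QuickBundles cluster so every bundle stays represented:
$ scil_tractogram_split.py whole_brain.trk chunk --nb_chunks 4 --split_per_cluster
This writes chunk_0.trk through chunk_3.trk.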
The volume is\nassumed to be saved from scil_fodf_to_bingham.py.\n\nGiven an image of Bingham coefficients, this script displays a slice in a\ngiven orientation.", - "help": "usage: scil_viz_bingham_fit.py [-h] [--slice_index SLICE_INDEX]\n [--win_dims WIDTH HEIGHT]\n [--interactor {image,trackball}]\n [--axis_name {sagittal,coronal,axial}]\n [--silent] [--output OUTPUT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]\n [--color_per_lobe]\n in_bingham\n\nVisualize 2-dimensional Bingham volume slice loaded from disk. The volume is\nassumed to be saved from scil_fodf_to_bingham.py.\n\nGiven an image of Bingham coefficients, this script displays a slice in a\ngiven orientation.\n\npositional arguments:\n in_bingham Input SH image file.\n\noptions:\n -h, --help show this help message and exit\n --slice_index SLICE_INDEX\n Index of the slice to visualize along a given axis. Defaults to middle of volume.\n --win_dims WIDTH HEIGHT\n The dimensions for the vtk window. [(768, 768)]\n --interactor {image,trackball}\n Specify interactor mode for vtk window. [trackball]\n --axis_name {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n --silent Disable interactive visualization.\n --output OUTPUT Path to output file.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}\n Name of the sphere used to reconstruct SF. [symmetric362]\n --color_per_lobe Color each Bingham distribution with a different color. [False]\n", - "synonyms": [ - [ - "axial", - "axial" - ], - [ - "orientation", - "orientation" - ], - [ - "coronal", - "sagittal", - "coronal" - ], - [ - "image", - "image" - ], - [ - "middle", - "middle" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "coronal", - "sagittal", - "sagittal" - ], - [ - "level", - "level" - ], - [ - "false", - "false" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_bundle", - "docstring": "Visualize bundles.\n\nExample usages:\n\n# Visualize streamlines as tubes, each bundle with a different color\n>>> scil_viz_bundle.py path_to_bundles/ --shape tube --random_coloring 1337\n\n# Visualize a tractogram with each streamline drawn as lines, colored with\n# their local orientation, but only load 1 in 10 streamlines\n>>> scil_viz_bundle.py tractogram.trk --shape line --subsample 10\n\n# Visualize CSTs as large tubes and color them from a list of colors in a file\n>>> scil_viz_bundle.py path_to_bundles/CST_* --width 0.5\n --color_dict colors.json", - "help": "usage: scil_viz_bundle.py [-h]\n [--random_coloring SEED | --uniform_coloring R G B | --local_coloring | --color_dict JSON | --color_from_streamlines KEY | --color_from_points KEY]\n [--shape {line,tube}] [--width WIDTH]\n [--subsample SUBSAMPLE] [--downsample DOWNSAMPLE]\n [--background R G B] [-v [{DEBUG,INFO,WARNING}]]\n in_bundles [in_bundles ...]\n\nVisualize bundles.\n\nExample usages:\n\n# Visualize streamlines as tubes, each bundle with a different color\n>>> scil_viz_bundle.py path_to_bundles/ --shape tube --random_coloring 1337\n\n# Visualize a tractogram with each streamline drawn as lines, colored with\n# their local orientation, but only load 1 in 10 streamlines\n>>> scil_viz_bundle.py tractogram.trk --shape line --subsample 10\n\n# Visualize CSTs as large tubes and color them 
from a list of colors in a file\n>>> scil_viz_bundle.py path_to_bundles/CST_* --width 0.5\n --color_dict colors.json\n\npositional arguments:\n in_bundles List of tractography files supported by nibabel.\n\noptions:\n -h, --help show this help message and exit\n --shape {line,tube} Display streamlines either as lines or tubes.\n [Default: tube]\n --width WIDTH Width of tubes or lines representing streamlines\n [Default: 0.25]\n --subsample SUBSAMPLE\n Only load 1 in N streamlines.\n [Default: 1]\n --downsample DOWNSAMPLE\n Downsample streamlines to N points.\n [Default: None]\n --background R G B RGB values [0, 255] of the color of the background.\n [Default: [0, 0, 0]]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nColouring options:\n --random_coloring SEED\n Assign a random color to bundles.\n --uniform_coloring R G B\n Assign a uniform color to streamlines.\n --local_coloring Assign coloring to streamlines depending on their local orientations.\n --color_dict JSON JSON file containing colors for each bundle.\n Bundle filenames are indicated as keys and colors as values.\n A 'default' key and value can be included.\n --color_from_streamlines KEY\n Extract a color per streamline from the data_per_streamline property of the tractogram at the specified key.\n --color_from_points KEY\n Extract a color per point from the data_per_point property of the tractogram at the specified key.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "streamline", - "streamline" - ], - [ - "key", - "key" - ], - [ - "indicating", - "indicated" - ], - [ - "orientation", - "orientation" - ], - [ - "supported", - "supported" - ], - [ - "bundles", - "bundle" - ], - [ - "random", - "random" - ], - [ - "orientation", - "orientations" - ], - [ - "large", - "large" - ], - [ - "tractography", - "tractography" - ], - [ - "bundles", - "bundles" - ], - [ - "shape", - "shape" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_bundle_screenshot_mni", - "docstring": "Register bundle to a template for screenshots using a reference.\nThe template can be any MNI152 (any resolution, cropped or not).\nIf your in_anat has a skull, select a MNI152 template with a skull and\nvice-versa.\n\nIf the bundle is already in MNI152 space, do not use --target_template.\n\nAxial, coronal and sagittal slices are captured.\nSagittal can be captured from the left (default) or the right.\n\nFor the --roi argument: If 1 value is provided, the ROI will be white,\nif 4 values are provided, the ROI will be colored with the RGB values\nprovided, if 5 values are provided, it is RGBA (values from 0-255).", - "help": "usage: scil_viz_bundle_screenshot_mni.py [-h]\n [--target_template TARGET_TEMPLATE]\n [--local_coloring | --uniform_coloring R G B | --reference_coloring COLORBAR]\n [--roi ROI [ROI ...]] [--right]\n [--anat_opacity ANAT_OPACITY]\n [--output_suffix OUTPUT_SUFFIX]\n [--out_dir OUT_DIR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_bundle in_anat\n\nRegister bundle to a template for screenshots using a reference.\nThe template can be any MNI152 (any resolution, cropped or not).\nIf your in_anat has a skull, select a MNI152 template with a skull and\nvice-versa.\n\nIf the bundle is already in MNI152 space, do not use --target_template.\n\nAxial, coronal and sagittal slices are captured.\nSagittal can be captured from the left (default) or the right.\n\nFor the --roi argument: If 1 value is provided, 
the ROI will be white,\nif 4 values are provided, the ROI will be colored with the RGB values\nprovided, if 5 values are provided, it is RGBA (values from 0-255).\n\npositional arguments:\n in_bundle Path of the input bundle.\n in_anat Path of the reference file (.nii or nii.gz).\n\noptions:\n -h, --help show this help message and exit\n --target_template TARGET_TEMPLATE\n Path to the target MNI152 template for registration. If in_anat has a skull, select a MNI152 template with a skull and vice-versa.\n --local_coloring Color streamlines using local segment orientations.\n --uniform_coloring R G B\n Color streamlines with uniform coloring.\n --reference_coloring COLORBAR\n Color streamlines with reference coloring (0-255).\n --roi ROI [ROI ...] Path to a ROI file (.nii or nii.gz).\n --right Take screenshot from the right instead of the left for the sagittal plane.\n --anat_opacity ANAT_OPACITY\n Set the opacity for the anatomy, use 0 for complete transparency, 1 for opaque. [0.3]\n --output_suffix OUTPUT_SUFFIX\n Add a suffix to the output, else the axis name is used.\n --out_dir OUT_DIR Put all images in a specific directory.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "axial", - "axial" - ], - [ - "streamlines", - "streamlines" - ], - [ - "planes", - "plane" - ], - [ - "white", - "white" - ], - [ - "orientation", - "orientation" - ], - [ - "coronal", - "sagittal", - "coronal" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "bundles", - "bundle" - ], - [ - "space", - "space" - ], - [ - "left", - "left" - ], - [ - "coronal", - "sagittal", - "sagittal" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_bundle_screenshot_mosaic", - "docstring": "Visualize bundles from a list. The script will output a mosaic (image) with\nscreenshots, 6 views per bundle in the list.", - "help": "usage: scil_viz_bundle_screenshot_mosaic.py [-h] [--uniform_coloring R G B]\n [--random_coloring SEED]\n [--zoom ZOOM] [--ttf TTF]\n [--ttf_size TTF_SIZE]\n [--opacity_background OPACITY_BACKGROUND]\n [--resolution_of_thumbnails RESOLUTION_OF_THUMBNAILS]\n [--light_screenshot]\n [--no_information]\n [--no_bundle_name]\n [--no_streamline_number]\n [--reference REFERENCE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_volume in_bundles\n [in_bundles ...] out_image\n\nVisualize bundles from a list. The script will output a mosaic (image) with\nscreenshots, 6 views per bundle in the list.\n\npositional arguments:\n in_volume Volume used as background (e.g. T1, FA, b0).\n in_bundles List of tractography files supported by nibabel or binary mask files.\n out_image Name of the output image mosaic (e.g. mosaic.jpg, mosaic.png).\n\noptions:\n -h, --help show this help message and exit\n --uniform_coloring R G B\n Assign a uniform color to streamlines (or ROIs).\n --random_coloring SEED\n Assign a random color to streamlines (or ROIs).\n --zoom ZOOM Rendering zoom. 
A value greater than 1 is a zoom-in,\n a value less than 1 is a zoom-out [1.0].\n --ttf TTF Path of the TrueType font to use for legends.\n --ttf_size TTF_SIZE Font size (int) to use for the legends [35].\n --opacity_background OPACITY_BACKGROUND\n Opacity of background image, between 0 and 1.0 [0.4].\n --resolution_of_thumbnails RESOLUTION_OF_THUMBNAILS\n Resolution of thumbnails used in mosaic [300].\n --light_screenshot Keep only 3 views instead of 6 [False].\n --no_information Don't display axis and bundle information [False].\n --no_bundle_name Don't display bundle name [False].\n --no_streamline_number\n Don't display the bundle's number of streamlines [False].\n --reference REFERENCE\n Reference anatomy for tck/vtk/fib/dpy file\n support (.nii or .nii.gz).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "greater", - "greater" - ], - [ - "streamlines", - "streamlines" - ], - [ - "views", - "views" - ], - [ - "anatomical", - "anatomy", - "anatomy" - ], - [ - "supported", - "supported" - ], - [ - "bundles", - "bundle" - ], - [ - "image", - "image" - ], - [ - "rendered", - "rendering", - "rendering" - ], - [ - "random", - "random" - ], - [ - "true", - "true" - ], - [ - "tractography", - "tractography" - ], - [ - "bundles", - "bundles" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "level", - "level" - ], - [ - "false", - "false" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_connectivity", - "docstring": "Script to display a connectivity matrix and adjust the desired visualization.\nMade to work with scil_tractogram_segment_bundles_for_connectivity.py and\nscil_connectivity_reorder_rois.py.\n\nThis script can either display the axis labels as:\n- Coordinates (0..N)\n- Labels (using --labels_list)\n- Names (using --labels_list and --lookup_table)\nExamples of labels_list.txt and lookup_table.json can be found in the\nfreesurfer_flow output (https://github.com/scilus/freesurfer_flow)\n\nIf the matrix was made from a bigger matrix using\nscil_connectivity_reorder_rois.py, provide the text file(s), using\n--labels_list and/or --reorder_txt.\n\nThe chord chart always displays its parts in the order they are defined\n(clockwise); the color is attributed in that order following a colormap. The\nthickness of the line represents the 'size/intensity': the greater the value,\nthe thicker the line will be. 
In order to hide the low values, two options are\navailable:\n- Angle threshold + alpha: any connections with a small angle on the chord\n chart will be slightly transparent to increase the focus on bigger\n connections.\n- Percentile: hide any connections with a value below that percentile.", - "help": "", - "synonyms": [ - [ - "order", - "order" - ], - [ - "greater", - "greater" - ], - [ - "represent", - "represent" - ], - [ - "increase", - "increase" - ], - [ - "connectivity", - "connectivity" - ], - [ - "high", - "low" - ], - [ - "work", - "work" - ], - [ - "connections", - "connections" - ], - [ - "large", - "small" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "defined", - "defined" - ], - [ - "size", - "size" - ], - [ - "larger", - "bigger" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_dti_screenshot", - "docstring": "Register DWI to a template for screenshots.\nThe templates are on http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009\n\nFor quick quality control, the MNI template can be downsampled to 2mm iso.\nAxial, coronal and sagittal slices are captured.", - "help": "usage: scil_viz_dti_screenshot.py [-h] [--shells SHELLS [SHELLS ...]]\n [--out_suffix OUT_SUFFIX]\n [--out_dir OUT_DIR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_dwi in_bval in_bvec in_template\n\nRegister DWI to a template for screenshots.\nThe templates are on http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009\n\nFor quick quality control, the MNI template can be downsampled to 2mm iso.\nAxial, coronal and sagittal slices are captured.\n\npositional arguments:\n in_dwi Path of the input diffusion volume.\n in_bval Path of the bval file, in FSL format.\n in_bvec Path of the bvec file, in FSL format.\n in_template Path to the target MNI152 template for \n registration, use the one provided online.\n\noptions:\n -h, --help show this help message and exit\n --shells SHELLS [SHELLS ...]\n Shells to use for DTI fit (usually below 1200), b0 must be listed.\n --out_suffix OUT_SUFFIX\n Add a suffix to the output, else the axis name is used.\n --out_dir OUT_DIR Put all images in a specific directory.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "axial", - "axial" - ], - [ - "coronal", - "sagittal", - "coronal" - ], - [ - "diffusion", - "diffusion" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "coronal", - "sagittal", - "sagittal" - ], - [ - "specific", - "specific" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_fodf", - "docstring": "Visualize 2-dimensional fODF slice loaded from disk.\n\nGiven an image of SH coefficients, this script displays a slice in a\ngiven orientation. The user can also add a background on top of which the\nfODF are to be displayed. Using a full SH basis, the script can be used to\nvisualize asymmetric fODF. The user can supply a peaks image to visualize\npeaks on top of fODF.\n\nIf a transparency_mask is given (e.g. a brain mask), all values outside the\nmask's non-zero region are set to full transparency in the saved scene.\n\n!!! CAUTION !!! 
The script is memory intensive (about 9kB of allocated RAM per\nvoxel, or 9GB for a 1M voxel volume) with a sphere interpolated to 362 points.", - "help": "usage: scil_viz_fodf.py [-h] [--slice_index SLICE_INDEX]\n [--win_dims WIDTH HEIGHT]\n [--interactor {image,trackball}]\n [--axis_name {sagittal,coronal,axial}] [--silent]\n [--in_transparency_mask IN_TRANSPARENCY_MASK]\n [--output OUTPUT] [-f]\n [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]\n [--sphere {repulsion200,repulsion100,symmetric724,symmetric642,symmetric362,repulsion724}]\n [--sph_subdivide SPH_SUBDIVIDE] [--mask MASK]\n [--colormap COLORMAP | --color_rgb COLOR_RGB COLOR_RGB COLOR_RGB]\n [--scale SCALE] [--radial_scale_off] [--norm_off]\n [-v [{DEBUG,INFO,WARNING}]] [--background BACKGROUND]\n [--bg_range MIN MAX] [--bg_opacity BG_OPACITY]\n [--bg_offset BG_OFFSET]\n [--bg_interpolation {nearest,linear}]\n [--bg_color BG_COLOR BG_COLOR BG_COLOR]\n [--peaks PEAKS]\n [--peaks_color PEAKS_COLOR PEAKS_COLOR PEAKS_COLOR]\n [--peaks_width PEAKS_WIDTH]\n [--peaks_values PEAKS_VALUES | --peaks_length PEAKS_LENGTH]\n [--variance VARIANCE] [--variance_k VARIANCE_K]\n [--var_color VAR_COLOR VAR_COLOR VAR_COLOR]\n in_fodf\n\nVisualize 2-dimensional fODF slice loaded from disk.\n\nGiven an image of SH coefficients, this script displays a slice in a\ngiven orientation. The user can also add a background on top of which the\nfODF are to be displayed. Using a full SH basis, the script can be used to\nvisualize asymmetric fODF. The user can supply a peaks image to visualize\npeaks on top of fODF.\n\nIf a transparency_mask is given (e.g. a brain mask), all values outside the\nmask's non-zero region are set to full transparency in the saved scene.\n\n!!! CAUTION !!! The script is memory intensive (about 9kB of allocated RAM per\nvoxel, or 9GB for a 1M voxel volume) with a sphere interpolated to 362 points.\n\npositional arguments:\n in_fodf Input SH image file.\n\noptions:\n -h, --help show this help message and exit\n --slice_index SLICE_INDEX\n Index of the slice to visualize along a given axis. Defaults to middle of volume.\n --win_dims WIDTH HEIGHT\n The dimensions for the vtk window. [(768, 768)]\n --interactor {image,trackball}\n Specify interactor mode for vtk window. [trackball]\n --axis_name {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n --silent Disable interactive visualization.\n --in_transparency_mask IN_TRANSPARENCY_MASK\n Input mask image file.\n --output OUTPUT Path to output file.\n -f Force overwriting of the output files.\n --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}\n Spherical harmonics basis used for the SH coefficients. \n Must be either 'descoteaux07', 'tournier07', \n 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:\n 'descoteaux07' : SH basis from the Descoteaux et al.\n MRM 2007 paper\n 'tournier07' : SH basis from the new Tournier et al.\n NeuroImage 2019 paper, as in MRtrix 3.\n 'descoteaux07_legacy': SH basis from the legacy Dipy implementation\n of the Descoteaux et al. MRM 2007 paper\n 'tournier07_legacy' : SH basis from the legacy Tournier et al.\n NeuroImage 2007 paper.\n --sphere {repulsion200,repulsion100,symmetric724,symmetric642,symmetric362,repulsion724}\n Name of the sphere used to reconstruct SF. [symmetric362]\n --sph_subdivide SPH_SUBDIVIDE\n Number of subdivisions for given sphere. If not supplied, use the given sphere as is.\n --mask MASK Optional mask file. 
Only fODF inside the mask are displayed.\n --colormap COLORMAP Colormap for the ODF slicer. If None, then a RGB colormap will be used. [None]\n --color_rgb COLOR_RGB COLOR_RGB COLOR_RGB\n Uniform color for the ODF slicer given as RGB, scaled between 0 and 1. [None]\n --scale SCALE Scaling factor for FODF. [0.5]\n --radial_scale_off Disable radial scale for ODF slicer.\n --norm_off Disable normalization of ODF slicer.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nBackground arguments:\n --background BACKGROUND\n Background image file. If RGB, values must be between 0 and 255.\n --bg_range MIN MAX The range of values mapped to range [0, 1] for background image. [(bg.min(), bg.max())]\n --bg_opacity BG_OPACITY\n The opacity of the background image. Opacity of 0.0 means transparent and 1.0 is completely visible. [1.0]\n --bg_offset BG_OFFSET\n The offset of the background image. [0.5]\n --bg_interpolation {nearest,linear}\n Interpolation mode for the background image. [nearest]\n --bg_color BG_COLOR BG_COLOR BG_COLOR\n The color of the overall background, behind everything. Must be RGB values scaled between 0 and 1. [(0, 0, 0)]\n\nPeaks arguments:\n --peaks PEAKS Peaks image file.\n --peaks_color PEAKS_COLOR PEAKS_COLOR PEAKS_COLOR\n Color used for peaks, as RGB values scaled between 0 and 1. If None, then a RGB colormap is used. [None]\n --peaks_width PEAKS_WIDTH\n Width of peaks segments. [1.0]\n\nPeaks scaling arguments:\n Choose between peaks values and arbitrary length.\n\n --peaks_values PEAKS_VALUES\n Peaks values file.\n --peaks_length PEAKS_LENGTH\n Length of the peaks segments. [0.65]\n\nVariance arguments:\n For the visualization of fodf uncertainty, the variance is used as follows: mean + k * sqrt(variance), where mean is the input fodf (in_fodf) and k is the scaling factor (variance_k).\n\n --variance VARIANCE FODF variance file.\n --variance_k VARIANCE_K\n Scaling factor (k) for the computation of the fodf uncertainty. [1]\n --var_color VAR_COLOR VAR_COLOR VAR_COLOR\n Color of variance outline. Must be RGB values scaled between 0 and 1. [(1, 1, 1)]\n", - "synonyms": [ - [ - "axial", - "axial" - ], - [ - "variance", - "variance" - ], - [ - "orientation", - "orientation" - ], - [ - "coronal", - "sagittal", - "coronal" - ], - [ - "image", - "image" - ], - [ - "middle", - "middle" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "coronal", - "sagittal", - "sagittal" - ], - [ - "memory", - "memory" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_gradients_screenshot", - "docstring": "Visualization for directions on a sphere, either from a gradient sampling (i.e.\na list of b-vectors) or from a Dipy sphere.", - "help": "usage: scil_viz_gradients_screenshot.py [-h]\n (--in_gradient_scheme IN_GRADIENT_SCHEME [IN_GRADIENT_SCHEME ...] | --dipy_sphere {symmetric362,symmetric642,symmetric724,repulsion724,repulsion100,repulsion200})\n [--dis-sym]\n [--out_basename OUT_BASENAME]\n [--res RES] [--dis-sphere]\n [--dis-proj] [--plot_shells]\n [--same-color] [--opacity OPACITY]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n\nVisualization for directions on a sphere, either from a gradient sampling (i.e.\na list of b-vectors) or from a Dipy sphere.\n\noptions:\n -h, --help show this help message and exit\n --in_gradient_scheme IN_GRADIENT_SCHEME [IN_GRADIENT_SCHEME ...]\n Gradient sampling filename. 
(only accepts .bvec and\n .bval together or only .b).\n --dipy_sphere {symmetric362,symmetric642,symmetric724,repulsion724,repulsion100,repulsion200}\n Dipy sphere choice.\n --dis-sym Disable antipodal symmetry.\n --out_basename OUT_BASENAME\n Output file name picture without extension (will be\n png file(s)).\n --res RES Resolution of the output picture(s).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided\n level. Default level is warning, default when using -v\n is info.\n -f Force overwriting of the output files.\n\nEnable/Disable renderings:\n --dis-sphere Disable the rendering of the sphere.\n --dis-proj Disable rendering of the projection supershell.\n --plot_shells Enable rendering each shell individually.\n\nRendering options:\n --same-color Use the same color for all shells.\n --opacity OPACITY Opacity for the shells.\n", - "synonyms": [ - [ - "rendered", - "rendering", - "rendering" - ], - [ - "projection", - "projection" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_tractogram_seeds", - "docstring": "Visualize seeds used to generate the tractogram or bundle.\nWhen tractography was run, each streamline produced by the tracking algorithm\nsaved its seeding point (its origin).\n\nThe tractogram must have been generated from scil_tracking_local.py or\nscil_tracking_pft.py with the --save_seeds option.", - "help": "usage: scil_viz_tractogram_seeds.py [-h] [--save SAVE]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n tractogram\n\nVisualize seeds used to generate the tractogram or bundle.\nWhen tractography was run, each streamline produced by the tracking algorithm\nsaved its seeding point (its origin).\n\nThe tractogram must have been generated from scil_tracking_local.py or\nscil_tracking_pft.py with the --save_seeds option.\n\npositional arguments:\n tractogram Tractogram file (must be trk)\n\noptions:\n -h, --help show this help message and exit\n --save SAVE If set, save a screenshot of the result in the specified filename\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "seeding", - "seeding" - ], - [ - "streamline", - "streamline" - ], - [ - "bundles", - "bundle" - ], - [ - "algorithm", - "algorithm" - ], - [ - "result", - "result" - ], - [ - "tractography", - "tractography" - ], - [ - "tracking", - "tracking" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_tractogram_seeds_3d", - "docstring": "Visualize seeds as 3D points, with heatmaps corresponding to seed density\n\nExample usages:\n\n>>> scil_viz_tractogram_seeds_3d.py seeds.nii.gz --tractogram tractogram.trk", - "help": "usage: scil_viz_tractogram_seeds_3d.py [-h] [--tractogram TRACTOGRAM]\n [--colormap COLORMAP]\n [--seed_opacity SEED_OPACITY]\n [--tractogram_shape {line,tube}]\n [--tractogram_opacity TRACTOGRAM_OPACITY]\n [--tractogram_width TRACTOGRAM_WIDTH]\n [--tractogram_color R G B [R G B ...]]\n [--background R G B [R G B ...]]\n [-v [{DEBUG,INFO,WARNING}]]\n in_seed_map\n\nVisualize seeds as 3D points, with heatmaps corresponding to seed density\n\nExample usages:\n\n>>> scil_viz_tractogram_seeds_3d.py seeds.nii.gz --tractogram tractogram.trk\n\npositional arguments:\n in_seed_map Seed density map.\n\noptions:\n -h, --help show this help message and exit\n --tractogram TRACTOGRAM\n Tractogram corresponding to the seeds.\n --colormap COLORMAP Name of the map for the density coloring. Can be any colormap that matplotlib offers.\n [Default: bone]\n --seed_opacity SEED_OPACITY\n Opacity of the contour generated.\n [Default: 0.5]\n --tractogram_shape {line,tube}\n Display streamlines either as lines or tubes.\n [Default: tube]\n --tractogram_opacity TRACTOGRAM_OPACITY\n Opacity of the streamlines.\n [Default: 0.5]\n --tractogram_width TRACTOGRAM_WIDTH\n Width of tubes or lines representing streamlines.\n [Default: 0.05]\n --tractogram_color R G B [R G B ...]\n Color for the tractogram.\n --background R G B [R G B ...]\n RGB values [0, 255] of the color of the background.\n [Default: [0, 0, 0]]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n", - "synonyms": [ - [ - "streamlines", - "streamlines" - ], - [ - "maps", - "map" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_volume_histogram", - "docstring": "Script to display a histogram of a metric (FA, MD, etc.) from a binary mask\n(wm mask, vascular mask, etc.).\nThese two images must be coregistered with each other.\n\n>>> scil_viz_volume_histogram.py metric.nii.gz mask_bin.nii.gz 8\n out_filename_image.png", - "help": "usage: scil_viz_volume_histogram.py [-h] [--title TITLE] [--x_label X_LABEL]\n [--colors COLORS] [--show_only]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_metric in_mask n_bins out_png\n\nScript to display a histogram of a metric (FA, MD, etc.) from a binary mask\n(wm mask, vascular mask, etc.).\nThese two images must be coregistered with each other.\n\n>>> scil_viz_volume_histogram.py metric.nii.gz mask_bin.nii.gz 8\n out_filename_image.png\n\npositional arguments:\n in_metric Metric map ex : FA, MD,... .\n in_mask Binary mask data to extract value.\n n_bins Number of bins to use for the histogram.\n out_png Output filename for the figure.\n\noptions:\n -h, --help show this help message and exit\n --show_only Do not save the figure, only display.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nHistogram options:\n --title TITLE Use the provided info for the histogram title. [Histogram]\n --x_label X_LABEL Use the provided info for the x axis name.\n --colors COLORS Use the provided info for the bars color. [#0504aa]\n", - "synonyms": [ - [ - "maps", - "map" - ], - [ - "Data", - "data", - "data" - ], - [ - "binary", - "binary" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_volume_scatterplot", - "docstring": "Script to display scatter plot between two maps (ex. FA and MD, ihMT and MT).\nBy default, no mask is applied to the data.\nDifferent options are available to mask or threshold data:\n - a binary mask\n - two probability maps, which can be used to threshold maps with\n --in_prob_maps. The same threshold is applied to these two maps (--thr).\n - parcellation, which can be used to plot values for each region of\n an atlas (--in_atlas) or a subset of regions (--specific_label).\n The atlas option requires a json file (--atlas_lut) with indices and\n names of each label corresponding to the atlas, as follows:\n \"1\": \"lh_A8m\",\n \"2\": \"rh_A8m\",\n The numbers must correspond to the label indices in the json file.\n\nBe careful, you cannot use all of them at the same time.\n\nFor general scatter plot without mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n\nFor scatter plot with mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_bin_mask mask_wm.nii.gz\n\nFor tissue probability scatter plot:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_prob_maps wm_map.nii.gz gm_map.nii.gz\n\nFor scatter plot using atlas:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n --specific_label 34 67 87", - "help": "usage: scil_viz_volume_scatterplot.py [-h] [--out_dir OUT_DIR] [--thr THR]\n [--not_exclude_zero]\n [--in_bin_mask IN_BIN_MASK | --in_prob_maps IN_PROB_MAPS IN_PROB_MAPS | --in_atlas IN_ATLAS]\n [--atlas_lut ATLAS_LUT]\n [--specific_label SPECIFIC_LABEL [SPECIFIC_LABEL ...]]\n [--in_folder] [--title TITLE]\n [--x_label X_LABEL] [--y_label Y_LABEL]\n [--label LABEL]\n [--label_prob LABEL_PROB]\n [--marker MARKER]\n [--marker_size MARKER_SIZE]\n [--transparency TRANSPARENCY]\n [--dpi DPI] [--colors color1 color2]\n [--show_only]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_x_map in_y_map out_name\n\nScript to display scatter plot between two maps (ex. FA and MD, ihMT and MT).\nBy default, no mask is applied to the data.\nDifferent options are available to mask or threshold data:\n - a binary mask\n - two probability maps, which can be used to threshold maps with\n --in_prob_maps. 
The same threshold is applied to these two maps (--thr).\n - parcellation, which can be used to plot values for each region of\n an atlas (--in_atlas) or a subset of regions (--specific_label).\n The atlas option requires a json file (--atlas_lut) with indices and\n names of each label corresponding to the atlas, as follows:\n \"1\": \"lh_A8m\",\n \"2\": \"rh_A8m\",\n The numbers must correspond to the label indices in the json file.\n\nBe careful, you cannot use all of them at the same time.\n\nFor general scatter plot without mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n\nFor scatter plot with mask:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_bin_mask mask_wm.nii.gz\n\nFor tissue probability scatter plot:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_prob_maps wm_map.nii.gz gm_map.nii.gz\n\nFor scatter plot using atlas:\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n\n>>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png\n --in_atlas atlas.nii.gz --atlas_lut atlas.json\n --specific_label 34 67 87\n\npositional arguments:\n in_x_map Map in x axis, FA for example.\n in_y_map Map in y axis, MD for example.\n out_name Output filename for the figure without extension.\n\noptions:\n -h, --help show this help message and exit\n --out_dir OUT_DIR Output directory to save scatter plot.\n --thr THR Use to apply threshold only on probability maps (same for both maps) with --in_prob_maps option. [0.9]\n --not_exclude_zero Keep zero value in data.\n --in_bin_mask IN_BIN_MASK\n Binary mask. Use this option to extract x and y maps value from specific mask or region: wm_mask or roi_mask for example.\n --in_prob_maps IN_PROB_MAPS IN_PROB_MAPS\n Probability maps, WM and GM for example.\n --in_atlas IN_ATLAS Path to the input atlas image.\n --show_only Do not save the figure, only display. Not available with the --in_atlas option.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nAtlas options:\n --atlas_lut ATLAS_LUT\n Path of the LUT file corresponding to atlas used to name the regions of interest.\n --specific_label SPECIFIC_LABEL [SPECIFIC_LABEL ...]\n Label list to use for the scatter plot. Labels must correspond to the atlas LUT file. [None]\n --in_folder Save label plots in subfolder \"Label_plots\".\n\nScatter plot options:\n --title TITLE Use the provided info for the title name. [Scatter Plot]\n --x_label X_LABEL Use the provided info for the x axis name. [x]\n --y_label Y_LABEL Use the provided info for the y axis name. [y]\n --label LABEL Use the provided info for the legend box corresponding to mask or first probability map. [None]\n --label_prob LABEL_PROB\n Use the provided info for the legend box corresponding to the second probability map. [Threshold prob_map 2]\n --marker MARKER Use the provided info for the marker shape. [.]\n --marker_size MARKER_SIZE\n Use the provided info for the marker size. [15]\n --transparency TRANSPARENCY\n Use the provided info for the point transparency. [0.4]\n --dpi DPI Use the provided info for the dpi resolution. 
[300]\n --colors color1 color2\n", - "synonyms": [ - [ - "region", - "regions", - "region" - ], - [ - "maps", - "map" - ], - [ - "region", - "regions", - "regions" - ], - [ - "image", - "image" - ], - [ - "maps", - "maps" - ], - [ - "applied", - "apply" - ], - [ - "probability", - "probability" - ], - [ - "Data", - "data", - "data" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "atlas", - "atlas" - ], - [ - "shape", - "shape" - ], - [ - "applied", - "applied" - ], - [ - "general", - "general" - ], - [ - "binary", - "binary" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_volume_screenshot", - "docstring": "Take screenshot(s) of one or more slices in a given image volume along the\nrequested axis. If slice indices are not provided, all slices in the volume\nare used. The names of the output images are suffixed with _slice_{id}, with\nid being the slice number in the volume. If a labelmap image is provided (e.g.\na tissue segmentation map), it is overlaid on the volume slices. Same goes if\na mask is provided, with the difference that it can be rendered as a\ntransparency overlay as well as a contour.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plotted.\n\nExample:\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_annotated.png\n --display_slice_number --display_lr\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_masked.png\n --transparency brainmask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial.png\n --slices 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_sagittal.png --axis sagittal\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_plasma_cmap.png\n --slices 30 40 50 60 70 80 90 100 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_overlay.png\n --slices 30 40 50 60 70 80 90 100 --overlays brain_mask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_contour.png\n --slices 30 40 50 60 70 80 90 100\n --overlays brain_mask.nii.gz --overlays_as_contours\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_map.png\n --slices 30 40 50 60 70 80 90 100 --labelmap tissue_map.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_viridis_cmap.png\n --slices 30 40 50 60 70 80 90 100\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_peaks.png\n --slices 30 40 50 60 70 80 90 100 --peaks peaks.nii.gz --volume_opacity 0.5", - "help": "usage: scil_viz_volume_screenshot.py [-h]\n [--volume_cmap_name VOLUME_CMAP_NAME]\n [--volume_opacity VOLUME_OPACITY]\n [--transparency TRANSPARENCY]\n [--slices SID [SID ...]]\n [--axis {sagittal,coronal,axial}]\n [--size WIDTH HEIGHT]\n [--display_slice_number] [--display_lr]\n [--labelmap LABELMAP]\n [--labelmap_cmap_name LABELMAP_CMAP_NAME]\n [--labelmap_opacity LABELMAP_OPACITY]\n [--overlays OVERLAYS [OVERLAYS ...]]\n [--overlays_as_contours]\n [--overlays_colors R G B [R G B ...]]\n [--overlays_opacity OVERLAYS_OPACITY]\n [--peaks PEAKS [PEAKS ...]]\n [--peaks_width PEAKS_WIDTH]\n [--peaks_opacity PEAKS_OPACITY]\n [-v [{DEBUG,INFO,WARNING}]]\n volume out_fname\n\nTake screenshot(s) of one or more slices in a given image volume along the\nrequested axis. If slice indices are not provided, all slices in the volume\nare used. 
The names of the output images are suffixed with _slice_{id}, with\nid being the slice number in the volume. If a labelmap image is provided (e.g.\na tissue segmentation map), it is overlaid on the volume slices. Same goes if\na mask is provided, with the difference that it can be rendered as a\ntransparency overlay as well as a contour.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plotted.\n\nExample:\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_annotated.png\n --display_slice_number --display_lr\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_masked.png\n --transparency brainmask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial.png\n --slices 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_sagittal.png --axis sagittal\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_plasma_cmap.png\n --slices 30 40 50 60 70 80 90 100 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_overlay.png\n --slices 30 40 50 60 70 80 90 100 --overlays brain_mask.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_contour.png\n --slices 30 40 50 60 70 80 90 100\n --overlays brain_mask.nii.gz --overlays_as_contours\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_map.png\n --slices 30 40 50 60 70 80 90 100 --labelmap tissue_map.nii.gz\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_viridis_cmap.png\n --slices 30 40 50 60 70 80 90 100\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_peaks.png\n --slices 30 40 50 60 70 80 90 100 --peaks peaks.nii.gz --volume_opacity 0.5\n\npositional arguments:\n volume Input 3D Nifti file (.nii/.nii.gz).\n out_fname Name of the output image(s). If multiple slices are provided (or none), their index will be appended to the name (e.g. volume.jpg, volume.png becomes volume_slice_0.jpg, volume_slice_0.png).\n\noptions:\n -h, --help show this help message and exit\n --transparency TRANSPARENCY\n Transparency Nifti image (.nii/.nii.gz). Can either be a binary mask or a scalar image in the range [0, 1].\n --size WIDTH HEIGHT Size of the output image. [(768, 768)]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n\nSlicing:\n --slices SID [SID ...]\n Slice indices to screenshot. If None are supplied, all slices inside the transparency mask are selected.\n --axis {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n\nInput overlays:\n --labelmap LABELMAP Input labelmap file (.nii/.nii.gz).\n --overlays OVERLAYS [OVERLAYS ...]\n 3D Nifti image(s) to overlay (.nii/.nii.gz).\n --peaks PEAKS [PEAKS ...]\n Peaks Nifti image (.nii/.nii.gz).\n\nVolume rendering:\n --volume_cmap_name VOLUME_CMAP_NAME\n Colormap name for the 3D Nifti image data. [None]\n --volume_opacity VOLUME_OPACITY\n Opacity value for the 3D Nifti image data. [1.0]\n --labelmap_cmap_name LABELMAP_CMAP_NAME\n Colormap name for the labelmap image data. [viridis]\n --labelmap_opacity LABELMAP_OPACITY\n Opacity value for the labelmap image data. [0.5]\n\nPeaks rendering:\n --peaks_width PEAKS_WIDTH\n Width of the peaks lines. [3.0]\n --peaks_opacity PEAKS_OPACITY\n Opacity value for the peaks overlay. 
[1.0]\n\nOverlay rendering:\n --overlays_as_contours\n Display overlays contours and reduce the opacity of their inner region (see the `--overlays_opacity` argument).\n --overlays_colors R G B [R G B ...]\n Colors for the overlays or contours. You may provide a single color, for all overlays/contours, or one color for each. Each color is given as three values: R G B\n --overlays_opacity OVERLAYS_OPACITY\n Opacity value for the masks overlays. When combined with `--overlays_as_contours`, this will be the opacity of the region inside the computed contours. [0.5]\n\nAnnotations:\n --display_slice_number\n If true, displays the slice number in the upper left corner.\n --display_lr If true, add left and right annotations to the images.\n", - "synonyms": [ - [ - "region", - "regions", - "region" - ], - [ - "axial", - "axial" - ], - [ - "maps", - "map" - ], - [ - "coronal", - "sagittal", - "coronal" - ], - [ - "image", - "image" - ], - [ - "rendered", - "rendering", - "rendering" - ], - [ - "rendered", - "rendering", - "rendered" - ], - [ - "Data", - "data", - "data" - ], - [ - "true", - "true" - ], - [ - "left", - "left" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "coronal", - "sagittal", - "sagittal" - ], - [ - "level", - "level" - ], - [ - "size", - "size" - ], - [ - "difference", - "difference" - ] - ], - "keywords": [] - }, - { - "name": "scil_viz_volume_screenshot_mosaic", - "docstring": "Compose a mosaic of screenshots of the given image volume slices along the\nrequested axis. The provided transparency mask (e.g. a brain mask volume) is\nused to set the screenshot values outside the mask's non-zero region to full\ntransparency. Additionally, if a labelmap image is provided (e.g. a tissue\nsegmentation map), it is overlaid on the volume slices. 
Also, a series of\nmasks can be provided and will be used to generate contours overlaid on each\nvolume slice.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plotted.\n\nThe screenshots are overlapped according to the given factors.\n\nThe mosaic supports either horizontal, vertical or matrix arrangements.\n\nExample:\n>>> scil_viz_volume_screenshot_mosaic.py 1 8 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial.png 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --overlap_factor 0.6 0.5 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 tissues.nii.gz brain_mask.nii.gz\n mosaic_overlap_tissues_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_viridis_cmap.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_contours.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --overlays wm_mask.nii.gz gm_mask.nii.gz csf_mask.nii.gz", - "help": "usage: scil_viz_volume_screenshot_mosaic.py [-h]\n [--volume_cmap_name VOLUME_CMAP_NAME]\n [--volume_opacity VOLUME_OPACITY]\n [--axis {sagittal,coronal,axial}]\n [--size WIDTH HEIGHT]\n [--labelmap LABELMAP]\n [--labelmap_cmap_name LABELMAP_CMAP_NAME]\n [--labelmap_opacity LABELMAP_OPACITY]\n [--overlays OVERLAYS [OVERLAYS ...]]\n [--overlays_as_contours]\n [--overlays_colors R G B [R G B ...]]\n [--overlays_opacity OVERLAYS_OPACITY]\n [--overlap rWIDTH rHEIGHT]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n rows cols volume transparency\n out_fname SID [SID ...]\n\nCompose a mosaic of screenshots of the given image volume slices along the\nrequested axis. The provided transparency mask (e.g. a brain mask volume) is\nused to set the screenshot values outside the mask's non-zero region to full\ntransparency. Additionally, if a labelmap image is provided (e.g. a tissue\nsegmentation map), it is overlaid on the volume slices. 
Also, a series of\nmasks can be provided and will be used to generate contours overlaid on each\nvolume slice.\n\nA labelmap image can be provided as the image volume, without requiring it as\nthe optional argument if only the former needs to be plotted.\n\nThe screenshots are overlapped according to the given factors.\n\nThe mosaic supports either horizontal, vertical or matrix arrangements.\n\nExample:\n>>> scil_viz_volume_screenshot_mosaic.py 1 8 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial.png 30 40 50 60 70 80 90 100\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --overlap_factor 0.6 0.5 --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 tissues.nii.gz brain_mask.nii.gz\n mosaic_overlap_tissues_axial_plasma_cmap.png 30 40 50 60 70 80 90 100\n --volume_cmap_name plasma\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_viridis_cmap.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis\n\n>>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz\n mosaic_overlap_t1_sagittal_tissue_contours.png\n 30 40 50 60 70 80 90 100 --axis sagittal\n --overlays wm_mask.nii.gz gm_mask.nii.gz csf_mask.nii.gz\n\npositional arguments:\n rows The mosaic row count.\n cols The mosaic column count.\n volume Input 3D Nifti file (.nii/.nii.gz).\n transparency Transparency Nifti image (.nii/.nii.gz). Can either be a binary mask or a scalar image in the range [0, 1].\n out_fname Name of the output image (e.g. img.jpg, img.png).\n SID Slice indices to screenshot.\n\noptions:\n -h, --help show this help message and exit\n --axis {sagittal,coronal,axial}\n Name of the axis to visualize. [axial]\n --size WIDTH HEIGHT Size of the output image. [(768, 768)]\n --overlap rWIDTH rHEIGHT\n The overlap factor as a ratio of each image dimension. [(0.6, 0.0)]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nInput overlays:\n --labelmap LABELMAP Input labelmap file (.nii/.nii.gz).\n --overlays OVERLAYS [OVERLAYS ...]\n 3D Nifti image(s) to overlay (.nii/.nii.gz).\n\nVolume rendering:\n --volume_cmap_name VOLUME_CMAP_NAME\n Colormap name for the 3D Nifti image data. [None]\n --volume_opacity VOLUME_OPACITY\n Opacity value for the 3D Nifti image data. [1.0]\n --labelmap_cmap_name LABELMAP_CMAP_NAME\n Colormap name for the labelmap image data. [viridis]\n --labelmap_opacity LABELMAP_OPACITY\n Opacity value for the labelmap image data. [0.5]\n\nOverlay rendering:\n --overlays_as_contours\n Display overlays contours and reduce the opacity of their inner region (see the `--overlays_opacity` argument).\n --overlays_colors R G B [R G B ...]\n Colors for the overlays or contours. You may provide a single color, for all overlays/contours, or one color for each. Each color is given as three values: R G B\n --overlays_opacity OVERLAYS_OPACITY\n Opacity value for the masks overlays. When combined with `--overlays_as_contours`, this will be the opacity of the region inside the computed contours. 
[0.5]\n", - "synonyms": [ - [ - "region", - "regions", - "region" - ], - [ - "axial", - "axial" - ], - [ - "maps", - "map" - ], - [ - "coronal", - "sagittal", - "coronal" - ], - [ - "image", - "image" - ], - [ - "rendered", - "rendering", - "rendering" - ], - [ - "Data", - "data", - "data" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "coronal", - "sagittal", - "sagittal" - ], - [ - "level", - "level" - ], - [ - "brain", - "brain" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_apply_transform", - "docstring": "Transform Nifti (.nii.gz) using an affine/rigid transformation.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_apply_transform_to_image.py.", - "help": "usage: scil_volume_apply_transform.py [-h] [--inverse] [--keep_dtype]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file in_target_file in_transfo\n out_name\n\nTransform Nifti (.nii.gz) using an affine/rigid transformation.\n\nFor more information on how to use the registration script, follow this link:\nhttps://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html\n\nFormerly: scil_apply_transform_to_image.py.\n\npositional arguments:\n in_file Path of the file to be transformed (nii or nii.gz)\n in_target_file Path of the reference target file (.nii.gz).\n in_transfo Path of the file containing the 4x4 \n transformation matrix (.txt, .npy or .mat).\n out_name Output filename of the transformed data.\n\noptions:\n -h, --help show this help message and exit\n --inverse Apply the inverse transformation.\n --keep_dtype If True, keeps the data_type of the input image (in_file) when saving the output image (out_name).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "image", - "image" - ], - [ - "applied", - "apply" - ], - [ - "Data", - "data", - "data" - ], - [ - "true", - "true" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_b0_synthesis", - "docstring": "Wrapper for SyNb0 available in Dipy, to run it on a single subject.\nRequires skull-stripped b0 and T1w images as input, the script will normalize the\nT1w's WM to 110, co-register both images, then register them to the appropriate\ntemplate, run SyNb0 and then transform the result back to the original space.\n\nSyNb0 is a deep learning model that predicts a synthetic, distortion-free\nb0 image from a distorted b0 and T1w.\n\nThis script must be used carefully, as it is meant to be used in an\nenvironment with the following dependencies already installed (not installed by\ndefault in Scilpy):\n- tensorflow-addons\n- tensorrt\n- tensorflow", - "help": "usage: scil_volume_b0_synthesis.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_b0 in_b0_mask in_t1 in_t1_mask out_b0\n\nWrapper for SyNb0 available in Dipy, to run it on a single subject.\nRequires skull-stripped b0 and T1w images as input, the script will normalize the\nT1w's WM to 110, co-register both images, then register them to the appropriate\ntemplate, run SyNb0 and then transform the result back to the original space.\n\nSyNb0 is a deep learning model that predicts a synthetic, distortion-free\nb0 image from a distorted b0 and T1w.\n\nThis script must be used carefully, as it is meant to be used in an\nenvironment with the following dependencies already installed (not installed by\ndefault in Scilpy):\n- tensorflow-addons\n- tensorrt\n- tensorflow\n\npositional arguments:\n in_b0 Input b0 image.\n in_b0_mask Input b0 mask.\n in_t1 Input t1w image.\n in_t1_mask Input t1w mask.\n out_b0 Output b0 image without distortion.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\n[1] Schilling, Kurt G., et al. 
\"Synthesized b0 for diffusion distortion\n correction (Synb0-DisCo).\" Magnetic resonance imaging 64 (2019): 62-70.\n", - "synonyms": [ - [ - "subject", - "subject" - ], - [ - "imaging", - "imaging" - ], - [ - "learning", - "learning" - ], - [ - "image", - "image" - ], - [ - "diffusion", - "diffusion" - ], - [ - "space", - "space" - ], - [ - "result", - "result" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_count_non_zero_voxels", - "docstring": "Count the number of non-zero voxels in an image file.\n\nIf you give it an image with more than 3 dimensions, it will summarize the 4th\n(or more) dimension to one voxel, and then find non-zero voxels over this.\nThis means that if there is at least one non-zero voxel in the 4th dimension,\nthis voxel of the 3D volume will be considered as non-zero.\n\nFormerly: scil_count_non_zero_voxels.py", - "help": "usage: scil_volume_count_non_zero_voxels.py [-h] [--out OUT_FILE] [--stats]\n [--id VALUE_ID]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n IN_FILE\n\nCount the number of non-zero voxels in an image file.\n\nIf you give it an image with more than 3 dimensions, it will summarize the 4th\n(or more) dimension to one voxel, and then find non-zero voxels over this.\nThis means that if there is at least one non-zero voxel in the 4th dimension,\nthis voxel of the 3D volume will be considered as non-zero.\n\nFormerly: scil_count_non_zero_voxels.py\n\npositional arguments:\n IN_FILE Input file name, in nifti format.\n\noptions:\n -h, --help show this help message and exit\n --out OUT_FILE Name of the output file, which will be saved as a text file.\n --stats If set, output the value using a stats format. Using this synthax will append\n a line to the output file, instead of creating a file with only one line.\n This is useful to create a file to be used as the source of data for a graph.\n Can be combined with --id\n --id VALUE_ID Id of the current count. If used, the value of this argument will be\n output (followed by a \":\") before the count value.\n Mostly useful with --stats.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "create", - "create" - ], - [ - "image", - "image" - ], - [ - "Data", - "data", - "data" - ], - [ - "create", - "creating" - ], - [ - "voxel", - "voxels" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "considered", - "considered" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_crop", - "docstring": "Crop a volume using a given or an automatically computed bounding box. If a\npreviously computed bounding box file is given, the cropping will be applied\nand the affine fixed accordingly.\n\nWarning: This works well on masked images (like with FSL-Bet) volumes since\nit's looking for non-zero data. Therefore, you should validate the results on\nother types of images that haven't been masked.\n\nFormerly: scil_crop_volume.py", - "help": "usage: scil_volume_crop.py [-h] [--ignore_voxel_size]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n [--input_bbox INPUT_BBOX | --output_bbox OUTPUT_BBOX]\n in_image out_image\n\nCrop a volume using a given or an automatically computed bounding box. 
If a\npreviously computed bounding box file is given, the cropping will be applied\nand the affine fixed accordingly.\n\nWarning: This works well on masked images (like with FSL-Bet) volumes since\nit's looking for non-zero data. Therefore, you should validate the results on\nother types of images that haven't been masked.\n\nFormerly: scil_crop_volume.py\n\npositional arguments:\n in_image Path of the nifti file to crop.\n out_image Path of the cropped nifti file to write.\n\noptions:\n -h, --help show this help message and exit\n --ignore_voxel_size Ignore voxel size compatibility test between input bounding box and data. Warning, use only if you know what you are doing.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n --input_bbox INPUT_BBOX\n Path of the pickle file from which to take the bounding box to crop input file.\n --output_bbox OUTPUT_BBOX\n Path of the pickle file where to write the computed bounding box. (.pickle extension)\n", - "synonyms": [ - [ - "volume", - "volumes", - "volumes" - ], - [ - "Data", - "data", - "data" - ], - [ - "applied", - "applied" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_flip", - "docstring": "Flip the volume according to the specified axis.\n\nFormerly: scil_flip_volume.py", - "help": "usage: scil_volume_flip.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image dimension [dimension ...]\n\nFlip the volume according to the specified axis.\n\nFormerly: scil_flip_volume.py\n\npositional arguments:\n in_image Path of the input volume (nifti).\n out_image Path of the output volume (nifti).\n dimension The axes you want to flip. eg: to flip the x and y axes use: x y.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "volume", - "volumes", - "volume" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_math", - "docstring": "Performs an operation on a list of images. The supported operations are\nlisted below.\n\nThis script is loading all images in memory, will often crash after a few\nhundred images.\n\nSome operations such as multiplication or addition accept float value as\nparameters instead of images.\n> scil_volume_math.py multiplication img.nii.gz 10 mult_10.nii.gz\n\nFormerly: scil_image_math.py", - "help": "usage: scil_volume_math.py [-h] [--data_type DATA_TYPE] [--exclude_background]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference,concatenate,dilation,erosion,closing,opening,blur}\n in_args [in_args ...] out_image\n\nPerforms an operation on a list of images. 
The supported operations are\nlisted below.\n\nThis script is loading all images in memory, will often crash after a few\nhundred images.\n\nSome operations such as multiplication or addition accept float value as\nparameters instead of images.\n> scil_volume_math.py multiplication img.nii.gz 10 mult_10.nii.gz\n\nFormerly: scil_image_math.py\n\n lower_threshold: IMG THRESHOLD\n All values below the threshold will be set to zero.\n All values above the threshold will be set to one.\n \n upper_threshold: IMG THRESHOLD\n All values below the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_eq: IMG THRESHOLD\n All values below the threshold will be set to zero.\n All values above or equal the threshold will be set to one.\n \n upper_threshold_eq: IMG THRESHOLD\n All values below or equal the threshold will be set to one.\n All values above the threshold will be set to zero.\n Equivalent to lower_threshold followed by an inversion.\n \n lower_threshold_otsu: IMG\n All values below or equal to the Otsu threshold will be set to zero.\n All values above the Otsu threshold will be set to one.\n (Otsu's method is an algorithm to perform automatic image thresholding\n of the background.)\n \n upper_threshold_otsu: IMG\n All values below the Otsu threshold will be set to one.\n All values above or equal to the Otsu threshold will be set to zero.\n Equivalent to lower_threshold_otsu followed by an inversion.\n \n lower_clip: IMG THRESHOLD\n All values below the threshold will be set to threshold.\n \n upper_clip: IMG THRESHOLD\n All values above the threshold will be set to threshold.\n \n absolute_value: IMG\n All negative values will become positive.\n \n round: IMG\n Round all decimal values to the closest integer.\n \n ceil: IMG\n Ceil all decimal values to the next integer.\n \n floor: IMG\n Floor all decimal values to the previous integer.\n \n normalize_sum: IMG\n Normalize the image so the sum of all values is one.\n \n normalize_max: IMG\n Normalize the image so the maximum value is one.\n \n log_10: IMG\n Apply a log (base 10) to all non zeros values of an image.\n \n log_e: IMG\n Apply a natural log to all non zeros values of an image.\n \n convert: IMG\n Perform no operation, but simply change the data type.\n \n invert: IMG\n Operation on binary image to interchange 0s and 1s in a binary mask.\n \n addition: IMGs\n Add multiple images together.\n \n subtraction: IMG_1 IMG_2\n Subtract first image by the second (IMG_1 - IMG_2).\n \n multiplication: IMGs\n Multiply multiple images together (danger of underflow and overflow)\n \n division: IMG_1 IMG_2\n Divide first image by the second (danger of underflow and overflow)\n Ignore zeros values, excluded from the operation.\n \n mean: IMGs\n Compute the mean of images.\n If a single 4D image is provided, average along the last dimension.\n \n std: IMGs\n Compute the standard deviation average of multiple images.\n If a single 4D image is provided, compute the STD along the last\n dimension.\n \n correlation: IMGs\n Computes the correlation of the 3x3x3 neighborhood of each voxel, for\n all pair of input images. The final image is the average correlation\n (through all pairs).\n For a given pair of images\n - Background is considered as 0. May lead to very high correlations\n close to the border of the background regions, or very poor ones if the\n background in both images differ.\n - Images are zero-padded. 
For the same reason as higher, may lead to\n very high correlations if you have data close to the border of the\n image.\n - NaN values (if a voxel's neighborhood is entirely uniform; std 0) are\n replaced by\n - 0 if at least one neighborhood was entirely containing background.\n - 1 if the voxel's neighborhoods are uniform in both images\n - 0 if the voxel's neighborhoods is uniform in one image, but not\n the other.\n\n UPDATE AS OF VERSION 2.0: Random noise was previously added in the\n process to help avoid NaN values. Now replaced by either 0 or 1 as\n explained above.\n \n union: IMGs\n Operation on binary image to keep voxels, that are non-zero, in at\n least one file.\n \n intersection: IMGs\n Operation on binary image to keep the voxels, that are non-zero,\n are present in all files.\n \n difference: IMG_1 IMG_2\n Operation on binary image to keep voxels from the first file that are\n not in the second file (non-zeros).\n \n concatenate: IMGs\n Concatenate a list of 3D and 4D images into a single 4D image.\n \n dilation: IMG, VALUE\n Binary morphological operation to spatially extend the values of an\n image to their neighbors. VALUE is in voxels: an integer > 0.\n \n erosion: IMG, VALUE\n Binary morphological operation to spatially shrink the volume contained\n in a binary image. VALUE is in voxels: an integer > 0.\n \n closing: IMG, VALUE\n Binary morphological operation, dilation followed by an erosion.\n \n opening: IMG, VALUE\n Binary morphological operation, erosion followed by a dilation.\n \n blur: IMG, VALUE\n Apply a gaussian blur to a single image. VALUE is sigma, the standard\n deviation of the Gaussian kernel.\n \n\npositional arguments:\n {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference,concatenate,dilation,erosion,closing,opening,blur}\n The type of operation to be performed on the images.\n in_args The list of image files or parameters. Refer to each operation's documentation of the expected arguments.\n out_image Output image path.\n\noptions:\n -h, --help show this help message and exit\n --data_type DATA_TYPE\n Data type of the output image. Use the format: \n uint8, int16, int/float32, int/float64.\n --exclude_background Does not affect the background of the original images.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "process", - "process" - ], - [ - "methods", - "method" - ], - [ - "region", - "regions", - "regions" - ], - [ - "positive", - "negative" - ], - [ - "supported", - "supported" - ], - [ - "image", - "image" - ], - [ - "high", - "high" - ], - [ - "algorithm", - "algorithm" - ], - [ - "applied", - "apply" - ], - [ - "positive", - "positive" - ], - [ - "random", - "random" - ], - [ - "Data", - "data", - "data" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "voxel", - "voxels" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "average", - "average" - ], - [ - "considered", - "considered" - ], - [ - "memory", - "memory" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "higher", - "higher" - ], - [ - "parameters", - "parameters" - ], - [ - "difference", - "difference" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_remove_outliers_ransac", - "docstring": "Remove outliers from image using the RANSAC algorithm.\nThe RANSAC algorithm parameters are sensitive to the input data.\n\nNOTE: Current default parameters are tuned for ad/md/rd images only.\n\nFormerly: scil_remove_outliers_ransac.py", - "help": "usage: scil_volume_remove_outliers_ransac.py [-h] [--min_fit MIN_FIT]\n [--max_iter MAX_ITER]\n [--fit_thr FIT_THR]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image\n\nRemove outliers from image using the RANSAC algorithm.\nThe RANSAC algorithm parameters are sensitive to the input data.\n\nNOTE: Current default parameters are tuned for ad/md/rd images only.\n\nFormerly: scil_remove_outliers_ransac.py\n\npositional arguments:\n in_image Nifti image.\n out_image Corrected Nifti image.\n\noptions:\n -h, --help show this help message and exit\n --min_fit MIN_FIT The minimum number of data values required to fit the model. [50]\n --max_iter MAX_ITER The maximum number of iterations allowed in the algorithm. [1000]\n --fit_thr FIT_THR Threshold value for determining when a data point fits a model. [0.01]\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "image", - "image" - ], - [ - "algorithm", - "algorithm" - ], - [ - "Data", - "data", - "data" - ], - [ - "threshold", - "thresholds", - "threshold" - ], - [ - "level", - "level" - ], - [ - "parameters", - "parameters" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_resample", - "docstring": "Script to resample a dataset to match the resolution of another\nreference dataset or to the resolution specified as in argument.\n\nFormerly: scil_resample_volume.py", - "help": "usage: scil_volume_resample.py [-h]\n (--ref REF | --volume_size VOLUME_SIZE [VOLUME_SIZE ...] | --voxel_size VOXEL_SIZE [VOXEL_SIZE ...] 
| --iso_min)\n [--interp {nn,lin,quad,cubic}]\n [--enforce_dimensions]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_image out_image\n\nScript to resample a dataset to match the resolution of another\nreference dataset or to the resolution specified as in argument.\n\nFormerly: scil_resample_volume.py\n\npositional arguments:\n in_image Path of the input volume.\n out_image Path of the resampled volume.\n\noptions:\n -h, --help show this help message and exit\n --ref REF Reference volume to resample to.\n --volume_size VOLUME_SIZE [VOLUME_SIZE ...]\n Sets the size for the volume. If the value is set to is Y, it will resample to a shape of Y x Y x Y.\n --voxel_size VOXEL_SIZE [VOXEL_SIZE ...]\n Sets the voxel size. If the value is set to is Y, it will set a voxel size of Y x Y x Y.\n --iso_min Resample the volume to R x R x R with R being the smallest current voxel dimension.\n --interp {nn,lin,quad,cubic}\n Interpolation mode.\n nn: nearest neighbour\n lin: linear\n quad: quadratic\n cubic: cubic\n Defaults to linear\n --enforce_dimensions Enforce the reference volume dimension.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "shape", - "shape" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ], - [ - "size", - "size" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_reshape_to_reference", - "docstring": "Reshape / reslice / resample *.nii or *.nii.gz using a reference.\nThis script can be used to align freesurfer/civet output, as .mgz,\nto the original input image.\n\n>>> scil_volume_reshape_to_reference.py wmparc.mgz t1.nii.gz wmparc_t1.nii.gz\\\n --interpolation nearest\n\nFormerly: scil_reshape_to_reference.py", - "help": "usage: scil_volume_reshape_to_reference.py [-h]\n [--interpolation {linear,nearest}]\n [--keep_dtype]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_file in_ref_file out_file\n\nReshape / reslice / resample *.nii or *.nii.gz using a reference.\nThis script can be used to align freesurfer/civet output, as .mgz,\nto the original input image.\n\n>>> scil_volume_reshape_to_reference.py wmparc.mgz t1.nii.gz wmparc_t1.nii.gz\\\n --interpolation nearest\n\nFormerly: scil_reshape_to_reference.py\n\npositional arguments:\n in_file Path of the image (.nii or .mgz) to be reshaped.\n in_ref_file Path of the reference image (.nii).\n out_file Output filename of the reshaped image (.nii).\n\noptions:\n -h, --help show this help message and exit\n --interpolation {linear,nearest}\n Interpolation: \"linear\" or \"nearest\". [linear]\n --keep_dtype If True, keeps the data_type of the input image (in_file) when saving the output image (out_file).\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "true", - "true" - ], - [ - "level", - "level" - ], - [ - "image", - "image" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_stats_in_ROI", - "docstring": "Compute the statistics (mean, std) of scalar maps, which can represent\ndiffusion metrics, in a ROI. Prints the results.\n\nThe mask can either be a binary mask, or a weighting mask. If the mask is\na weighting mask it should either contain floats between 0 and 1 or should be\nnormalized with --normalize_weights. 
IMPORTANT: if the mask contains weights\n(and not 0 and 1 exclusively), the standard deviation will also be weighted.", - "help": "usage: scil_volume_stats_in_ROI.py [-h]\n (--metrics_dir dir | --metrics file [file ...])\n [--bin] [--normalize_weights]\n [--indent INDENT] [--sort_keys]\n [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_mask\n\nCompute the statistics (mean, std) of scalar maps, which can represent\ndiffusion metrics, in a ROI. Prints the results.\n\nThe mask can either be a binary mask, or a weighting mask. If the mask is\na weighting mask it should either contain floats between 0 and 1 or should be\nnormalized with --normalize_weights. IMPORTANT: if the mask contains weights\n(and not 0 and 1 exclusively), the standard deviation will also be weighted.\n\npositional arguments:\n in_mask Mask volume filename.\n Can be a binary mask or a weighted mask.\n\noptions:\n -h, --help show this help message and exit\n --bin If set, will consider every value of the mask higherthan 0 to be \n part of the mask (equivalent weighting for every voxel).\n --normalize_weights If set, the weights will be normalized to the [0,1] range.\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. \n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n\nMetrics input options:\n --metrics_dir dir Name of the directory containing metrics files: we will \n load all nifti files.\n --metrics file [file ...]\n Metrics nifti filename. List of the names of the metrics file, \n in nifti format.\n\nJson options:\n --indent INDENT Indent for json pretty print.\n --sort_keys Sort keys in output json.\n", - "synonyms": [ - [ - "represent", - "represent" - ], - [ - "weighted", - "weighted" - ], - [ - "diffusion", - "diffusion" - ], - [ - "maps", - "maps" - ], - [ - "volume", - "volumes", - "volume" - ], - [ - "binary", - "binary" - ], - [ - "voxel", - "voxel" - ], - [ - "level", - "level" - ] - ], - "keywords": [] - }, - { - "name": "scil_volume_stats_in_labels", - "docstring": "Computes the information from the input map for each cortical region\n(corresponding to an atlas).\n\nHint: For instance, this script could be useful if you have a seed map from a\nspecific bundle, to know from which regions it originated.\n\nFormerly: scil_compute_seed_by_labels.py", - "help": "usage: scil_volume_stats_in_labels.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]\n in_labels in_labels_lut in_map\n\nComputes the information from the input map for each cortical region\n(corresponding to an atlas).\n\nHint: For instance, this script could be useful if you have a seed map from a\nspecific bundle, to know from which regions it originated.\n\nFormerly: scil_compute_seed_by_labels.py\n\npositional arguments:\n in_labels Path of the input label file.\n in_labels_lut Path of the LUT file corresponding to labels,used to name the regions of interest.\n in_map Path of the input map file. Expecting a 3D file.\n\noptions:\n -h, --help show this help message and exit\n -v [{DEBUG,INFO,WARNING}]\n Produces verbose output depending on the provided level. 
\n Default level is warning, default when using -v is info.\n -f Force overwriting of the output files.\n", - "synonyms": [ - [ - "region", - "regions", - "region" - ], - [ - "maps", - "map" - ], - [ - "region", - "regions", - "regions" - ], - [ - "bundles", - "bundle" - ], - [ - "atlas", - "atlas" - ], - [ - "cortex", - "cortical", - "parietal", - "cortical" - ], - [ - "level", - "level" - ], - [ - "specific", - "specific" - ] - ], - "keywords": [] - } - ] -} \ No newline at end of file From eb801e7c99963f3c212f08466a21e25e8e116f0c Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Mon, 1 Jul 2024 22:42:49 -0400 Subject: [PATCH 14/69] search in docstring then in helpfiles instead of letting the user chose between the two --- scripts/scil_search_keywords.py | 101 +++++++++++++++++--------------- 1 file changed, 54 insertions(+), 47 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index b6dac857d..6ca14f436 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -47,10 +47,8 @@ def _build_arg_parser(): p.add_argument('keywords', nargs='+', help='Search the provided list of keywords.') - p.add_argument('--search_parser', action='store_true', - help='Search through and display the full script argparser ' - 'instead of looking only at the docstring. (warning: ' - 'much slower).') + p.add_argument('--full_parser', action='store_true', + help='Display the full script argparser help.') add_verbose_arg(p) @@ -81,63 +79,58 @@ def main(): matches = [] - # Search through the argparser instead of the docstring - if args.search_parser: - #Use precomputed help files - for help_file in sorted(hidden_dir.glob('*.help')): - script_name = pathlib.Path(help_file.stem).stem - with open(help_file, 'r') as f: - search_text = f.read() + + # Search through the docstring + for script in sorted(script_dir.glob('*.py')): + #Remove the .py extension + filename = script.stem + if filename == '__init__' or filename =='scil_search_keywords': + continue + + search_text = _get_docstring_from_script_path(str(script)) - # Test intersection of all keywords, either in filename or docstring - if not _contains_stemmed_keywords(stemmed_keywords, search_text, script_name): - continue + if not _contains_stemmed_keywords(stemmed_keywords, search_text, filename): + continue - matches.append(script_name) - search_text = search_text or 'No docstring available!' + matches.append(filename) + search_text = search_text or 'No docstring available!' 
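For context, the docstring-first pass above relies on _get_docstring_from_script_path, which (per the helper removed later in this series) is a thin wrapper around Python's ast module. A minimal sketch of the same idea, with an illustrative helper name and path:

    import ast

    def get_module_docstring(path):
        # Parse the source file and return its module-level docstring, if any.
        with open(path, 'r') as f:
            tree = ast.parse(f.read())
        return ast.get_docstring(tree) or ''

    print(get_module_docstring('scripts/scil_search_keywords.py'))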
- display_filename = script_name - display_short_info, display_long_info = _split_first_sentence( - search_text) + display_filename = filename + display_short_info, display_long_info = _split_first_sentence( + search_text) - # Highlight found keywords - for keyword in args.keywords: - display_short_info = display_short_info.replace(keyword, f'{Fore.RED}{Style.BRIGHT}{keyword}{Style.RESET_ALL}') - display_long_info = display_long_info.replace(keyword, f'{Fore.RED}{Style.BRIGHT}{keyword}{Style.RESET_ALL}') + # Highlight found keywords using colorama + display_short_info = _highlight_keywords(display_short_info, stemmed_keywords) + display_long_info = _highlight_keywords(display_long_info, stemmed_keywords) - # Print everything - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}") - logging.info(display_short_info) - logging.debug(display_long_info) - logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") - logging.info("\n") - - # Search through the docstring instead of the argparser - else: - for script in sorted(script_dir.glob('*.py')): - #Remove the .py extension - filename = script.stem - if filename == '__init__' or filename =='scil_search_keywords': - continue - - search_text = _get_docstring_from_script_path(str(script)) + # Print everything + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}") + logging.info(display_short_info) + logging.debug(display_long_info) + logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") + logging.info("\n") + + + # If no matches found in docstrings, check in the help files - # Test intersection of all keywords, either in filename or docstring - if not _contains_stemmed_keywords(stemmed_keywords, search_text, filename): + if not matches: + for help_file in sorted(hidden_dir.glob('*.help')): #Use precomputed help files + script_name = pathlib.Path(help_file.stem).stem + with open(help_file, 'r') as f: + search_text = f.read() + + # Test intersection of all keywords, either in filename or docstring + if not _contains_stemmed_keywords(stemmed_keywords, search_text, script_name): continue - matches.append(filename) + matches.append(script_name) search_text = search_text or 'No docstring available!' - display_filename = filename + display_filename = script_name display_short_info, display_long_info = _split_first_sentence( search_text) - # Highlight found keywords using colorama - display_short_info = _highlight_keywords(display_short_info, stemmed_keywords) - display_long_info = _highlight_keywords(display_long_info, stemmed_keywords) - # Print everything logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}") logging.info(display_short_info) @@ -183,6 +176,20 @@ def main(): logging.info(_make_title(' No results found! 
')) + # Display full argparser if --full_parser is used + if args.full_parser: + for script in sorted(script_dir.glob('*.py')): + filename = script.stem + if filename == '__init__' or filename == 'scil_search_keywords': + continue + help_file = hidden_dir / f"{filename}.py.help" + if help_file.exists(): + with open(help_file, 'r') as f: + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{filename}{Style.RESET_ALL}") + logging.info(f.read()) + logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") + logging.info("\n") + def _make_title(text): return f'{Fore.BLUE}{Style.BRIGHT}{text.center(SPACING_LEN, SPACING_CHAR)}{Style.RESET_ALL}' From 22825e881796eb0ff1b8f707875d2a006de06246 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Tue, 2 Jul 2024 12:12:43 -0400 Subject: [PATCH 15/69] Add search by objects --- scripts/scil_search_keywords.py | 63 +++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 26 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 6ca14f436..84eabc5c3 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -41,9 +41,18 @@ KEYWORDS_FILE_PATH = pathlib.Path(__file__).parent.parent / 'scilpy-bot-scripts'/'Vocabulary'/'Keywords.json' SYNONYMS_FILE_PATH = pathlib.Path(__file__).parent.parent / 'scilpy-bot-scripts'/'Vocabulary'/'Synonyms.json' +OBJECTS = [ + 'aodf', 'bids', 'bingham', 'btensor', 'bundle', 'connectivity', 'denoising', + 'dki', 'dti','dwi', 'fodf', 'freewater', 'frf', 'gradients', 'header', 'json', + 'labels', 'lesions', 'mti', 'NODDI', 'sh', 'surface', 'tracking', + 'tractogram', 'viz', 'volume' +] + def _build_arg_parser(): p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) + p.add_argument('--object', choices=OBJECTS, required=True, + help='Choose the object you want to work on.' ) p.add_argument('keywords', nargs='+', help='Search the provided list of keywords.') @@ -79,43 +88,43 @@ def main(): matches = [] - + # Search through the docstring - for script in sorted(script_dir.glob('*.py')): + + for script in sorted(script_dir.glob('scil_{}_*.py'.format(args.object))): #Remove the .py extension filename = script.stem if filename == '__init__' or filename =='scil_search_keywords': continue - + search_text = _get_docstring_from_script_path(str(script)) # Test intersection of all keywords, either in filename or docstring - if not _contains_stemmed_keywords(stemmed_keywords, search_text, filename): - continue - - matches.append(filename) - search_text = search_text or 'No docstring available!' - - display_filename = filename - display_short_info, display_long_info = _split_first_sentence( - search_text) + if _contains_stemmed_keywords(stemmed_keywords, search_text, filename): + + matches.append(filename) + search_text = search_text or 'No docstring available!' 
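The category filter introduced here works purely through filename convention: every script is named scil_<object>_<action>.py, so restricting the search to one object is just a glob. A small illustration of that pattern (directory and object name are hypothetical):

    import pathlib

    script_dir = pathlib.Path('scripts')
    # 'scil_{}_*.py'.format('tractogram') matches only the tractogram scripts.
    for script in sorted(script_dir.glob('scil_{}_*.py'.format('tractogram'))):
        print(script.stem)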
- display_filename = filename
- display_short_info, display_long_info = _split_first_sentence(
- search_text)
+ display_filename = filename
+ display_short_info, display_long_info = _split_first_sentence(
+ search_text)

- # Highlight found keywords using colorama
- display_short_info = _highlight_keywords(display_short_info, stemmed_keywords)
- display_long_info = _highlight_keywords(display_long_info, stemmed_keywords)
+ # Highlight found keywords using colorama
+ display_short_info = _highlight_keywords(display_short_info, stemmed_keywords)
+ display_long_info = _highlight_keywords(display_long_info, stemmed_keywords)

- # Print everything
- logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}")
- logging.info(display_short_info)
- logging.debug(display_long_info)
- logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}")
- logging.info("\n")
+ # Print everything
+ logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}")
+ logging.info(display_short_info)
+ logging.debug(display_long_info)
+ logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}")
+ logging.info("\n")

 # If no matches found in docstrings, check in the help files
- if not matches:
- for help_file in sorted(hidden_dir.glob('*.help')): #Use precomputed help files
+ if not matches:
+
+ for help_file in sorted(hidden_dir.glob('scil_{}_*.py'.format(args.object))): #Use precomputed help files
 script_name = pathlib.Path(help_file.stem).stem
 with open(help_file, 'r') as f:
 search_text = f.read()
@@ -143,9 +152,11 @@ def main():
 keywords_data = json.load(f)

 if not matches:
- print("search by scripts keywords")
+ print("search by scripts keywords...")
 for script in keywords_data['scripts']:
 script_name = script['name']
+ if not script_name.startswith(f'scil_{args.object}_'):
+ continue
 script_keywords = script['keywords']
 if all([stem in _stem_text(' '.join(script_keywords)) for stem in stemmed_keywords]):
 matches.append(script_name)
 logging.info(f"{Fore.BLUE}{Style.BRIGHT}{script_name}{Style.RESET_ALL}")
@@ -169,7 +171,7 @@ def main():
 if not matches:
 for keyword in args.keywords:
 synonyms = _get_synonyms(keyword, synonyms_data)
- for script in sorted(script_dir.glob('*.py')):
+ for script in sorted(script_dir.glob('scil_{}_*.py'.format(args.object))):
 filename = script.stem
 if filename == '__init__' or filename == 'scil_search_keywords':
 continue
@@ -189,7 +189,7 @@ def main():

 # Display full argparser if --full_parser is used
 if args.full_parser:
- for script in sorted(script_dir.glob('*.py')):
+ for script in sorted(script_dir.glob('scil_{}_*.py'.format(args.object))):
 filename = script.stem
 if filename == '__init__' or filename == 'scil_search_keywords':
 continue

From d0c105c069cf27e46fa7b68b593b966a3b134923 Mon Sep 17 00:00:00 2001
From: bouj1113
Date: Tue, 2 Jul 2024 12:38:13 -0400
Subject: [PATCH 16/69] let user choose between objects instead of giving it
 as an argument

---
 scripts/scil_search_keywords.py | 36 ++++++++++++++++++++++++---------
 1 file changed, 26 insertions(+), 10 deletions(-)

diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py
index 84eabc5c3..7195eef3c 100755
--- a/scripts/scil_search_keywords.py
+++ b/scripts/scil_search_keywords.py
@@ -48,11 +48,25 @@
 'tractogram', 'viz', 'volume'
 ]

+def prompt_user_for_object():
+ print("Available objects:")
+ for idx, obj in enumerate(OBJECTS):
+ print(f"{idx + 1}. {obj}")
+ while True:
+ try:
+ choice = int(input("Choose the object you want to work on (enter the number): "))
+ if 1 <= choice <= len(OBJECTS):
+ return OBJECTS[choice - 1]
+ else:
+ print(f"Please enter a number between 1 and {len(OBJECTS)}.")
+ except ValueError:
+ print("Invalid input. 
Please enter a number.") + def _build_arg_parser(): p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) - p.add_argument('--object', choices=OBJECTS, required=True, - help='Choose the object you want to work on.' ) + #p.add_argument('--object', choices=OBJECTS, required=True, + # help='Choose the object you want to work on.' ) p.add_argument('keywords', nargs='+', help='Search the provided list of keywords.') @@ -72,6 +86,7 @@ def main(): else: logging.getLogger().setLevel(logging.getLevelName(args.verbose)) + selected_object = prompt_user_for_object() stemmed_keywords = _stem_keywords(args.keywords) script_dir = pathlib.Path(__file__).parent @@ -90,8 +105,8 @@ def main(): # Search through the docstring - - for script in sorted(script_dir.glob('scil_{}_*.py'.format(args.object))): + logging.info(f"Searching through docstrings for '{selected_object}' scripts...") + for script in sorted(script_dir.glob('scil_{}_*.py'.format(selected_object))): #Remove the .py extension filename = script.stem if filename == '__init__' or filename =='scil_search_keywords': @@ -123,8 +138,8 @@ def main(): # If no matches found in docstrings, check in the help files if not matches: - - for help_file in sorted(hidden_dir.glob('scil_{}_*.py'.format(args.object))): #Use precomputed help files + logging.info(f"No matches found in docstrings. Searching through help files for '{selected_object}' scripts...") + for help_file in sorted(hidden_dir.glob('scil_{}_*.py'.format(selected_object))): #Use precomputed help files script_name = pathlib.Path(help_file.stem).stem with open(help_file, 'r') as f: search_text = f.read() @@ -152,10 +167,10 @@ def main(): keywords_data = json.load(f) if not matches: - print("search by scripts keywords...") + logging.info("No matches found in help files. Searching by script keywords...") for script in keywords_data['scripts']: script_name = script['name'] - if not script_name.startswith(f'scil_{args.object}_'): + if not script_name.startswith(f'scil_{selected_object}_'): continue script_keywords = script['keywords'] if all([stem in _stem_text(' '.join(script_keywords)) for stem in stemmed_keywords]): @@ -169,9 +184,10 @@ def main(): synonyms_data = json.load(f) if not matches: + logging.info("No matches found by script keywords. 
Searching by synonyms...") for keyword in args.keywords: synonyms = _get_synonyms(keyword, synonyms_data) - for script in sorted(script_dir.glob('scil_{}_*.py'.format(args.object))): + for script in sorted(script_dir.glob('scil_{}_*.py'.format(selected_object))): filename = script.stem if filename == '__init__' or filename == 'scil_search_keywords': continue @@ -189,7 +205,7 @@ def main(): # Display full argparser if --full_parser is used if args.full_parser: - for script in sorted(script_dir.glob('scil_{}_*.py'.format(args.object))): + for script in sorted(script_dir.glob('scil_{}_*.py'.format(selected_object))): filename = script.stem if filename == '__init__' or filename == 'scil_search_keywords': continue From 21559c6bfbb0644c3c4869215ff2b04da11445fd Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Tue, 2 Jul 2024 14:19:42 -0400 Subject: [PATCH 17/69] display scripts by score --- scripts/scil_search_keywords.py | 81 ++++++++++++++++++++++----------- 1 file changed, 55 insertions(+), 26 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 7195eef3c..893e941f0 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -113,11 +113,10 @@ def main(): continue search_text = _get_docstring_from_script_path(str(script)) + score = _calculate_score(stemmed_keywords, search_text, filename=filename) - # Test intersection of all keywords, either in filename or docstring - if _contains_stemmed_keywords(stemmed_keywords, search_text, filename): - - matches.append(filename) + if score > 0: + matches.append((filename, score)) search_text = search_text or 'No docstring available!' display_filename = filename @@ -129,7 +128,7 @@ def main(): display_long_info = _highlight_keywords(display_long_info, stemmed_keywords) # Print everything - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}") + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename} - Score: {score}%{Style.RESET_ALL}") logging.info(display_short_info) logging.debug(display_long_info) logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") @@ -143,24 +142,23 @@ def main(): script_name = pathlib.Path(help_file.stem).stem with open(help_file, 'r') as f: search_text = f.read() - - # Test intersection of all keywords, either in filename or docstring - if not _contains_stemmed_keywords(stemmed_keywords, search_text, script_name): - continue - matches.append(script_name) - search_text = search_text or 'No docstring available!' + score = _calculate_score(stemmed_keywords, search_text, filename=filename) - display_filename = script_name - display_short_info, display_long_info = _split_first_sentence( - search_text) + if score > 0: + matches.append((script_name, score)) + search_text = search_text or 'No docstring available!' 
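At this point in the series the score is a coverage percentage: the fraction of the requested (stemmed) keywords that appear at least once in the stemmed text or filename, as computed by the _calculate_score added in this commit. A standalone sketch of that formula, with invented sample values:

    def coverage_score(stemmed_keywords, stemmed_text, stemmed_filename):
        # Percentage of keywords found at least once in text or filename.
        found = sum(1 for kw in stemmed_keywords
                    if kw in stemmed_text or kw in stemmed_filename)
        return int(found / len(stemmed_keywords) * 100)

    # Both stems found -> 100
    print(coverage_score(['bundl', 'filter'],
                         'filter a bundl by roi', 'scil_bundle_filter'))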
- # Print everything - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}") - logging.info(display_short_info) - logging.debug(display_long_info) - logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") - logging.info("\n") + display_filename = script_name + display_short_info, display_long_info = _split_first_sentence( + search_text) + + # Print everything + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename} - Score: {score}%{Style.RESET_ALL}") + logging.info(display_short_info) + logging.debug(display_long_info) + logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") + logging.info("\n") # If no matches found, check in the keywords file with open(KEYWORDS_FILE_PATH, 'r') as f: @@ -173,9 +171,12 @@ def main(): if not script_name.startswith(f'scil_{selected_object}_'): continue script_keywords = script['keywords'] - if all([stem in _stem_text(' '.join(script_keywords)) for stem in stemmed_keywords]): - matches.append(script_name) - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{script_name}{Style.RESET_ALL}") + score = _calculate_score(stemmed_keywords, ' '.join(script_keywords)) + + if score > 0: + matches.append((script_name, score)) + first_sentence, _ = _split_first_sentence(search_text) + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename} - Score: {score}%{Style.RESET_ALL}: {first_sentence}") @@ -193,15 +194,20 @@ def main(): continue search_text = _get_docstring_from_script_path(str(script)) if any(synonym in search_text for synonym in synonyms): - matches.append(filename) + score = _calculate_score(synonyms, search_text) + matches.append((filename, score)) first_sentence, _ = _split_first_sentence(search_text) logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{filename}{Style.RESET_ALL}: {first_sentence}") + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{filename}- Score: {score}%{Style.RESET_ALL}: {first_sentence}") logging.info("\n") if not matches: logging.info(_make_title(' No results found! ')) - + else: + matches.sort(key=lambda x: x[1], reverse=True) + logging.info(_make_title(' Results Ordered By Score ')) + for match in matches: + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match[0]} - Score: {match[1]}%{Style.RESET_ALL}") # Display full argparser if --full_parser is used if args.full_parser: @@ -381,5 +387,28 @@ def _get_synonyms(keyword, synonyms_data): return synonym_set return [] +def _calculate_score(keywords, text, filename=""): + """ + Calculate the score based on the presence of keywords in the given text and filename. + + Parameters + ---------- + keywords : list of str + List of keywords to search for. + text : str + Text to search within (e.g., docstring or help file content). + filename : str, optional + Filename to search within (default is an empty string). + + Returns + ------- + int + Score as a percentage representing the ratio of found keywords to the total number of keywords. 
+ """ + text = _stem_text(text) + filename = _stem_text(filename) + found_keywords = sum(1 for keyword in keywords if keyword in text or keyword in filename) + return int((found_keywords / len(keywords)) * 100) + if __name__ == '__main__': main() \ No newline at end of file From b765dad8ba51e0fe1215869feca9831c96f55a33 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Wed, 3 Jul 2024 23:02:32 -0400 Subject: [PATCH 18/69] calculate frequency of keywords in each script --- scripts/scil_search_keywords.py | 65 +++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 27 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 893e941f0..0c5090180 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -102,6 +102,7 @@ def main(): matches = [] + scores = {} # Search through the docstring @@ -116,7 +117,9 @@ def main(): score = _calculate_score(stemmed_keywords, search_text, filename=filename) if score > 0: - matches.append((filename, score)) + matches.append(filename) + scores[filename] = score + search_text = search_text or 'No docstring available!' display_filename = filename @@ -128,7 +131,7 @@ def main(): display_long_info = _highlight_keywords(display_long_info, stemmed_keywords) # Print everything - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename} - Score: {score}%{Style.RESET_ALL}") + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}") logging.info(display_short_info) logging.debug(display_long_info) logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") @@ -143,10 +146,12 @@ def main(): with open(help_file, 'r') as f: search_text = f.read() - score = _calculate_score(stemmed_keywords, search_text, filename=filename) + score = _calculate_score(stemmed_keywords, search_text, script_name) if score > 0: matches.append((script_name, score)) + scores[script_name] = score + search_text = search_text or 'No docstring available!' 
display_filename = script_name @@ -154,7 +159,7 @@ def main(): search_text) # Print everything - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename} - Score: {score}%{Style.RESET_ALL}") + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}") logging.info(display_short_info) logging.debug(display_long_info) logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") @@ -171,12 +176,14 @@ def main(): if not script_name.startswith(f'scil_{selected_object}_'): continue script_keywords = script['keywords'] - score = _calculate_score(stemmed_keywords, ' '.join(script_keywords)) + score = _calculate_score(stemmed_keywords, ' '.join(script_keywords), script_name) if score > 0: - matches.append((script_name, score)) + matches.append(script_name) + scores[script_name] = score + first_sentence, _ = _split_first_sentence(search_text) - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename} - Score: {score}%{Style.RESET_ALL}: {first_sentence}") + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}: {first_sentence}") @@ -194,8 +201,8 @@ def main(): continue search_text = _get_docstring_from_script_path(str(script)) if any(synonym in search_text for synonym in synonyms): - score = _calculate_score(synonyms, search_text) - matches.append((filename, score)) + matches.append(filename) + scores[filename] = _calculate_score(synonyms, search_text, filename) first_sentence, _ = _split_first_sentence(search_text) logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") logging.info(f"{Fore.BLUE}{Style.BRIGHT}{filename}- Score: {score}%{Style.RESET_ALL}: {first_sentence}") @@ -203,11 +210,11 @@ def main(): if not matches: logging.info(_make_title(' No results found! ')) - else: - matches.sort(key=lambda x: x[1], reverse=True) - logging.info(_make_title(' Results Ordered By Score ')) - for match in matches: - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match[0]} - Score: {match[1]}%{Style.RESET_ALL}") + # Sort matches by score and print them + sorted_matches = sorted(matches, key=lambda x: scores[x], reverse=True) + logging.info(_make_title(' Results Ordered by Score ')) + for match in sorted_matches: + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match}{Style.RESET_ALL}: Score = {scores[match]}") # Display full argparser if --full_parser is used if args.full_parser: @@ -387,28 +394,32 @@ def _get_synonyms(keyword, synonyms_data): return synonym_set return [] -def _calculate_score(keywords, text, filename=""): +def _calculate_score(keywords, text, filename): """ - Calculate the score based on the presence of keywords in the given text and filename. - + Calculate a score for how well the text and filename match the keywords. + Parameters ---------- keywords : list of str - List of keywords to search for. + Keywords to search for. text : str - Text to search within (e.g., docstring or help file content). - filename : str, optional - Filename to search within (default is an empty string). - + Text to search within. + filename : str + Filename to search within. + Returns ------- int - Score as a percentage representing the ratio of found keywords to the total number of keywords. + Score based on the frequency of keywords in the text and filename. 
""" - text = _stem_text(text) - filename = _stem_text(filename) - found_keywords = sum(1 for keyword in keywords if keyword in text or keyword in filename) - return int((found_keywords / len(keywords)) * 100) + text = text.lower() + filename = filename.lower() + score = 0 + for keyword in keywords: + keyword = keyword.lower() + score += text.count(keyword) + score += filename.count(keyword) + return score if __name__ == '__main__': main() \ No newline at end of file From 829f164d3c1d4da1fdaa0912caf7fe79684e22a9 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Wed, 3 Jul 2024 23:12:03 -0400 Subject: [PATCH 19/69] don't display 'Results ordered by score' if no matches were found --- scripts/scil_search_keywords.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 0c5090180..2b1ca561d 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -210,11 +210,13 @@ def main(): if not matches: logging.info(_make_title(' No results found! ')) + # Sort matches by score and print them - sorted_matches = sorted(matches, key=lambda x: scores[x], reverse=True) - logging.info(_make_title(' Results Ordered by Score ')) - for match in sorted_matches: - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match}{Style.RESET_ALL}: Score = {scores[match]}") + else: + sorted_matches = sorted(matches, key=lambda x: scores[x], reverse=True) + logging.info(_make_title(' Results Ordered by Score ')) + for match in sorted_matches: + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match}{Style.RESET_ALL}: Score = {scores[match]}") # Display full argparser if --full_parser is used if args.full_parser: From 8bfb5bf52551cb68a625af144e183427f2ddcd42 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Sat, 13 Jul 2024 13:38:47 -0400 Subject: [PATCH 20/69] stem the search text in the scoring function --- scripts/scil_search_keywords.py | 51 +++++++++++---------------------- 1 file changed, 17 insertions(+), 34 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 2b1ca561d..c79e52627 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -45,7 +45,7 @@ 'aodf', 'bids', 'bingham', 'btensor', 'bundle', 'connectivity', 'denoising', 'dki', 'dti','dwi', 'fodf', 'freewater', 'frf', 'gradients', 'header', 'json', 'labels', 'lesions', 'mti', 'NODDI', 'sh', 'surface', 'tracking', - 'tractogram', 'viz', 'volume' + 'tractogram', 'viz', 'volume', 'qball', 'rgb', 'lesions' ] def prompt_user_for_object(): @@ -122,7 +122,7 @@ def main(): search_text = search_text or 'No docstring available!' - display_filename = filename + display_filename = filename + '.py' display_short_info, display_long_info = _split_first_sentence( search_text) @@ -149,12 +149,12 @@ def main(): score = _calculate_score(stemmed_keywords, search_text, script_name) if score > 0: - matches.append((script_name, score)) + matches.append(script_name) scores[script_name] = score search_text = search_text or 'No docstring available!' 
- display_filename = script_name + display_filename = script_name + '.py' display_short_info, display_long_info = _split_first_sentence( search_text) @@ -182,9 +182,11 @@ def main(): matches.append(script_name) scores[script_name] = score + display_filename = script_name + '.py' first_sentence, _ = _split_first_sentence(search_text) + logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}: {first_sentence}") - + logging.info("\n") # If still no matches found, check for synonyms in the synonyms file @@ -204,8 +206,9 @@ def main(): matches.append(filename) scores[filename] = _calculate_score(synonyms, search_text, filename) first_sentence, _ = _split_first_sentence(search_text) + display_filename = filename + '.py' logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{filename}- Score: {score}%{Style.RESET_ALL}: {first_sentence}") + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{filename}{Style.RESET_ALL}: {first_sentence}") logging.info("\n") if not matches: @@ -216,7 +219,8 @@ def main(): sorted_matches = sorted(matches, key=lambda x: scores[x], reverse=True) logging.info(_make_title(' Results Ordered by Score ')) for match in sorted_matches: - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match}{Style.RESET_ALL}: Score = {scores[match]}") + display_filename = match + '.py' + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}: Score = {scores[match]}") # Display full argparser if --full_parser is used if args.full_parser: @@ -227,7 +231,8 @@ def main(): help_file = hidden_dir / f"{filename}.py.help" if help_file.exists(): with open(help_file, 'r') as f: - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{filename}{Style.RESET_ALL}") + display_filename = filename + '.py' + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}") logging.info(f.read()) logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") logging.info("\n") @@ -319,28 +324,6 @@ def _stem_text(text): words = nltk.word_tokenize(text) return ' '.join([stemmer.stem(word) for word in words]) -def _contains_stemmed_keywords(stemmed_keywords,text, filename): - """ - Check if stemmed keywords are present in the text or filename. - - Parameters - ---------- - stemmed_keywords : list of str - Stemmed keywords to search for. - text : str - Text to search within. - filename : str - Filename to search within. - - Returns - ------- - bool - True if all stemmed keywords are found in the text or filename, False otherwise. - """ - stemmed_text = _stem_text(text) - stemmed_filename = _stem_text(filename) - return all([stem in stemmed_text or stem in stemmed_filename for stem in stemmed_keywords]) - def _generate_help_files(): """ Call the external script generate_help_files to generate help files @@ -414,13 +397,13 @@ def _calculate_score(keywords, text, filename): int Score based on the frequency of keywords in the text and filename. 
""" - text = text.lower() - filename = filename.lower() + stemmed_text = _stem_text(text.lower()) + stemmed_filename = _stem_text(filename.lower()) score = 0 for keyword in keywords: keyword = keyword.lower() - score += text.count(keyword) - score += filename.count(keyword) + score += stemmed_text.count(keyword) + score += stemmed_filename.count(keyword) return score if __name__ == '__main__': From a05a7a740372b00ad828ea78e5c44e4bdd3f1071 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Mon, 22 Jul 2024 14:27:38 -0400 Subject: [PATCH 21/69] add some new synonyms --- scilpy-bot-scripts/Vocabulary/Synonyms.json | 5 +++++ scripts/scil_search_keywords.py | 2 -- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/scilpy-bot-scripts/Vocabulary/Synonyms.json b/scilpy-bot-scripts/Vocabulary/Synonyms.json index ffad9b9d8..ab34aab8c 100644 --- a/scilpy-bot-scripts/Vocabulary/Synonyms.json +++ b/scilpy-bot-scripts/Vocabulary/Synonyms.json @@ -12,6 +12,11 @@ "multi shell", "msmt" ], + [ + "SH", + "Spherical Harmonics" + ], + [ "single-shell", "single shell", diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index c79e52627..1a38a0fbe 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -27,8 +27,6 @@ nltk.download('punkt', quiet=True) -init(autoreset=True) - RED = '\033[31m' BOLD = '\033[1m' END_COLOR = '\033[0m' From c7aec39a6a3a658777e27f80042457acce8e952b Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Mon, 22 Jul 2024 15:53:08 -0400 Subject: [PATCH 22/69] add stemming to words between double quotes --- scripts/scil_search_keywords.py | 61 ++++++++++++++++++++++++++++++--- 1 file changed, 56 insertions(+), 5 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 1a38a0fbe..5f43a8054 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -22,6 +22,7 @@ from nltk.stem import PorterStemmer from colorama import init, Fore, Style import json +import re from scilpy.io.utils import add_verbose_arg @@ -85,7 +86,9 @@ def main(): logging.getLogger().setLevel(logging.getLevelName(args.verbose)) selected_object = prompt_user_for_object() + keywords, phrases = _extract_keywords_and_phrases(args.keywords) stemmed_keywords = _stem_keywords(args.keywords) + stemmed_phrases = [_stem_phrase(phrase) for phrase in phrases] script_dir = pathlib.Path(__file__).parent hidden_dir = script_dir / '.hidden' @@ -112,7 +115,7 @@ def main(): continue search_text = _get_docstring_from_script_path(str(script)) - score = _calculate_score(stemmed_keywords, search_text, filename=filename) + score = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, filename=filename) if score > 0: matches.append(filename) @@ -144,7 +147,7 @@ def main(): with open(help_file, 'r') as f: search_text = f.read() - score = _calculate_score(stemmed_keywords, search_text, script_name) + score = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, script_name) if score > 0: matches.append(script_name) @@ -174,7 +177,7 @@ def main(): if not script_name.startswith(f'scil_{selected_object}_'): continue script_keywords = script['keywords'] - score = _calculate_score(stemmed_keywords, ' '.join(script_keywords), script_name) + score = _calculate_score(stemmed_keywords, stemmed_phrases,' '.join(script_keywords), script_name) if score > 0: matches.append(script_name) @@ -202,7 +205,7 @@ def main(): search_text = _get_docstring_from_script_path(str(script)) if any(synonym in search_text for synonym in 
synonyms): matches.append(filename) - scores[filename] = _calculate_score(synonyms, search_text, filename) + scores[filename] = _calculate_score(synonyms,[], search_text, filename) first_sentence, _ = _split_first_sentence(search_text) display_filename = filename + '.py' logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") @@ -322,6 +325,23 @@ def _stem_text(text): words = nltk.word_tokenize(text) return ' '.join([stemmer.stem(word) for word in words]) +def _stem_phrase(phrase): + """ + Stem all words in a phrase using PorterStemmer. + + Parameters + ---------- + phrase : str + Phrase to be stemmed. + + Returns + ------- + str + Stemmed phrase. + """ + words = phrase.split() + return ' '.join([stemmer.stem(word) for word in words]) + def _generate_help_files(): """ Call the external script generate_help_files to generate help files @@ -377,7 +397,31 @@ def _get_synonyms(keyword, synonyms_data): return synonym_set return [] -def _calculate_score(keywords, text, filename): +def _extract_keywords_and_phrases(keywords): + """ + Extract keywords and phrases from the provided list. + + Parameters + ---------- + keywords : list of str + List of keywords and phrases. + + Returns + ------- + list of str, list of str + List of individual keywords and list of phrases. + """ + keywords_list = [] + phrases_list = [] + phrase_pattern = re.compile(r'\"(.+?)\"') + for keyword in keywords: + if phrase_pattern.match(keyword): + phrases_list.append(keyword.strip('"')) + else: + keywords_list.append(keyword) + return keywords_list, phrases_list + +def _calculate_score(keywords, phrases, text, filename): """ Calculate a score for how well the text and filename match the keywords. @@ -385,6 +429,8 @@ def _calculate_score(keywords, text, filename): ---------- keywords : list of str Keywords to search for. + phrases : list of str + Phrases to search for. text : str Text to search within. 
filename : str @@ -402,6 +448,11 @@ def _calculate_score(keywords, text, filename): keyword = keyword.lower() score += stemmed_text.count(keyword) score += stemmed_filename.count(keyword) + for phrase in phrases: + phrase_words = phrase.split() + for i in range(len(stemmed_text) - len(phrase_words) + 1): + if stemmed_text[i:i+len(phrase_words)] == phrase_words: + score += 1 return score if __name__ == '__main__': From 1836ac861a788445761301fd126b0bd136fea2dd Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Tue, 23 Jul 2024 01:33:09 -0400 Subject: [PATCH 23/69] modify score and add search_category argument --- scripts/scil_search_keywords.py | 290 +++++--------------------------- 1 file changed, 41 insertions(+), 249 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 5f43a8054..b926ab80a 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -14,32 +14,23 @@ """ import argparse -import ast import logging import pathlib -import subprocess import nltk -from nltk.stem import PorterStemmer from colorama import init, Fore, Style import json -import re +from scilpy.utils.scilpy_bot import ( + _get_docstring_from_script_path, _split_first_sentence, _stem_keywords, + _stem_text, _stem_phrase, _generate_help_files, _highlight_keywords, + _get_synonyms, _extract_keywords_and_phrases, _calculate_score, _make_title +) + +from scilpy.utils.scilpy_bot import SPACING_LEN, KEYWORDS_FILE_PATH, SYNONYMS_FILE_PATH from scilpy.io.utils import add_verbose_arg nltk.download('punkt', quiet=True) -RED = '\033[31m' -BOLD = '\033[1m' -END_COLOR = '\033[0m' -SPACING_CHAR = '=' -SPACING_LEN = 80 - -stemmer = PorterStemmer() - -# Path to the JSON file containing script information and keywords -KEYWORDS_FILE_PATH = pathlib.Path(__file__).parent.parent / 'scilpy-bot-scripts'/'Vocabulary'/'Keywords.json' -SYNONYMS_FILE_PATH = pathlib.Path(__file__).parent.parent / 'scilpy-bot-scripts'/'Vocabulary'/'Synonyms.json' - OBJECTS = [ 'aodf', 'bids', 'bingham', 'btensor', 'bundle', 'connectivity', 'denoising', 'dki', 'dti','dwi', 'fodf', 'freewater', 'frf', 'gradients', 'header', 'json', @@ -71,6 +62,9 @@ def _build_arg_parser(): p.add_argument('--full_parser', action='store_true', help='Display the full script argparser help.') + + p.add_argument('--search_category', action='store_true', + help='Search within a specific category of scripts.') add_verbose_arg(p) @@ -85,7 +79,10 @@ def main(): else: logging.getLogger().setLevel(logging.getLevelName(args.verbose)) - selected_object = prompt_user_for_object() + selected_object = None + if args.search_category: + selected_object = prompt_user_for_object() + keywords, phrases = _extract_keywords_and_phrases(args.keywords) stemmed_keywords = _stem_keywords(args.keywords) stemmed_phrases = [_stem_phrase(phrase) for phrase in phrases] @@ -105,21 +102,23 @@ def main(): matches = [] scores = {} + # Determine the pattern to search for + search_pattern = f'scil_{"{}_" if selected_object else ""}*.py' # Search through the docstring logging.info(f"Searching through docstrings for '{selected_object}' scripts...") - for script in sorted(script_dir.glob('scil_{}_*.py'.format(selected_object))): + for script in sorted(script_dir.glob(search_pattern.format(selected_object))): #Remove the .py extension filename = script.stem if filename == '__init__' or filename =='scil_search_keywords': continue search_text = _get_docstring_from_script_path(str(script)) - score = _calculate_score(stemmed_keywords, stemmed_phrases, 
search_text, filename=filename)
+        score_details = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, filename=filename)

-        if score > 0:
+        if score_details['total_score'] > 0:
             matches.append(filename)
-            scores[filename] = score
+            scores[filename] = score_details

         search_text = search_text or 'No docstring available!'

@@ -135,6 +134,10 @@ def main():
         logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}")
         logging.info(display_short_info)
         logging.debug(display_long_info)
+        for word, score in score_details.items():
+            if word != 'total_score':
+                logging.info(f"{Fore.GREEN}Occurrence of '{word}': {score}{Style.RESET_ALL}")
+        logging.info(f"Total Score: {score_details['total_score']}")
         logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}")
         logging.info("\n")

@@ -142,16 +145,16 @@

     if not matches:
         logging.info(f"No matches found in docstrings. Searching through help files for '{selected_object}' scripts...")
-        for help_file in sorted(hidden_dir.glob('scil_{}_*.py'.format(selected_object))): #Use precomputed help files
+        for help_file in sorted(hidden_dir.glob(search_pattern.format(selected_object))): #Use precomputed help files
             script_name = pathlib.Path(help_file.stem).stem
             with open(help_file, 'r') as f:
                 search_text = f.read()

-            score = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, script_name)
+            score_details = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, script_name)

-            if score > 0:
+            if score_details['total_score'] > 0:
                 matches.append(script_name)
-                scores[script_name] = score
+                scores[script_name] = score_details

             search_text = search_text or 'No docstring available!'

@@ -163,6 +166,10 @@ def main():
             logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}")
             logging.info(display_short_info)
             logging.debug(display_long_info)
+            for word, score in score_details.items():
+                if word != 'total_score':
+                    logging.info(f"{Fore.GREEN}Occurrence of '{word}': {score}{Style.RESET_ALL}")
+            logging.info(f"Total Score: {score_details['total_score']}")
             logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}")
             logging.info("\n")

@@ -174,14 +181,14 @@ def main():
         logging.info("No matches found in help files. Searching by script keywords...")
         for script in keywords_data['scripts']:
             script_name = script['name']
-            if not script_name.startswith(f'scil_{selected_object}_'):
+            if selected_object and not script_name.startswith(f'scil_{selected_object}_'):
                 continue
             script_keywords = script['keywords']
-            score = _calculate_score(stemmed_keywords, stemmed_phrases,' '.join(script_keywords), script_name)
+            score_details = _calculate_score(stemmed_keywords, stemmed_phrases,' '.join(script_keywords), script_name)

-            if score > 0:
+            if score_details['total_score'] > 0:
                 matches.append(script_name)
-                scores[script_name] = score
+                scores[script_name] = score_details

             display_filename = script_name + '.py'
             first_sentence, _ = _split_first_sentence(search_text)
             logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}")
             logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}: {first_sentence}")
             logging.info("\n")

@@ -198,7 +205,7 @@ def main():
         logging.info("No matches found by script keywords. Searching by synonyms...")
         for keyword in args.keywords:
             synonyms = _get_synonyms(keyword, synonyms_data)
-            for script in sorted(script_dir.glob('scil_{}_*.py'.format(selected_object))):
+            for script in sorted(script_dir.glob(search_pattern.format(selected_object))):
                 filename = script.stem
                 if filename == '__init__' or filename == 'scil_search_keywords':
                     continue

@@ -215,13 +222,13 @@ def main():

     if not matches:
         logging.info(_make_title(' No results found!
')) - # Sort matches by score and print them + """# Sort matches by score and print them else: - sorted_matches = sorted(matches, key=lambda x: scores[x], reverse=True) + sorted_matches = sorted(matches, key=lambda x: scores[x]['total_score'], reverse=True) logging.info(_make_title(' Results Ordered by Score ')) for match in sorted_matches: display_filename = match + '.py' - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}: Score = {scores[match]}") + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}: Score = {scores[match]['total_score']}")""" # Display full argparser if --full_parser is used if args.full_parser: @@ -238,222 +245,7 @@ def main(): logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") logging.info("\n") -def _make_title(text): - return f'{Fore.BLUE}{Style.BRIGHT}{text.center(SPACING_LEN, SPACING_CHAR)}{Style.RESET_ALL}' - - -def _get_docstring_from_script_path(script): - """Extract a python file's docstring from a filepath. - - Parameters - ---------- - script : str - Path to python file - - Returns - ------- - docstring : str - The file docstring, or an empty string if there was no docstring. - """ - with open(script, 'r') as reader: - file_contents = reader.read() - module = ast.parse(file_contents) - docstring = ast.get_docstring(module) or '' - return docstring - - -def _split_first_sentence(text): - """Split the first sentence from the rest of a string by finding the first - dot or newline. If there is no dot or newline, return the full string as - the first sentence, and None as the remaining text. - - Parameters - ---------- - text : str - Text to parse. - - Returns - ------- - first_sentence : str - The first sentence, or the full text if no dot or newline was found. - remaining : str - Everything after the first sentence. - - """ - candidates = ['. ', '.\n'] - sentence_idx = -1 - for candidate in candidates: - idx = text.find(candidate) - if idx != -1 and idx < sentence_idx or sentence_idx == -1: - sentence_idx = idx - - split_idx = (sentence_idx + 1) or None - sentence = text[:split_idx] - remaining = text[split_idx:] if split_idx else "" - return sentence, remaining - -def _stem_keywords(keywords): - """ - Stem a list of keywords using PorterStemmer. - - Parameters - ---------- - keywords : list of str - Keywords to be stemmed. - - Returns - ------- - list of str - Stemmed keywords. - """ - return [stemmer.stem(keyword) for keyword in keywords] - -def _stem_text(text): - """ - Stem all words in a text using PorterStemmer. - - Parameters - ---------- - text : str - Text to be stemmed. - - Returns - ------- - str - Stemmed text. - """ - words = nltk.word_tokenize(text) - return ' '.join([stemmer.stem(word) for word in words]) - -def _stem_phrase(phrase): - """ - Stem all words in a phrase using PorterStemmer. - - Parameters - ---------- - phrase : str - Phrase to be stemmed. - - Returns - ------- - str - Stemmed phrase. - """ - words = phrase.split() - return ' '.join([stemmer.stem(word) for word in words]) - -def _generate_help_files(): - """ - Call the external script generate_help_files to generate help files - """ - script_path = pathlib.Path(__file__).parent.parent / 'scilpy-bot-scripts'/'generate_help_files.py' - #calling the extrernal script generate_help_files - subprocess.run(['python', script_path], check=True) - -def _highlight_keywords(text, stemmed_keywords): - """ - Highlight the stemmed keywords in the given text using colorama. - - Parameters - ---------- - text : str - Text to highlight keywords in. 
- stemmed_keywords : list of str - Stemmed keywords to highlight. - - Returns - ------- - str - Text with highlighted keywords. - """ - words = text.split() - highlighted_text = [] - for word in words: - stemmed_word = stemmer.stem(word) - if stemmed_word in stemmed_keywords: - highlighted_text.append(f'{Fore.RED}{Style.BRIGHT}{word}{Style.RESET_ALL}') - else: - highlighted_text.append(word) - return ' '.join(highlighted_text) - -def _get_synonyms(keyword, synonyms_data): - """ - Get synonyms for a given keyword from the synonyms data. - - Parameters - ---------- - keyword : str - Keyword to find synonyms for. - synonyms_data : dict - Dictionary containing synonyms data. - - Returns - ------- - list of str - List of synonyms for the given keyword. - """ - for synonym_set in synonyms_data['synonyms']: - if keyword in synonym_set: - return synonym_set - return [] - -def _extract_keywords_and_phrases(keywords): - """ - Extract keywords and phrases from the provided list. - - Parameters - ---------- - keywords : list of str - List of keywords and phrases. - - Returns - ------- - list of str, list of str - List of individual keywords and list of phrases. - """ - keywords_list = [] - phrases_list = [] - phrase_pattern = re.compile(r'\"(.+?)\"') - for keyword in keywords: - if phrase_pattern.match(keyword): - phrases_list.append(keyword.strip('"')) - else: - keywords_list.append(keyword) - return keywords_list, phrases_list - -def _calculate_score(keywords, phrases, text, filename): - """ - Calculate a score for how well the text and filename match the keywords. - - Parameters - ---------- - keywords : list of str - Keywords to search for. - phrases : list of str - Phrases to search for. - text : str - Text to search within. - filename : str - Filename to search within. - - Returns - ------- - int - Score based on the frequency of keywords in the text and filename. 
- """ - stemmed_text = _stem_text(text.lower()) - stemmed_filename = _stem_text(filename.lower()) - score = 0 - for keyword in keywords: - keyword = keyword.lower() - score += stemmed_text.count(keyword) - score += stemmed_filename.count(keyword) - for phrase in phrases: - phrase_words = phrase.split() - for i in range(len(stemmed_text) - len(phrase_words) + 1): - if stemmed_text[i:i+len(phrase_words)] == phrase_words: - score += 1 - return score + if __name__ == '__main__': main() \ No newline at end of file From f22398abe5f24bb27f4252eabfd9e09e5cdb10be Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Tue, 23 Jul 2024 02:01:56 -0400 Subject: [PATCH 24/69] add no_synonyms argument --- scripts/scil_search_keywords.py | 163 ++++++++++---------------------- 1 file changed, 52 insertions(+), 111 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index b926ab80a..d7a82f74d 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -65,6 +65,10 @@ def _build_arg_parser(): p.add_argument('--search_category', action='store_true', help='Search within a specific category of scripts.') + + p.add_argument('--no_synonyms', action='store_true', + help='Search without using synonyms.') + add_verbose_arg(p) @@ -102,107 +106,58 @@ def main(): matches = [] scores = {} - # Determine the pattern to search for + # pattern to search for search_pattern = f'scil_{"{}_" if selected_object else ""}*.py' - # Search through the docstring - logging.info(f"Searching through docstrings for '{selected_object}' scripts...") + def update_matches_and_scores(filename, search_text, score_details): + if score_details['total_score'] > 0: + if filename not in matches: + matches.append(filename) + scores[filename] = score_details + else: + for key, value in score_details.items(): + if key != 'total_score': + scores[filename][key] = scores[filename].get(key, 0) + value + scores[filename]['total_score'] += score_details['total_score'] + for script in sorted(script_dir.glob(search_pattern.format(selected_object))): - #Remove the .py extension filename = script.stem if filename == '__init__' or filename =='scil_search_keywords': continue - + + # Search through the docstring search_text = _get_docstring_from_script_path(str(script)) score_details = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, filename=filename) + update_matches_and_scores(filename, search_text, score_details) + - if score_details['total_score'] > 0: - matches.append(filename) - scores[filename] = score_details - - search_text = search_text or 'No docstring available!' - - display_filename = filename + '.py' - display_short_info, display_long_info = _split_first_sentence( - search_text) - - # Highlight found keywords using colorama - display_short_info = _highlight_keywords(display_short_info, stemmed_keywords) - display_long_info = _highlight_keywords(display_long_info, stemmed_keywords) - - # Print everything - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}") - logging.info(display_short_info) - logging.debug(display_long_info) - for word, score in score_details.items(): - if word != 'total_score': - logging.info(f"{Fore.GREEN}Occurence of '{word}': {score}{Style.RESET_ALL}") - logging.info(f"Total Score: {score_details['total_score']}") - logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") - logging.info("\n") - - # If no matches found in docstrings, check in the help files - - if not matches: - logging.info(f"No matches found in docstrings. 
-        for help_file in sorted(hidden_dir.glob(search_pattern.format(selected_object))): #Use precomputed help files
-            script_name = pathlib.Path(help_file.stem).stem
+        # Search in help files
+        help_file = hidden_dir / f"{filename}.py.help"
+        if help_file.exists():
             with open(help_file, 'r') as f:
                 search_text = f.read()
+            score_details = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, filename=filename)
+            update_matches_and_scores(filename, search_text, score_details)

-            score_details = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, script_name)
-
-            if score_details['total_score'] > 0:
-                matches.append(script_name)
-                scores[script_name] = score_details
-
-            search_text = search_text or 'No docstring available!'
-
-            display_filename = script_name + '.py'
-            display_short_info, display_long_info = _split_first_sentence(
-                search_text)
-
-            # Print everything
-            logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}")
-            logging.info(display_short_info)
-            logging.debug(display_long_info)
-            for word, score in score_details.items():
-                if word != 'total_score':
-                    logging.info(f"{Fore.GREEN}Occurrence of '{word}': {score}{Style.RESET_ALL}")
-            logging.info(f"Total Score: {score_details['total_score']}")
-            logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}")
-            logging.info("\n")
-
-    # If no matches found, check in the keywords file
+    # Search in keywords file
     with open(KEYWORDS_FILE_PATH, 'r') as f:
         keywords_data = json.load(f)

-    if not matches:
-        logging.info("No matches found in help files. Searching by script keywords...")
-        for script in keywords_data['scripts']:
-            script_name = script['name']
-            if selected_object and not script_name.startswith(f'scil_{selected_object}_'):
-                continue
-            script_keywords = script['keywords']
-            score_details = _calculate_score(stemmed_keywords, stemmed_phrases,' '.join(script_keywords), script_name)
-
-            if score_details['total_score'] > 0:
-                matches.append(script_name)
-                scores[script_name] = score_details
-
-            display_filename = script_name + '.py'
-            first_sentence, _ = _split_first_sentence(search_text)
-            logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}")
-            logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}: {first_sentence}")
-            logging.info("\n")
+    for script in keywords_data['scripts']:
+        script_name = script['name']
+        if selected_object and not script_name.startswith(f'scil_{selected_object}_'):
+            continue
+        script_keywords = script['keywords']
+        search_text = ' '.join(script_keywords)
+        score_details = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, script_name)
+        update_matches_and_scores(script_name, search_text, score_details)

-    # If still no matches found, check for synonyms in the synonyms file
-    with open(SYNONYMS_FILE_PATH, 'r') as f:
-        synonyms_data = json.load(f)
+    # Search in synonyms file if not args.no_synonyms is not specified:
+    if not args.no_synonyms:
+        with open(SYNONYMS_FILE_PATH, 'r') as f:
+            synonyms_data = json.load(f)

-    if not matches:
-        logging.info("No matches found by script keywords. Searching by synonyms...")
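For reference, a self-contained sketch (not part of the patch) of the accumulation that update_matches_and_scores performs: per-keyword counts and the total are summed when the same script is matched by several passes (docstring, help file, keyword list, synonyms). The dict shape is the one returned by _calculate_score; the script name and counts below are only illustrative:

# Standalone sketch of the merge behaviour introduced in this commit.
matches, scores = [], {}

def update(filename, score_details):
    if score_details['total_score'] <= 0:
        return
    if filename not in matches:
        matches.append(filename)
        scores[filename] = score_details
    else:
        # Same script hit by a later pass: add the counts together.
        for key, value in score_details.items():
            if key != 'total_score':
                scores[filename][key] = scores[filename].get(key, 0) + value
        scores[filename]['total_score'] += score_details['total_score']

# e.g. hits from the docstring pass, then from the help-file pass:
update('scil_tractogram_math', {'tractogram': 2, 'total_score': 2})
update('scil_tractogram_math', {'tractogram': 1, 'filter': 3, 'total_score': 4})
assert scores['scil_tractogram_math'] == {'tractogram': 3, 'filter': 3,
                                          'total_score': 6}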
        for keyword in args.keywords:
            synonyms = _get_synonyms(keyword, synonyms_data)
            for script in sorted(script_dir.glob(search_pattern.format(selected_object))):
                filename = script.stem
                if filename == '__init__' or filename == 'scil_search_keywords':
                    continue
                search_text = _get_docstring_from_script_path(str(script))
                if any(synonym in search_text for synonym in synonyms):
-                    matches.append(filename)
-                    scores[filename] = _calculate_score(synonyms,[], search_text, filename)
-                    first_sentence, _ = _split_first_sentence(search_text)
-                    display_filename = filename + '.py'
-                    logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}")
-                    logging.info(f"{Fore.BLUE}{Style.BRIGHT}{filename}{Style.RESET_ALL}: {first_sentence}")
-                    logging.info("\n")
+                    score_details = _calculate_score(synonyms, [], search_text, filename)
+                    update_matches_and_scores(filename, search_text, score_details)

     if not matches:
         logging.info(_make_title(' No results found! '))

-    """# Sort matches by score and print them
+    # Sort matches by score and print them
     else:
         sorted_matches = sorted(matches, key=lambda x: scores[x]['total_score'], reverse=True)

         logging.info(_make_title(' Results Ordered by Score '))
         for match in sorted_matches:
-            display_filename = match + '.py'
-            logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}: Score = {scores[match]['total_score']}")"""
+            #display_filename = match + '.py'
+            logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match}{Style.RESET_ALL}")
+            for word, score in scores[match].items():
+                if word != 'total_score':
+                    logging.info(f"{Fore.GREEN}Occurrence of '{word}': {score}{Style.RESET_ALL}")
+            logging.info(f"Total Score: {scores[match]['total_score']}")
+            logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}")
+            logging.info("\n")

-    # Display full argparser if --full_parser is used
-    if args.full_parser:
-        for script in sorted(script_dir.glob('scil_{}_*.py'.format(selected_object))):
-            filename = script.stem
-            if filename == '__init__' or filename == 'scil_search_keywords':
-                continue
-            help_file = hidden_dir / f"{filename}.py.help"
-            if help_file.exists():
-                with open(help_file, 'r') as f:
-                    display_filename = filename + '.py'
-                    logging.info(f"{Fore.BLUE}{Style.BRIGHT}{display_filename}{Style.RESET_ALL}")
-                    logging.info(f.read())
-                    logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}")
-                    logging.info("\n")
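The next commit ("deal with quoted words") makes quoted multi-word arguments behave as phrases. The heuristic relies on the shell stripping the quotes, so a phrase reaches argparse as a single argv token that still contains a space. A minimal sketch of the intended split, under that assumption:

# Sketch of the keyword/phrase split the next commit introduces.
# Invoked as: scil_search_keywords.py "Spherical Harmonics" convert
# the shell delivers ['Spherical Harmonics', 'convert'] to argparse.
def split_keywords_and_phrases(args_keywords):
    keywords, phrases = [], []
    for token in args_keywords:
        (phrases if ' ' in token else keywords).append(token)
    return keywords, phrases

keywords, phrases = split_keywords_and_phrases(['Spherical Harmonics', 'convert'])
assert keywords == ['convert'] and phrases == ['Spherical Harmonics']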
From dd5bd73d66d4771ab835248c82a22d18c005a8f4 Mon Sep 17 00:00:00 2001
From: bouj1113
Date: Wed, 24 Jul 2024 02:01:37 -0400
Subject: [PATCH 25/69] deal with quoted words and add docstring to functions

---
 .../utils}/generate_help_files.py |   0
 scilpy/utils/scilpy_bot.py        | 275 ++++++++++++++++++
 scripts/scil_search_keywords.py   |  72 ++---
 3 files changed, 306 insertions(+), 41 deletions(-)
 rename {scilpy-bot-scripts => scilpy/utils}/generate_help_files.py (100%)
 create mode 100644 scilpy/utils/scilpy_bot.py

diff --git a/scilpy-bot-scripts/generate_help_files.py b/scilpy/utils/generate_help_files.py
similarity index 100%
rename from scilpy-bot-scripts/generate_help_files.py
rename to scilpy/utils/generate_help_files.py
diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py
new file mode 100644
index 000000000..a77f65e69
--- /dev/null
+++ b/scilpy/utils/scilpy_bot.py
@@ -0,0 +1,275 @@
+
+import re
+import json
+import ast
+import nltk
+import pathlib
+import subprocess
+from nltk.stem import PorterStemmer
+from colorama import init, Fore, Style
+
+stemmer = PorterStemmer()
+
+RED = '\033[31m'
+BOLD = '\033[1m'
+END_COLOR = '\033[0m'
+SPACING_CHAR = '='
+SPACING_LEN = 80
+
+# Path to the JSON file containing script information and keywords
+KEYWORDS_FILE_PATH = pathlib.Path(__file__).parent.parent.parent / 'scilpy-bot-scripts'/'Vocabulary'/'Keywords.json'
+SYNONYMS_FILE_PATH = pathlib.Path(__file__).parent.parent.parent / 'scilpy-bot-scripts'/'Vocabulary'/'Synonyms.json'
+
+
+
+
+OBJECTS = [
+    'aodf', 'bids', 'bingham', 'btensor', 'bundle', 'connectivity', 'denoising',
+    'dki', 'dti', 'dwi', 'fodf', 'freewater', 'frf', 'gradients', 'header', 'json',
+    'labels', 'lesions', 'mti', 'NODDI', 'sh', 'surface', 'tracking',
+    'tractogram', 'viz', 'volume', 'qball', 'rgb'
+]
+
+def prompt_user_for_object():
+    """
+    Prompts the user to select an object from the list of available objects.
+    """
+    print("Available objects:")
+    for idx, obj in enumerate(OBJECTS):
+        print(f"{idx + 1}. {obj}")
+    while True:
+        try:
+            choice = int(input("Choose the object you want to work on (enter the number): "))
+            if 1 <= choice <= len(OBJECTS):
+                return OBJECTS[choice - 1]
+            else:
+                print(f"Please enter a number between 1 and {len(OBJECTS)}.")
+        except ValueError:
+            print("Invalid input. Please enter a number.")
+
+
+def _make_title(text):
+    """
+    Returns a formatted title string with centered text and spacing
+    """
+    return f'{Fore.BLUE}{Style.BRIGHT}{text.center(80, "=")}{Style.RESET_ALL}'
+
+
+def _get_docstring_from_script_path(script):
+    """
+    Extract a python file's docstring from a filepath.
+
+    Parameters
+    ----------
+    script : str
+        Path to python file
+
+    Returns
+    -------
+    docstring : str
+        The file's docstring, or an empty string if there was no docstring.
+    """
+    with open(script, 'r') as reader:
+        file_contents = reader.read()
+    module = ast.parse(file_contents)
+    docstring = ast.get_docstring(module) or ''
+    return docstring
+
+
+def _split_first_sentence(text):
+    """
+    Split the first sentence from the rest of a string by finding the first
+    dot or newline. If there is no dot or newline, return the full string as
+    the first sentence, and None as the remaining text.
+
+    Parameters
+    ----------
+    text : str
+        Text to parse.
+
+    Returns
+    -------
+    first_sentence : str
+        The first sentence, or the full text if no dot or newline was found.
+    remaining : str
+        Everything after the first sentence.
+
+    """
+    candidates = ['. ', '.\n']
+    sentence_idx = -1
+    for candidate in candidates:
+        idx = text.find(candidate)
+        if idx != -1 and idx < sentence_idx or sentence_idx == -1:
+            sentence_idx = idx
+
+    split_idx = (sentence_idx + 1) or None
+    sentence = text[:split_idx]
+    remaining = text[split_idx:] if split_idx else ""
+    return sentence, remaining
+
+def _stem_keywords(keywords):
+    """
+    Stem a list of keywords using PorterStemmer.
+
+    Parameters
+    ----------
+    keywords : list of str
+        Keywords to be stemmed.
+
+    Returns
+    -------
+    list of str
+        Stemmed keywords.
+    """
+    return [stemmer.stem(keyword) for keyword in keywords]
+
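A short illustration (not part of the patch) of why both the query keywords and the searched text go through PorterStemmer in the functions around here: morphological variants collapse to a common stem, so a query for "tracking" also counts "tracked", at the cost of stems that are not always real words.

# Why keywords and text are both stemmed before counting matches.
from nltk.stem import PorterStemmer

stemmer = PorterStemmer()
assert stemmer.stem('tracking') == stemmer.stem('tracked') == 'track'
assert stemmer.stem('bundles') == 'bundl'   # stems need not be real words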
+def _stem_text(text):
+    """
+    Stem all words in a text using PorterStemmer.
+
+    Parameters
+    ----------
+    text : str
+        Text to be stemmed.
+
+    Returns
+    -------
+    str
+        Stemmed text.
+    """
+    words = nltk.word_tokenize(text)
+    return ' '.join([stemmer.stem(word) for word in words])
+
+def _stem_phrase(phrase):
+    """
+    Stem all words in a phrase using PorterStemmer.
+
+    Parameters
+    ----------
+    phrase : str
+        Phrase to be stemmed.
+
+    Returns
+    -------
+    str
+        Stemmed phrase.
+    """
+    words = phrase.split()
+    return ' '.join([stemmer.stem(word) for word in words])
+
+def _generate_help_files():
+    """
+    Call the external script generate_help_files to generate help files
+    """
+    script_path = pathlib.Path(__file__).parent / 'generate_help_files.py'
+    # Call the external script generate_help_files
+    subprocess.run(['python', script_path], check=True)
+
+def _highlight_keywords(text, stemmed_keywords):
+    """
+    Highlight the stemmed keywords in the given text using colorama.
+
+    Parameters
+    ----------
+    text : str
+        Text to highlight keywords in.
+    stemmed_keywords : list of str
+        Stemmed keywords to highlight.
+
+    Returns
+    -------
+    str
+        Text with highlighted keywords.
+    """
+    words = text.split()
+    highlighted_text = []
+    for word in words:
+        stemmed_word = stemmer.stem(word)
+        if stemmed_word in stemmed_keywords:
+            highlighted_text.append(f'{Fore.RED}{Style.BRIGHT}{word}{Style.RESET_ALL}')
+        else:
+            highlighted_text.append(word)
+    return ' '.join(highlighted_text)
+
+def _get_synonyms(keyword, synonyms_data):
+    """
+    Get synonyms for a given keyword from the synonyms data.
+
+    Parameters
+    ----------
+    keyword : str
+        Keyword to find synonyms for.
+    synonyms_data : dict
+        Dictionary containing synonyms data.
+
+    Returns
+    -------
+    list of str
+        List of synonyms for the given keyword.
+    """
+    for synonym_set in synonyms_data['synonyms']:
+        if keyword in synonym_set:
+            return synonym_set
+    return []
+
+def _extract_keywords_and_phrases(keywords):
+    """
+    Extract keywords and phrases from the provided list.
+
+    Parameters
+    ----------
+    keywords : list of str
+        List of keywords and phrases.
+
+    Returns
+    -------
+    list of str, list of str
+        List of individual keywords and list of phrases.
+    """
+    keywords_list = []
+    phrases_list = []
+
+    for keyword in keywords:
+        if ' ' in keyword:  # if the keyword contains a blank space, it is a phrase (more than one word)
+            phrases_list.append(keyword)
+        else:
+            keywords_list.append(keyword)
+    return keywords_list, phrases_list
+
+def _calculate_score(keywords, phrases, text, filename):
+    """
+    Calculate a score for how well the text and filename match the keywords.
+
+    Parameters
+    ----------
+    keywords : list of str
+        Keywords to search for.
+    phrases : list of str
+        Phrases to search for.
+    text : str
+        Text to search within.
+    filename : str
+        Filename to search within.
+
+    Returns
+    -------
+    dict
+        Score details based on the frequency of keywords in the text and filename.
+ """ + stemmed_text = _stem_text(text.lower()) + stemmed_filename = _stem_text(filename.lower()) + score_details = {'total_score': 0} + + for keyword in keywords: + keyword = keyword.lower() + keyword_score = stemmed_text.count(keyword) + stemmed_filename.count(keyword) + score_details[keyword] = keyword_score + score_details['total_score'] += keyword_score + + for phrase in phrases: + phrase_stemmed = _stem_text(phrase.lower()) + phrase_score = stemmed_text.count(phrase_stemmed) + score_details[phrase] = phrase_score + score_details['total_score'] += phrase_score + return score_details + diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index d7a82f74d..a42d19faf 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -2,15 +2,23 @@ # -*- coding: utf-8 -*- """ -Search through all of SCILPY scripts and their docstrings. The output of the -search will be the intersection of all provided keywords, found either in the -script name or in its docstring. -By default, print the matching filenames and the first sentence of the -docstring. If --verbose if provided, print the full docstring. +Search through all SCILPY scripts and their docstrings to find matches for the provided keywords. +The search will be performed across script names, docstrings, help files, keywords, and optionally synonyms. +The output will list the matching filenames along with the occurrences of each keyword, and their total score. + +- By default, the search includes synonyms for the keywords. +- Use --no_synonyms to exclude synonyms from the search. +- Use --search_category to limit the search to a specific category of scripts. +- Use --verbose to display the full docstring. +- Words enclosed in quotes will be searched as phrases, ensuring the words appear next to each other in the text. + Examples: scil_search_keywords.py tractogram filtering scil_search_keywords.py --search_parser tractogram filtering -v + scil_search_keywords.py "Spherical Harmonics" convert + scil_search_keywords.py --no_synonyms tractogram filtering + scil_search_keywords.py --search_category --verbose tractogram filtering """ import argparse @@ -21,9 +29,8 @@ import json from scilpy.utils.scilpy_bot import ( - _get_docstring_from_script_path, _split_first_sentence, _stem_keywords, - _stem_text, _stem_phrase, _generate_help_files, _highlight_keywords, - _get_synonyms, _extract_keywords_and_phrases, _calculate_score, _make_title + _get_docstring_from_script_path, _split_first_sentence, _stem_keywords, _stem_phrase, _generate_help_files, + _get_synonyms, _extract_keywords_and_phrases, _calculate_score, _make_title, prompt_user_for_object ) from scilpy.utils.scilpy_bot import SPACING_LEN, KEYWORDS_FILE_PATH, SYNONYMS_FILE_PATH @@ -31,26 +38,6 @@ nltk.download('punkt', quiet=True) -OBJECTS = [ - 'aodf', 'bids', 'bingham', 'btensor', 'bundle', 'connectivity', 'denoising', - 'dki', 'dti','dwi', 'fodf', 'freewater', 'frf', 'gradients', 'header', 'json', - 'labels', 'lesions', 'mti', 'NODDI', 'sh', 'surface', 'tracking', - 'tractogram', 'viz', 'volume', 'qball', 'rgb', 'lesions' -] - -def prompt_user_for_object(): - print("Available objects:") - for idx, obj in enumerate(OBJECTS): - print(f"{idx + 1}. {obj}") - while True: - try: - choice = int(input("Choose the object you want to work on (enter the number): ")) - if 1 <= choice <= len(OBJECTS): - return OBJECTS[choice - 1] - else: - print(f"Please enter a number between 1 and {len(OBJECTS)}.") - except ValueError: - print("Invalid input. 
Please enter a number.") def _build_arg_parser(): p = argparse.ArgumentParser(description=__doc__, @@ -59,9 +46,6 @@ def _build_arg_parser(): # help='Choose the object you want to work on.' ) p.add_argument('keywords', nargs='+', help='Search the provided list of keywords.') - - p.add_argument('--full_parser', action='store_true', - help='Display the full script argparser help.') p.add_argument('--search_category', action='store_true', help='Search within a specific category of scripts.') @@ -88,7 +72,7 @@ def main(): selected_object = prompt_user_for_object() keywords, phrases = _extract_keywords_and_phrases(args.keywords) - stemmed_keywords = _stem_keywords(args.keywords) + stemmed_keywords = _stem_keywords(keywords) stemmed_phrases = [_stem_phrase(phrase) for phrase in phrases] script_dir = pathlib.Path(__file__).parent @@ -109,7 +93,7 @@ def main(): # pattern to search for search_pattern = f'scil_{"{}_" if selected_object else ""}*.py' - def update_matches_and_scores(filename, search_text, score_details): + def update_matches_and_scores(filename, score_details): if score_details['total_score'] > 0: if filename not in matches: matches.append(filename) @@ -128,7 +112,7 @@ def update_matches_and_scores(filename, search_text, score_details): # Search through the docstring search_text = _get_docstring_from_script_path(str(script)) score_details = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, filename=filename) - update_matches_and_scores(filename, search_text, score_details) + update_matches_and_scores(filename, score_details) # Search in help files @@ -137,7 +121,7 @@ def update_matches_and_scores(filename, search_text, score_details): with open(help_file, 'r') as f: search_text = f.read() score_details = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, filename=filename) - update_matches_and_scores(filename, search_text, score_details) + update_matches_and_scores(filename, score_details) # Search in keywords file with open(KEYWORDS_FILE_PATH, 'r') as f: @@ -150,24 +134,30 @@ def update_matches_and_scores(filename, search_text, score_details): script_keywords = script['keywords'] search_text = ' '.join(script_keywords) score_details = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, script_name) - update_matches_and_scores(script_name, search_text, score_details) + update_matches_and_scores(script_name, score_details) - # Search in synonyms file if not args.no_synonyms is not specified: + # Search in synonyms file if not args.no_synonyms is not specified if not args.no_synonyms: with open(SYNONYMS_FILE_PATH, 'r') as f: synonyms_data = json.load(f) - for keyword in args.keywords: + for keyword in keywords + phrases: synonyms = _get_synonyms(keyword, synonyms_data) for script in sorted(script_dir.glob(search_pattern.format(selected_object))): filename = script.stem if filename == '__init__' or filename == 'scil_search_keywords': continue search_text = _get_docstring_from_script_path(str(script)) - if any(synonym in search_text for synonym in synonyms): - score_details = _calculate_score(synonyms, [], search_text, filename) - update_matches_and_scores(filename, search_text, score_details) + synonym_score = 0 + for synonym in synonyms: + if synonym in search_text: + synonym_score += search_text.count(synonym) + if synonym_score > 0: + if filename not in scores: + scores[filename] = {'total_score': 0} + scores[filename][keyword] = scores[filename].get(keyword, 0) + synonym_score + scores[filename]['total_score'] += synonym_score if not matches: 
logging.info(_make_title(' No results found! ')) From 52c0edbde1abfca7586b45219ea29e41fcf893d9 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Wed, 24 Jul 2024 02:03:47 -0400 Subject: [PATCH 26/69] change vocabulary path --- {scilpy-bot-scripts => scilpy/utils}/Vocabulary/Keywords.json | 0 {scilpy-bot-scripts => scilpy/utils}/Vocabulary/Synonyms.json | 0 {scilpy-bot-scripts => scilpy/utils}/Vocabulary/acronyms.json | 0 scilpy/utils/scilpy_bot.py | 4 ++-- 4 files changed, 2 insertions(+), 2 deletions(-) rename {scilpy-bot-scripts => scilpy/utils}/Vocabulary/Keywords.json (100%) rename {scilpy-bot-scripts => scilpy/utils}/Vocabulary/Synonyms.json (100%) rename {scilpy-bot-scripts => scilpy/utils}/Vocabulary/acronyms.json (100%) diff --git a/scilpy-bot-scripts/Vocabulary/Keywords.json b/scilpy/utils/Vocabulary/Keywords.json similarity index 100% rename from scilpy-bot-scripts/Vocabulary/Keywords.json rename to scilpy/utils/Vocabulary/Keywords.json diff --git a/scilpy-bot-scripts/Vocabulary/Synonyms.json b/scilpy/utils/Vocabulary/Synonyms.json similarity index 100% rename from scilpy-bot-scripts/Vocabulary/Synonyms.json rename to scilpy/utils/Vocabulary/Synonyms.json diff --git a/scilpy-bot-scripts/Vocabulary/acronyms.json b/scilpy/utils/Vocabulary/acronyms.json similarity index 100% rename from scilpy-bot-scripts/Vocabulary/acronyms.json rename to scilpy/utils/Vocabulary/acronyms.json diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py index a77f65e69..a9cddb2d0 100644 --- a/scilpy/utils/scilpy_bot.py +++ b/scilpy/utils/scilpy_bot.py @@ -17,8 +17,8 @@ SPACING_LEN = 80 # Path to the JSON file containing script information and keywords -KEYWORDS_FILE_PATH = pathlib.Path(__file__).parent.parent.parent / 'scilpy-bot-scripts'/'Vocabulary'/'Keywords.json' -SYNONYMS_FILE_PATH = pathlib.Path(__file__).parent.parent.parent / 'scilpy-bot-scripts'/'Vocabulary'/'Synonyms.json' +KEYWORDS_FILE_PATH = pathlib.Path(__file__).parent /'Vocabulary'/'Keywords.json' +SYNONYMS_FILE_PATH = pathlib.Path(__file__).parent /'Vocabulary'/'Synonyms.json' From cc82882b8b0cfc9c232a040791e1bdfeeb6c3472 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Wed, 24 Jul 2024 13:14:12 -0400 Subject: [PATCH 27/69] count synonyms occurence in the score --- scripts/scil_search_keywords.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index a42d19faf..3f388aa2b 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -75,6 +75,10 @@ def main(): stemmed_keywords = _stem_keywords(keywords) stemmed_phrases = [_stem_phrase(phrase) for phrase in phrases] + # Create a mapping of stemmed to original keywords(will be needed to display the occurence of the keywords) + keyword_mapping = {stem: orig for orig, stem in zip(keywords, stemmed_keywords)} + phrase_mapping = {stem: orig for orig, stem in zip(phrases, stemmed_phrases)} + script_dir = pathlib.Path(__file__).parent hidden_dir = script_dir / '.hidden' @@ -141,9 +145,14 @@ def update_matches_and_scores(filename, score_details): if not args.no_synonyms: with open(SYNONYMS_FILE_PATH, 'r') as f: synonyms_data = json.load(f) - - for keyword in keywords + phrases: + + # Create a mapping of synonyms to their original keywords + synonym_to_keyword = {} + for keyword in args.keywords: synonyms = _get_synonyms(keyword, synonyms_data) + for synonym in synonyms: + synonym_to_keyword[synonym] = keyword + for script in 
sorted(script_dir.glob(search_pattern.format(selected_object))):
                 filename = script.stem
                 if filename == '__init__' or filename == 'scil_search_keywords':
                     continue
                 search_text = _get_docstring_from_script_path(str(script))
                 synonym_score = 0
                 for synonym in synonyms:
                     if synonym in search_text:
                         synonym_score += search_text.count(synonym)
                 if synonym_score > 0:
                     if filename not in scores:
                         scores[filename] = {'total_score': 0}
+                        matches.append(filename)
                     scores[filename][keyword] = scores[filename].get(keyword, 0) + synonym_score
                     scores[filename]['total_score'] += synonym_score
-
+
     if not matches:
         logging.info(_make_title(' No results found! '))

     # Sort matches by score and print them
     else:
         sorted_matches = sorted(matches, key=lambda x: scores[x]['total_score'], reverse=True)
+
         logging.info(_make_title(' Results Ordered by Score '))
         for match in sorted_matches:
             #display_filename = match + '.py'
             logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match}{Style.RESET_ALL}")
             for word, score in scores[match].items():
                 if word != 'total_score':
-                    logging.info(f"{Fore.GREEN}Occurrence of '{word}': {score}{Style.RESET_ALL}")
+                    original_word = keyword_mapping.get(word, phrase_mapping.get(word, word))
+                    logging.info(f"{Fore.GREEN}Occurrence of '{original_word}': {score}{Style.RESET_ALL}")
             logging.info(f"Total Score: {scores[match]['total_score']}")
             logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}")
             logging.info("\n")

From 7843521f305752007ef105fee5fa934ea9ffee9c Mon Sep 17 00:00:00 2001
From: bouj1113
Date: Wed, 24 Jul 2024 13:18:51 -0400
Subject: [PATCH 28/69] change hidden file path

---
 scilpy/utils/generate_help_files.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/scilpy/utils/generate_help_files.py b/scilpy/utils/generate_help_files.py
index 5776c24fb..36991e258 100644
--- a/scilpy/utils/generate_help_files.py
+++ b/scilpy/utils/generate_help_files.py
@@ -15,12 +15,11 @@
 """

 import subprocess
-from pathlib import Path
+import pathlib

+scripts_dir = pathlib.Path(__file__).parent.parent.parent / 'scripts'

-scripts_dir = Path('scripts/')
-
 # Hidden directory to store help files
 hidden_dir = scripts_dir / '.hidden'
 hidden_dir.mkdir(exist_ok=True)

From b91d93cde2300347c079c65970456a8d5d160776 Mon Sep 17 00:00:00 2001
From: bouj1113
Date: Wed, 24 Jul 2024 17:24:22 -0400
Subject: [PATCH 29/69] correct the score for synonyms search

---
 scripts/scil_search_keywords.py | 63 ++++++++++++++++++++-------------
 1 file changed, 39 insertions(+), 24 deletions(-)

diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py
index 3f388aa2b..afc57c903 100755
--- a/scripts/scil_search_keywords.py
+++ b/scripts/scil_search_keywords.py
@@ -24,15 +24,15 @@
 import argparse
 import logging
 import pathlib
+
 import nltk
-from colorama import init, Fore, Style
+from colorama import Fore, Style
 import json

 from scilpy.utils.scilpy_bot import (
-    _get_docstring_from_script_path, _split_first_sentence, _stem_keywords, _stem_phrase, _generate_help_files,
+    _get_docstring_from_script_path, _stem_keywords, _stem_phrase, _generate_help_files,
     _get_synonyms, _extract_keywords_and_phrases, _calculate_score, _make_title, prompt_user_for_object
 )
-
 from scilpy.utils.scilpy_bot import SPACING_LEN, KEYWORDS_FILE_PATH, SYNONYMS_FILE_PATH
 from scilpy.io.utils import add_verbose_arg

@@ -42,8 +42,7 @@ def _build_arg_parser():
     p = argparse.ArgumentParser(description=__doc__,
                                 formatter_class=argparse.RawTextHelpFormatter)

-    #p.add_argument('--object', choices=OBJECTS, required=True,
-    #                help='Choose the object you want to work on.'
) + p.add_argument('keywords', nargs='+', help='Search the provided list of keywords.') @@ -71,6 +70,7 @@ def main(): if args.search_category: selected_object = prompt_user_for_object() + #keywords are single words and phrases are keywords that contain more than one word keywords, phrases = _extract_keywords_and_phrases(args.keywords) stemmed_keywords = _stem_keywords(keywords) stemmed_phrases = [_stem_phrase(phrase) for phrase in phrases] @@ -98,6 +98,22 @@ def main(): search_pattern = f'scil_{"{}_" if selected_object else ""}*.py' def update_matches_and_scores(filename, score_details): + """ + Update the matches and scores for the given filename based on the score details. + + Parameters + ---------- + filename : str + The name of the script file being analyzed. + score_details : dict + A dictionary containing the scores for the keywords and phrases found in the script. + This dictionary should have a 'total_score' key indicating the cumulative score. + + Returns + ------- + None + Just updates the global `matches` and `scores` lists/dictionaries. + """ if score_details['total_score'] > 0: if filename not in matches: matches.append(filename) @@ -127,6 +143,7 @@ def update_matches_and_scores(filename, score_details): score_details = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, filename=filename) update_matches_and_scores(filename, score_details) + # Search in keywords file with open(KEYWORDS_FILE_PATH, 'r') as f: keywords_data = json.load(f) @@ -145,45 +162,43 @@ def update_matches_and_scores(filename, score_details): if not args.no_synonyms: with open(SYNONYMS_FILE_PATH, 'r') as f: synonyms_data = json.load(f) - - # Create a mapping of synonyms to their original keywords - synonym_to_keyword = {} - for keyword in args.keywords: + + for keyword in keywords + phrases: synonyms = _get_synonyms(keyword, synonyms_data) - for synonym in synonyms: - synonym_to_keyword[synonym] = keyword for script in sorted(script_dir.glob(search_pattern.format(selected_object))): filename = script.stem if filename == '__init__' or filename == 'scil_search_keywords': continue search_text = _get_docstring_from_script_path(str(script)) - synonym_score = 0 + score_details = scores.get(filename, {'total_score': 0}) # Initialize or get existing score_details for the script + for synonym in synonyms: if synonym in search_text: - synonym_score += search_text.count(synonym) - if synonym_score > 0: - if filename not in scores: - scores[filename] = {'total_score': 0} - matches.append(filename) - scores[filename][keyword] = scores[filename].get(keyword, 0) + synonym_score - scores[filename]['total_score'] += synonym_score - + score_details[keyword+' synonyms'] = score_details.get(keyword +' synonyms', 0) + search_text.count(synonym) + score_details['total_score'] += search_text.count(synonym) + + update_matches_and_scores(filename, score_details) + if not matches: logging.info(_make_title(' No results found! 
'))

-    # Sort matches by score and print them
+    # Sort matches by score and display them
     else:
         sorted_matches = sorted(matches, key=lambda x: scores[x]['total_score'], reverse=True)
+
         logging.info(_make_title(' Results Ordered by Score '))
         for match in sorted_matches:
-            #display_filename = match + '.py'
             logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match}{Style.RESET_ALL}")
+
             for word, score in scores[match].items():
                 if word != 'total_score':
-                    logging.info(f"{Fore.GREEN}Occurrence of '{word}': {score}{Style.RESET_ALL}")
+                    if word.endswith(' synonyms'):
+                        logging.info(f"{Fore.GREEN}Occurrence of '{word}': {score}{Style.RESET_ALL}")
+                    else:
+                        original_word = keyword_mapping.get(word, phrase_mapping.get(word, word))
+                        logging.info(f"{Fore.GREEN}Occurrence of '{original_word}': {score}{Style.RESET_ALL}")
+
             logging.info(f"Total Score: {scores[match]['total_score']}")
             logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}")
             logging.info("\n")

From eb61787ad1ab6fdf88af01a0571181cf360c62de Mon Sep 17 00:00:00 2001
From: bouj1113
Date: Wed, 24 Jul 2024 17:51:28 -0400
Subject: [PATCH 30/69] clean code

---
 scilpy/utils/scilpy_bot.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py
index a9cddb2d0..a56c6d2e7 100644
--- a/scilpy/utils/scilpy_bot.py
+++ b/scilpy/utils/scilpy_bot.py
@@ -1,6 +1,3 @@
-
-import re
-import json
 import ast
 import nltk
 import pathlib

From a9b9a20405558adc7d448e79beb483c874c82248 Mon Sep 17 00:00:00 2001
From: bouj1113
Date: Thu, 25 Jul 2024 12:26:03 -0400
Subject: [PATCH 31/69] add comment

---
 scripts/scil_search_keywords.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py
index afc57c903..4e770b5c8 100755
--- a/scripts/scil_search_keywords.py
+++ b/scripts/scil_search_keywords.py
@@ -175,6 +175,7 @@ def update_matches_and_scores(filename, score_details):

                 for synonym in synonyms:
                     if synonym in search_text:
+                        # Update the score_details with the count of each synonym found
                         score_details[keyword+' synonyms'] = score_details.get(keyword +' synonyms', 0) + search_text.count(synonym)
                         score_details['total_score'] += search_text.count(synonym)

From 1a963622b6ef8c0ec2f8e8b77670490716aaa6bd Mon Sep 17 00:00:00 2001
From: bouj1113
Date: Mon, 29 Jul 2024 20:16:28 -0400
Subject: [PATCH 32/69] add hidden folder to gitignore

---
 .gitignore | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index bf7527f33..b5239c0a0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -70,4 +70,7 @@ target/
 .vscode/

 # Virtualenv
-venv/
\ No newline at end of file
+venv/
+
+# Hidden folder
+.hidden/
\ No newline at end of file

From a63d0c75580ceda4dee55a3596e315a1a5713e42 Mon Sep 17 00:00:00 2001
From: bouj1113
Date: Mon, 29 Jul 2024 20:32:22 -0400
Subject: [PATCH 33/69] add nltk to requirements

---
 requirements.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/requirements.txt b/requirements.txt
index cba514485..c2d725aa0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -22,6 +22,7 @@ kiwisolver==1.4.*
 matplotlib==3.6.*
 nibabel==5.2.*
 nilearn==0.9.*
+nltk==3.8.*
 numpy==1.23.*
 openpyxl==3.0.*
 packaging == 23.2.*

From 4e85268f87889f10f3a1c6acf41c1883e6c1a353 Mon Sep 17 00:00:00 2001
From: bouj1113
Date: Thu, 1 Aug 2024 13:40:01 -0400
Subject: [PATCH 34/69] remove .hidden folder

---
 scripts/.hidden/scil_NODDI_maps.py.help       |  57 ------
 scripts/.hidden/scil_NODDI_priors.py.help     |
70 ------- scripts/.hidden/scil_aodf_metrics.py.help | 93 --------- scripts/.hidden/scil_bids_validate.py.help | 42 ----- scripts/.hidden/scil_bingham_metrics.py.help | 52 ------ scripts/.hidden/scil_btensor_metrics.py.help | 94 ---------- .../scil_bundle_clean_qbx_clusters.py.help | 54 ------ .../scil_bundle_compute_centroid.py.help | 24 --- .../scil_bundle_compute_endpoints_map.py.help | 42 ----- scripts/.hidden/scil_bundle_diameter.py.help | 70 ------- .../scil_bundle_filter_by_occurence.py.help | 38 ---- .../scil_bundle_generate_priors.py.help | 58 ------ scripts/.hidden/scil_bundle_label_map.py.help | 43 ----- .../scil_bundle_mean_fixel_afd.py.help | 50 ----- ...il_bundle_mean_fixel_afd_from_hdf5.py.help | 50 ----- ...l_bundle_mean_fixel_bingham_metric.py.help | 48 ----- scripts/.hidden/scil_bundle_mean_std.py.help | 51 ----- .../scil_bundle_pairwise_comparison.py.help | 57 ------ .../scil_bundle_reject_outliers.py.help | 35 ---- ..._score_many_bundles_one_tractogram.py.help | 110 ----------- ...ore_same_bundle_many_segmentations.py.help | 62 ------ .../scil_bundle_shape_measures.py.help | 64 ------- .../scil_bundle_uniformize_endpoints.py.help | 44 ----- .../scil_bundle_volume_per_label.py.help | 31 --- ...l_connectivity_compare_populations.py.help | 63 ------- ...scil_connectivity_compute_matrices.py.help | 91 --------- .../scil_connectivity_compute_pca.py.help | 75 -------- .../.hidden/scil_connectivity_filter.py.help | 56 ------ .../scil_connectivity_graph_measures.py.help | 63 ------- ...nectivity_hdf5_average_density_map.py.help | 36 ---- .../.hidden/scil_connectivity_math.py.help | 150 --------------- .../scil_connectivity_normalize.py.help | 76 -------- ...il_connectivity_pairwise_agreement.py.help | 33 ---- .../scil_connectivity_print_filenames.py.help | 32 ---- .../scil_connectivity_reorder_rois.py.help | 51 ----- .../.hidden/scil_denoising_nlmeans.py.help | 28 --- scripts/.hidden/scil_dki_metrics.py.help | 105 ----------- .../.hidden/scil_dti_convert_tensors.py.help | 37 ---- scripts/.hidden/scil_dti_metrics.py.help | 101 ---------- .../.hidden/scil_dwi_apply_bias_field.py.help | 24 --- scripts/.hidden/scil_dwi_compute_snr.py.help | 59 ------ scripts/.hidden/scil_dwi_concatenate.py.help | 31 --- scripts/.hidden/scil_dwi_convert_FDF.py.help | 31 --- .../scil_dwi_detect_volume_outliers.py.help | 39 ---- scripts/.hidden/scil_dwi_extract_b0.py.help | 46 ----- .../.hidden/scil_dwi_extract_shell.py.help | 45 ----- .../.hidden/scil_dwi_powder_average.py.help | 40 ---- .../scil_dwi_prepare_eddy_command.py.help | 64 ------- .../scil_dwi_prepare_topup_command.py.help | 44 ----- .../.hidden/scil_dwi_reorder_philips.py.help | 24 --- .../.hidden/scil_dwi_split_by_indices.py.help | 28 --- scripts/.hidden/scil_dwi_to_sh.py.help | 50 ----- .../scil_fodf_max_in_ventricles.py.help | 56 ------ scripts/.hidden/scil_fodf_memsmt.py.help | 99 ---------- scripts/.hidden/scil_fodf_metrics.py.help | 88 --------- scripts/.hidden/scil_fodf_msmt.py.help | 71 ------- scripts/.hidden/scil_fodf_ssst.py.help | 52 ------ scripts/.hidden/scil_fodf_to_bingham.py.help | 51 ----- scripts/.hidden/scil_freewater_maps.py.help | 58 ------ scripts/.hidden/scil_freewater_priors.py.help | 71 ------- scripts/.hidden/scil_frf_mean.py.help | 22 --- scripts/.hidden/scil_frf_memsmt.py.help | 122 ------------ scripts/.hidden/scil_frf_msmt.py.help | 114 ------------ .../scil_frf_set_diffusivities.py.help | 30 --- scripts/.hidden/scil_frf_ssst.py.help | 61 ------ scripts/.hidden/scil_get_version.py.help | 16 -- 
.../scil_gradients_apply_transform.py.help | 21 --- .../.hidden/scil_gradients_convert.py.help | 22 --- .../scil_gradients_generate_sampling.py.help | 67 ------- .../scil_gradients_modify_axes.py.help | 28 --- .../scil_gradients_round_bvals.py.help | 33 ---- .../scil_gradients_validate_correct.py.help | 48 ----- ...il_gradients_validate_correct_eddy.py.help | 25 --- .../.hidden/scil_header_print_info.py.help | 20 -- ...scil_header_validate_compatibility.py.help | 22 --- .../scil_json_convert_entries_to_xlsx.py.help | 29 --- .../scil_json_harmonize_entries.py.help | 31 --- .../.hidden/scil_json_merge_entries.py.help | 55 ------ scripts/.hidden/scil_labels_combine.py.help | 48 ----- scripts/.hidden/scil_labels_dilate.py.help | 51 ----- scripts/.hidden/scil_labels_remove.py.help | 31 --- .../scil_labels_split_volume_by_ids.py.help | 32 ---- .../scil_labels_split_volume_from_lut.py.help | 31 --- scripts/.hidden/scil_lesions_info.py.help | 50 ----- .../.hidden/scil_mti_adjust_B1_header.py.help | 17 -- scripts/.hidden/scil_mti_maps_MT.py.help | 150 --------------- scripts/.hidden/scil_mti_maps_ihMT.py.help | 164 ---------------- .../.hidden/scil_plot_stats_per_point.py.help | 33 ---- scripts/.hidden/scil_qball_metrics.py.help | 71 ------- scripts/.hidden/scil_rgb_convert.py.help | 33 ---- scripts/.hidden/scil_sh_convert.py.help | 39 ---- scripts/.hidden/scil_sh_fusion.py.help | 36 ---- scripts/.hidden/scil_sh_to_aodf.py.help | 96 ---------- scripts/.hidden/scil_sh_to_rish.py.help | 36 ---- scripts/.hidden/scil_sh_to_sf.py.help | 67 ------- .../scil_stats_group_comparison.py.help | 70 ------- .../scil_surface_apply_transform.py.help | 38 ---- scripts/.hidden/scil_surface_convert.py.help | 32 ---- scripts/.hidden/scil_surface_flip.py.help | 25 --- scripts/.hidden/scil_surface_smooth.py.help | 36 ---- scripts/.hidden/scil_tracking_local.py.help | 167 ----------------- .../.hidden/scil_tracking_local_dev.py.help | 158 ---------------- scripts/.hidden/scil_tracking_pft.py.help | 107 ----------- .../.hidden/scil_tracking_pft_maps.py.help | 31 --- .../scil_tracking_pft_maps_edit.py.help | 21 --- .../scil_tractogram_apply_transform.py.help | 78 -------- ...tractogram_apply_transform_to_hdf5.py.help | 52 ------ ...cil_tractogram_assign_custom_color.py.help | 0 ...il_tractogram_assign_uniform_color.py.help | 50 ----- .../.hidden/scil_tractogram_commit.py.help | 160 ---------------- .../.hidden/scil_tractogram_compress.py.help | 22 --- .../scil_tractogram_compute_TODI.py.help | 74 -------- ...cil_tractogram_compute_density_map.py.help | 28 --- .../.hidden/scil_tractogram_convert.py.help | 28 --- ...cil_tractogram_convert_hdf5_to_trk.py.help | 50 ----- .../scil_tractogram_count_streamlines.py.help | 24 --- .../scil_tractogram_cut_streamlines.py.help | 60 ------ .../scil_tractogram_detect_loops.py.help | 57 ------ .../.hidden/scil_tractogram_dpp_math.py.help | 76 -------- .../scil_tractogram_extract_ushape.py.help | 41 ---- .../scil_tractogram_filter_by_anatomy.py.help | 111 ----------- .../scil_tractogram_filter_by_length.py.help | 41 ---- ...l_tractogram_filter_by_orientation.py.help | 65 ------- .../scil_tractogram_filter_by_roi.py.help | 127 ------------- .../.hidden/scil_tractogram_fix_trk.py.help | 80 -------- scripts/.hidden/scil_tractogram_flip.py.help | 27 --- scripts/.hidden/scil_tractogram_math.py.help | 75 -------- ...cil_tractogram_pairwise_comparison.py.help | 51 ----- .../scil_tractogram_print_info.py.help | 32 ---- ...ctogram_project_map_to_streamlines.py.help | 68 ------- 
...ctogram_project_streamlines_to_map.py.help | 77 -------- scripts/.hidden/scil_tractogram_qbx.py.help | 43 ----- .../.hidden/scil_tractogram_register.py.help | 42 ----- .../scil_tractogram_remove_invalid.py.help | 41 ---- .../.hidden/scil_tractogram_resample.py.help | 72 ------- ...scil_tractogram_resample_nb_points.py.help | 28 --- .../scil_tractogram_seed_density_map.py.help | 29 --- .../scil_tractogram_segment_and_score.py.help | 164 ---------------- .../scil_tractogram_segment_bundles.py.help | 65 ------- ...m_segment_bundles_for_connectivity.py.help | 105 ----------- ...scil_tractogram_segment_one_bundle.py.help | 62 ------ .../.hidden/scil_tractogram_shuffle.py.help | 22 --- .../.hidden/scil_tractogram_smooth.py.help | 51 ----- scripts/.hidden/scil_tractogram_split.py.help | 48 ----- scripts/.hidden/scil_viz_bingham_fit.py.help | 38 ---- scripts/.hidden/scil_viz_bundle.py.help | 56 ------ .../scil_viz_bundle_screenshot_mni.py.help | 48 ----- .../scil_viz_bundle_screenshot_mosaic.py.help | 49 ----- scripts/.hidden/scil_viz_connectivity.py.help | 0 .../.hidden/scil_viz_dti_screenshot.py.help | 30 --- scripts/.hidden/scil_viz_fodf.py.help | 119 ------------ .../scil_viz_gradients_screenshot.py.help | 38 ---- .../.hidden/scil_viz_tractogram_seeds.py.help | 21 --- .../scil_viz_tractogram_seeds_3d.py.help | 46 ----- .../.hidden/scil_viz_volume_histogram.py.help | 30 --- .../scil_viz_volume_scatterplot.py.help | 94 ---------- .../scil_viz_volume_screenshot.py.help | 118 ------------ .../scil_viz_volume_screenshot_mosaic.py.help | 96 ---------- .../scil_volume_apply_transform.py.help | 27 --- .../.hidden/scil_volume_b0_synthesis.py.help | 34 ---- .../scil_volume_count_non_zero_voxels.py.help | 31 --- scripts/.hidden/scil_volume_crop.py.help | 30 --- scripts/.hidden/scil_volume_flip.py.help | 18 -- scripts/.hidden/scil_volume_math.py.help | 176 ------------------ ...scil_volume_remove_outliers_ransac.py.help | 26 --- scripts/.hidden/scil_volume_resample.py.help | 36 ---- .../scil_volume_reshape_to_reference.py.help | 29 --- .../.hidden/scil_volume_stats_in_ROI.py.help | 39 ---- .../scil_volume_stats_in_labels.py.help | 22 --- 169 files changed, 9420 deletions(-) delete mode 100644 scripts/.hidden/scil_NODDI_maps.py.help delete mode 100644 scripts/.hidden/scil_NODDI_priors.py.help delete mode 100644 scripts/.hidden/scil_aodf_metrics.py.help delete mode 100644 scripts/.hidden/scil_bids_validate.py.help delete mode 100644 scripts/.hidden/scil_bingham_metrics.py.help delete mode 100644 scripts/.hidden/scil_btensor_metrics.py.help delete mode 100644 scripts/.hidden/scil_bundle_clean_qbx_clusters.py.help delete mode 100644 scripts/.hidden/scil_bundle_compute_centroid.py.help delete mode 100644 scripts/.hidden/scil_bundle_compute_endpoints_map.py.help delete mode 100644 scripts/.hidden/scil_bundle_diameter.py.help delete mode 100644 scripts/.hidden/scil_bundle_filter_by_occurence.py.help delete mode 100644 scripts/.hidden/scil_bundle_generate_priors.py.help delete mode 100644 scripts/.hidden/scil_bundle_label_map.py.help delete mode 100644 scripts/.hidden/scil_bundle_mean_fixel_afd.py.help delete mode 100644 scripts/.hidden/scil_bundle_mean_fixel_afd_from_hdf5.py.help delete mode 100644 scripts/.hidden/scil_bundle_mean_fixel_bingham_metric.py.help delete mode 100644 scripts/.hidden/scil_bundle_mean_std.py.help delete mode 100644 scripts/.hidden/scil_bundle_pairwise_comparison.py.help delete mode 100644 scripts/.hidden/scil_bundle_reject_outliers.py.help delete mode 100644 
scripts/.hidden/scil_bundle_score_many_bundles_one_tractogram.py.help delete mode 100644 scripts/.hidden/scil_bundle_score_same_bundle_many_segmentations.py.help delete mode 100644 scripts/.hidden/scil_bundle_shape_measures.py.help delete mode 100644 scripts/.hidden/scil_bundle_uniformize_endpoints.py.help delete mode 100644 scripts/.hidden/scil_bundle_volume_per_label.py.help delete mode 100644 scripts/.hidden/scil_connectivity_compare_populations.py.help delete mode 100644 scripts/.hidden/scil_connectivity_compute_matrices.py.help delete mode 100644 scripts/.hidden/scil_connectivity_compute_pca.py.help delete mode 100644 scripts/.hidden/scil_connectivity_filter.py.help delete mode 100644 scripts/.hidden/scil_connectivity_graph_measures.py.help delete mode 100644 scripts/.hidden/scil_connectivity_hdf5_average_density_map.py.help delete mode 100644 scripts/.hidden/scil_connectivity_math.py.help delete mode 100644 scripts/.hidden/scil_connectivity_normalize.py.help delete mode 100644 scripts/.hidden/scil_connectivity_pairwise_agreement.py.help delete mode 100644 scripts/.hidden/scil_connectivity_print_filenames.py.help delete mode 100644 scripts/.hidden/scil_connectivity_reorder_rois.py.help delete mode 100644 scripts/.hidden/scil_denoising_nlmeans.py.help delete mode 100644 scripts/.hidden/scil_dki_metrics.py.help delete mode 100644 scripts/.hidden/scil_dti_convert_tensors.py.help delete mode 100644 scripts/.hidden/scil_dti_metrics.py.help delete mode 100644 scripts/.hidden/scil_dwi_apply_bias_field.py.help delete mode 100644 scripts/.hidden/scil_dwi_compute_snr.py.help delete mode 100644 scripts/.hidden/scil_dwi_concatenate.py.help delete mode 100644 scripts/.hidden/scil_dwi_convert_FDF.py.help delete mode 100644 scripts/.hidden/scil_dwi_detect_volume_outliers.py.help delete mode 100644 scripts/.hidden/scil_dwi_extract_b0.py.help delete mode 100644 scripts/.hidden/scil_dwi_extract_shell.py.help delete mode 100644 scripts/.hidden/scil_dwi_powder_average.py.help delete mode 100644 scripts/.hidden/scil_dwi_prepare_eddy_command.py.help delete mode 100644 scripts/.hidden/scil_dwi_prepare_topup_command.py.help delete mode 100644 scripts/.hidden/scil_dwi_reorder_philips.py.help delete mode 100644 scripts/.hidden/scil_dwi_split_by_indices.py.help delete mode 100644 scripts/.hidden/scil_dwi_to_sh.py.help delete mode 100644 scripts/.hidden/scil_fodf_max_in_ventricles.py.help delete mode 100644 scripts/.hidden/scil_fodf_memsmt.py.help delete mode 100644 scripts/.hidden/scil_fodf_metrics.py.help delete mode 100644 scripts/.hidden/scil_fodf_msmt.py.help delete mode 100644 scripts/.hidden/scil_fodf_ssst.py.help delete mode 100644 scripts/.hidden/scil_fodf_to_bingham.py.help delete mode 100644 scripts/.hidden/scil_freewater_maps.py.help delete mode 100644 scripts/.hidden/scil_freewater_priors.py.help delete mode 100644 scripts/.hidden/scil_frf_mean.py.help delete mode 100644 scripts/.hidden/scil_frf_memsmt.py.help delete mode 100644 scripts/.hidden/scil_frf_msmt.py.help delete mode 100644 scripts/.hidden/scil_frf_set_diffusivities.py.help delete mode 100644 scripts/.hidden/scil_frf_ssst.py.help delete mode 100644 scripts/.hidden/scil_get_version.py.help delete mode 100644 scripts/.hidden/scil_gradients_apply_transform.py.help delete mode 100644 scripts/.hidden/scil_gradients_convert.py.help delete mode 100644 scripts/.hidden/scil_gradients_generate_sampling.py.help delete mode 100644 scripts/.hidden/scil_gradients_modify_axes.py.help delete mode 100644 scripts/.hidden/scil_gradients_round_bvals.py.help 
delete mode 100644 scripts/.hidden/scil_gradients_validate_correct.py.help delete mode 100644 scripts/.hidden/scil_gradients_validate_correct_eddy.py.help delete mode 100644 scripts/.hidden/scil_header_print_info.py.help delete mode 100644 scripts/.hidden/scil_header_validate_compatibility.py.help delete mode 100644 scripts/.hidden/scil_json_convert_entries_to_xlsx.py.help delete mode 100644 scripts/.hidden/scil_json_harmonize_entries.py.help delete mode 100644 scripts/.hidden/scil_json_merge_entries.py.help delete mode 100644 scripts/.hidden/scil_labels_combine.py.help delete mode 100644 scripts/.hidden/scil_labels_dilate.py.help delete mode 100644 scripts/.hidden/scil_labels_remove.py.help delete mode 100644 scripts/.hidden/scil_labels_split_volume_by_ids.py.help delete mode 100644 scripts/.hidden/scil_labels_split_volume_from_lut.py.help delete mode 100644 scripts/.hidden/scil_lesions_info.py.help delete mode 100644 scripts/.hidden/scil_mti_adjust_B1_header.py.help delete mode 100644 scripts/.hidden/scil_mti_maps_MT.py.help delete mode 100644 scripts/.hidden/scil_mti_maps_ihMT.py.help delete mode 100644 scripts/.hidden/scil_plot_stats_per_point.py.help delete mode 100644 scripts/.hidden/scil_qball_metrics.py.help delete mode 100644 scripts/.hidden/scil_rgb_convert.py.help delete mode 100644 scripts/.hidden/scil_sh_convert.py.help delete mode 100644 scripts/.hidden/scil_sh_fusion.py.help delete mode 100644 scripts/.hidden/scil_sh_to_aodf.py.help delete mode 100644 scripts/.hidden/scil_sh_to_rish.py.help delete mode 100644 scripts/.hidden/scil_sh_to_sf.py.help delete mode 100644 scripts/.hidden/scil_stats_group_comparison.py.help delete mode 100644 scripts/.hidden/scil_surface_apply_transform.py.help delete mode 100644 scripts/.hidden/scil_surface_convert.py.help delete mode 100644 scripts/.hidden/scil_surface_flip.py.help delete mode 100644 scripts/.hidden/scil_surface_smooth.py.help delete mode 100644 scripts/.hidden/scil_tracking_local.py.help delete mode 100644 scripts/.hidden/scil_tracking_local_dev.py.help delete mode 100644 scripts/.hidden/scil_tracking_pft.py.help delete mode 100644 scripts/.hidden/scil_tracking_pft_maps.py.help delete mode 100644 scripts/.hidden/scil_tracking_pft_maps_edit.py.help delete mode 100644 scripts/.hidden/scil_tractogram_apply_transform.py.help delete mode 100644 scripts/.hidden/scil_tractogram_apply_transform_to_hdf5.py.help delete mode 100644 scripts/.hidden/scil_tractogram_assign_custom_color.py.help delete mode 100644 scripts/.hidden/scil_tractogram_assign_uniform_color.py.help delete mode 100644 scripts/.hidden/scil_tractogram_commit.py.help delete mode 100644 scripts/.hidden/scil_tractogram_compress.py.help delete mode 100644 scripts/.hidden/scil_tractogram_compute_TODI.py.help delete mode 100644 scripts/.hidden/scil_tractogram_compute_density_map.py.help delete mode 100644 scripts/.hidden/scil_tractogram_convert.py.help delete mode 100644 scripts/.hidden/scil_tractogram_convert_hdf5_to_trk.py.help delete mode 100644 scripts/.hidden/scil_tractogram_count_streamlines.py.help delete mode 100644 scripts/.hidden/scil_tractogram_cut_streamlines.py.help delete mode 100644 scripts/.hidden/scil_tractogram_detect_loops.py.help delete mode 100644 scripts/.hidden/scil_tractogram_dpp_math.py.help delete mode 100644 scripts/.hidden/scil_tractogram_extract_ushape.py.help delete mode 100644 scripts/.hidden/scil_tractogram_filter_by_anatomy.py.help delete mode 100644 scripts/.hidden/scil_tractogram_filter_by_length.py.help delete mode 100644 
scripts/.hidden/scil_tractogram_filter_by_orientation.py.help delete mode 100644 scripts/.hidden/scil_tractogram_filter_by_roi.py.help delete mode 100644 scripts/.hidden/scil_tractogram_fix_trk.py.help delete mode 100644 scripts/.hidden/scil_tractogram_flip.py.help delete mode 100644 scripts/.hidden/scil_tractogram_math.py.help delete mode 100644 scripts/.hidden/scil_tractogram_pairwise_comparison.py.help delete mode 100644 scripts/.hidden/scil_tractogram_print_info.py.help delete mode 100644 scripts/.hidden/scil_tractogram_project_map_to_streamlines.py.help delete mode 100644 scripts/.hidden/scil_tractogram_project_streamlines_to_map.py.help delete mode 100644 scripts/.hidden/scil_tractogram_qbx.py.help delete mode 100644 scripts/.hidden/scil_tractogram_register.py.help delete mode 100644 scripts/.hidden/scil_tractogram_remove_invalid.py.help delete mode 100644 scripts/.hidden/scil_tractogram_resample.py.help delete mode 100644 scripts/.hidden/scil_tractogram_resample_nb_points.py.help delete mode 100644 scripts/.hidden/scil_tractogram_seed_density_map.py.help delete mode 100644 scripts/.hidden/scil_tractogram_segment_and_score.py.help delete mode 100644 scripts/.hidden/scil_tractogram_segment_bundles.py.help delete mode 100644 scripts/.hidden/scil_tractogram_segment_bundles_for_connectivity.py.help delete mode 100644 scripts/.hidden/scil_tractogram_segment_one_bundle.py.help delete mode 100644 scripts/.hidden/scil_tractogram_shuffle.py.help delete mode 100644 scripts/.hidden/scil_tractogram_smooth.py.help delete mode 100644 scripts/.hidden/scil_tractogram_split.py.help delete mode 100644 scripts/.hidden/scil_viz_bingham_fit.py.help delete mode 100644 scripts/.hidden/scil_viz_bundle.py.help delete mode 100644 scripts/.hidden/scil_viz_bundle_screenshot_mni.py.help delete mode 100644 scripts/.hidden/scil_viz_bundle_screenshot_mosaic.py.help delete mode 100644 scripts/.hidden/scil_viz_connectivity.py.help delete mode 100644 scripts/.hidden/scil_viz_dti_screenshot.py.help delete mode 100644 scripts/.hidden/scil_viz_fodf.py.help delete mode 100644 scripts/.hidden/scil_viz_gradients_screenshot.py.help delete mode 100644 scripts/.hidden/scil_viz_tractogram_seeds.py.help delete mode 100644 scripts/.hidden/scil_viz_tractogram_seeds_3d.py.help delete mode 100644 scripts/.hidden/scil_viz_volume_histogram.py.help delete mode 100644 scripts/.hidden/scil_viz_volume_scatterplot.py.help delete mode 100644 scripts/.hidden/scil_viz_volume_screenshot.py.help delete mode 100644 scripts/.hidden/scil_viz_volume_screenshot_mosaic.py.help delete mode 100644 scripts/.hidden/scil_volume_apply_transform.py.help delete mode 100644 scripts/.hidden/scil_volume_b0_synthesis.py.help delete mode 100644 scripts/.hidden/scil_volume_count_non_zero_voxels.py.help delete mode 100644 scripts/.hidden/scil_volume_crop.py.help delete mode 100644 scripts/.hidden/scil_volume_flip.py.help delete mode 100644 scripts/.hidden/scil_volume_math.py.help delete mode 100644 scripts/.hidden/scil_volume_remove_outliers_ransac.py.help delete mode 100644 scripts/.hidden/scil_volume_resample.py.help delete mode 100644 scripts/.hidden/scil_volume_reshape_to_reference.py.help delete mode 100644 scripts/.hidden/scil_volume_stats_in_ROI.py.help delete mode 100644 scripts/.hidden/scil_volume_stats_in_labels.py.help diff --git a/scripts/.hidden/scil_NODDI_maps.py.help b/scripts/.hidden/scil_NODDI_maps.py.help deleted file mode 100644 index 2e1ee2efd..000000000 --- a/scripts/.hidden/scil_NODDI_maps.py.help +++ /dev/null @@ -1,57 +0,0 @@ -usage: 
scil_NODDI_maps.py [-h] [--mask MASK] [--out_dir OUT_DIR] - [--tolerance tol] [--skip_b0_check] - [--para_diff PARA_DIFF] [--iso_diff ISO_DIFF] - [--lambda1 LAMBDA1] [--lambda2 LAMBDA2] - [--save_kernels DIRECTORY | --load_kernels DIRECTORY] - [--compute_only] [--processes NBR] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_dwi in_bval in_bvec - -Compute NODDI [1] maps using AMICO. -Multi-shell DWI necessary. - -Formerly: scil_compute_NODDI.py - -positional arguments: - in_dwi DWI file acquired with a NODDI compatible protocol (single-shell data not suited). - in_bval b-values filename, in FSL format (.bval). - in_bvec b-vectors filename, in FSL format (.bvec). - -options: - -h, --help show this help message and exit - --mask MASK Brain mask filename. - --out_dir OUT_DIR Output directory for the NODDI results. [results] - --tolerance tol The tolerated gap between the b-values to extract and the current b-value. - [Default: 20] - * Note. We would expect to find at least one b-value in the - range [0, tolerance]. To skip this check, use --skip_b0_check. - --skip_b0_check By default, we supervise that at least one b0 exists in your data - (i.e. b-values below the default --tolerance). Use this option to - allow continuing even if the minimum b-value is suspiciously high. - Use with care, and only if you understand your data. - --processes NBR Number of sub-processes to start. - Default: [1] - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Model options: - --para_diff PARA_DIFF - Axial diffusivity (AD) in the CC. [0.0017] - --iso_diff ISO_DIFF Mean diffusivity (MD) in ventricles. [0.003] - --lambda1 LAMBDA1 First regularization parameter. [0.5] - --lambda2 LAMBDA2 Second regularization parameter. [0.001] - -Kernels options: - --save_kernels DIRECTORY - Output directory for the COMMIT kernels. - --load_kernels DIRECTORY - Input directory where the COMMIT kernels are located. - --compute_only Compute kernels only, --save_kernels must be used. - -Reference: - [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC. - NODDI: practical in vivo neurite orientation dispersion - and density imaging of the human brain. - NeuroImage. 2012 Jul 16;61:1000-16. diff --git a/scripts/.hidden/scil_NODDI_priors.py.help b/scripts/.hidden/scil_NODDI_priors.py.help deleted file mode 100644 index 6ea54799a..000000000 --- a/scripts/.hidden/scil_NODDI_priors.py.help +++ /dev/null @@ -1,70 +0,0 @@ -usage: scil_NODDI_priors.py [-h] [--fa_min_single_fiber FA_MIN_SINGLE_FIBER] - [--fa_max_ventricles FA_MAX_VENTRICLES] - [--md_min_ventricles MD_MIN_VENTRICLES] - [--roi_radius ROI_RADIUS] - [--roi_center pos pos pos] - [--out_txt_1fiber_para FILE] - [--out_txt_1fiber_perp FILE] - [--out_mask_1fiber FILE] - [--out_txt_ventricles FILE] - [--out_mask_ventricles FILE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_FA in_AD in_RD in_MD - -Compute the axial (para_diff), radial (perp_diff), and mean (iso_diff) -diffusivity priors for NODDI. - -Formerly: scil_compute_NODDI_priors.py - -positional arguments: - in_FA Path to the FA volume. - in_AD Path to the axial diffusivity (AD) volume. - in_RD Path to the radial diffusivity (RD) volume. - in_MD Path to the mean diffusivity (MD) volume. - -options: - -h, --help show this help message and exit - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. 
- -f Force overwriting of the output files. - -Metrics options: - --fa_min_single_fiber FA_MIN_SINGLE_FIBER - Minimal threshold of FA (voxels above that threshold are considered in - the single fiber mask). [0.7] - --fa_max_ventricles FA_MAX_VENTRICLES - Maximal threshold of FA (voxels under that threshold are considered in - the ventricles). [0.1] - --md_min_ventricles MD_MIN_VENTRICLES - Minimal threshold of MD in mm2/s (voxels above that threshold are considered - for in the ventricles). [0.003] - -Regions options: - --roi_radius ROI_RADIUS - Radius of the region used to estimate the priors. The roi will be a cube spanning - from ROI_CENTER in each direction. [20] - --roi_center pos pos pos - Center of the roi of size roi_radius used to estimate the priors; a 3-value coordinate. - If not set, uses the center of the 3D volume. - -Outputs: - --out_txt_1fiber_para FILE - Output path for the text file containing the single fiber average value of AD. - If not set, the file will not be saved. - --out_txt_1fiber_perp FILE - Output path for the text file containing the single fiber average value of RD. - If not set, the file will not be saved. - --out_mask_1fiber FILE - Output path for single fiber mask. If not set, the mask will not be saved. - --out_txt_ventricles FILE - Output path for the text file containing the ventricles average value of MD. - If not set, the file will not be saved. - --out_mask_ventricles FILE - Output path for the ventricule mask. - If not set, the mask will not be saved. - -Reference: - [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC. - NODDI: practical in vivo neurite orientation dispersion and density - imaging of the human brain. NeuroImage. 2012 Jul 16;61:1000-16. diff --git a/scripts/.hidden/scil_aodf_metrics.py.help b/scripts/.hidden/scil_aodf_metrics.py.help deleted file mode 100644 index 9af856779..000000000 --- a/scripts/.hidden/scil_aodf_metrics.py.help +++ /dev/null @@ -1,93 +0,0 @@ -usage: scil_aodf_metrics.py [-h] [--mask MASK] [--asi_map ASI_MAP] - [--odd_power_map ODD_POWER_MAP] [--peaks PEAKS] - [--peak_values PEAK_VALUES] - [--peak_indices PEAK_INDICES] [--nufid NUFID] - [--not_all] [--at A_THRESHOLD] [--rt R_THRESHOLD] - [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}] - [--processes NBR] - [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_sh - -Script to compute various metrics derivated from asymmetric ODF. - -These metrics include the asymmetric peak directions image, a number of fiber -directions (nufid) map [1], the asymmetry index (ASI) map [2] and an odd-power -map [3]. - -The asymmetric peak directions image contains peaks per hemisphere, considering -antipodal sphere directions as distinct. On a symmetric signal, the number of -asymmetric peaks extracted is then twice the number of symmetric peaks. - -The nufid map is the asymmetric alternative to NuFO maps. It counts the -number of asymmetric peaks extracted and ranges in [0..N] with N the maximum -number of peaks. - -The asymmetric index is a cosine-based metric in the range [0..1], with 0 -corresponding to a perfectly symmetric signal and 1 to a perfectly asymmetric -signal. - -The odd-power map is also in the range [0..1], with 0 corresponding to a -perfectly symmetric signal and 1 to a perfectly anti-symmetric signal. It is -given as the ratio of the L2-norm of odd SH coefficients on the L2-norm of all -SH coefficients. 
- -Formerly: scil_compute_asym_odf_metrics.py - -positional arguments: - in_sh Input SH image. - -options: - -h, --help show this help message and exit - --mask MASK Optional mask. - --asi_map ASI_MAP Output asymmetry index (ASI) map. - --odd_power_map ODD_POWER_MAP - Output odd power map. - --peaks PEAKS Output filename for the extracted peaks. - --peak_values PEAK_VALUES - Output filename for the extracted peaks values. - --peak_indices PEAK_INDICES - Output filename for the generated peaks indices on the sphere. - --nufid NUFID Output filename for the nufid file. - --not_all If set, only saves the files specified using the file flags [False]. - --at A_THRESHOLD Absolute threshold on fODF amplitude. This value should be set to - approximately 1.5 to 2 times the maximum fODF amplitude in isotropic voxels - (ie. ventricles). - Use scil_fodf_max_in_ventricles.py to find the maximal value. - See [Dell'Acqua et al HBM 2013] [0.0]. - --rt R_THRESHOLD Relative threshold on fODF amplitude in percentage [0.1]. - --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724} - Sphere to use for peak directions estimation [symmetric724]. - --processes NBR Number of sub-processes to start. - Default: [1] - --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} - Spherical harmonics basis used for the SH coefficients. - Must be either descoteaux07', 'tournier07', - 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: - 'descoteaux07' : SH basis from the Descoteaux et al. - MRM 2007 paper - 'tournier07' : SH basis from the new Tournier et al. - NeuroImage 2019 paper, as in MRtrix 3. - 'descoteaux07_legacy': SH basis from the legacy Dipy implementation - of the Descoteaux et al. MRM 2007 paper - 'tournier07_legacy' : SH basis from the legacy Tournier et al. - NeuroImage 2007 paper. - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -References: -[1] C. Poirier and M. Descoteaux, "Filtering Methods for Asymmetric ODFs: -Where and How Asymmetry Occurs in the White Matter." bioRxiv. 2022 Jan 1; -2022.12.18.520881. doi: https://doi.org/10.1101/2022.12.18.520881 - -[2] S. Cetin Karayumak, E. Özarslan, and G. Unal, -"Asymmetric Orientation Distribution Functions (AODFs) revealing intravoxel -geometry in diffusion MRI," Magnetic Resonance Imaging, vol. 49, pp. 145-158, -Jun. 2018, doi: https://doi.org/10.1016/j.mri.2018.03.006. - -[3] C. Poirier, E. St-Onge, and M. Descoteaux, "Investigating the Occurence of -Asymmetric Patterns in White Matter Fiber Orientation Distribution Functions" -[Abstract], In: Proc. Intl. Soc. Mag. Reson. Med. 29 (2021), 2021 May 15-20, -Vancouver, BC, Abstract number 0865. diff --git a/scripts/.hidden/scil_bids_validate.py.help b/scripts/.hidden/scil_bids_validate.py.help deleted file mode 100644 index 9af451483..000000000 --- a/scripts/.hidden/scil_bids_validate.py.help +++ /dev/null @@ -1,42 +0,0 @@ -usage: scil_bids_validate.py [-h] [--bids_ignore BIDS_IGNORE] [--fs FS] - [--clean] [--readout READOUT] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_bids out_json - -Create a json file from a BIDS dataset detailling all info -needed for tractoflow -- DWI/rev_DWI -- T1 -- fmap/sbref (based on IntendedFor entity) -- Freesurfer (optional - could be one per participant - or one per participant/session) - -The BIDS dataset MUST be homogeneous. 
-The metadata need to be uniform across all participants/sessions/runs - -Mandatory entity: IntendedFor -Sensitive entities: PhaseEncodingDirection, TotalReadoutTime, direction - -Formerly: scil_validate_bids.py - -positional arguments: - in_bids Input BIDS folder. - out_json Output json file. - -options: - -h, --help show this help message and exit - --bids_ignore BIDS_IGNORE - If you want to ignore some subjects or some files, you - can provide an extra bidsignore file.Check: - https://github.com/bids-standard/bids- - validator#bidsignore - --fs FS Output freesurfer path. It will add keys wmparc and - aparc+aseg. - --clean If set, it will remove all the participants that are - missing any information. - --readout READOUT Default total readout time value [0.062]. - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided - level. Default level is warning, default when using -v - is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_bingham_metrics.py.help b/scripts/.hidden/scil_bingham_metrics.py.help deleted file mode 100644 index ac2a1c2ff..000000000 --- a/scripts/.hidden/scil_bingham_metrics.py.help +++ /dev/null @@ -1,52 +0,0 @@ -usage: scil_bingham_metrics.py [-h] [--out_fd OUT_FD] [--out_fs OUT_FS] - [--out_ff OUT_FF] [--not_all] [--mask MASK] - [--nbr_integration_steps NBR_INTEGRATION_STEPS] - [-v [{DEBUG,INFO,WARNING}]] [--processes NBR] - [-f] - in_bingham - -Script to compute fODF lobe-specific metrics derived from a Bingham -distribution fit, as described in [1]. Resulting metrics are fiber density -(FD), fiber spread (FS) and fiber fraction (FF) [2]. - -The Bingham coefficients volume comes from scil_fodf_to_bingham.py. - -A lobe's FD is the integral of the Bingham function on the sphere. It -represents the density of fibers going through a given voxel for a given -fODF lobe (fixel). A lobe's FS is the ratio of its FD on its maximum AFD. It -is at its minimum for a sharp lobe and at its maximum for a wide lobe. A lobe's -FF is the ratio of its FD on the total FD in the voxel. - -Using 12 threads, the execution takes 10 minutes for FD estimation for a brain -with 1mm isotropic resolution. Other metrics take less than a second. - -Formerly: scil_compute_lobe_specific_fodf_metrics.py - -positional arguments: - in_bingham Input Bingham nifti image. - -options: - -h, --help show this help message and exit - --out_fd OUT_FD Path to output fiber density. [fd.nii.gz] - --out_fs OUT_FS Path to output fiber spread. [fs.nii.gz] - --out_ff OUT_FF Path to fiber fraction file. [ff.nii.gz] - --not_all Do not compute all metrics. Then, please provide the output paths of the files you need. - --mask MASK Optional mask image. Only voxels inside the mask are computed. - --nbr_integration_steps NBR_INTEGRATION_STEPS - Number of integration steps along the theta axis for fiber density estimation. [50] - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - --processes NBR Number of sub-processes to start. - Default: [1] - -f Force overwriting of the output files. - -[1] T. W. Riffert, J. Schreiber, A. Anwander, and T. R. Knösche, “Beyond - fractional anisotropy: Extraction of bundle-specific structural metrics - from crossing fiber models,” NeuroImage, vol. 100, pp. 176-191, Oct. 2014, - doi: 10.1016/j.neuroimage.2014.06.015. - -[2] J. Schreiber, T. Riffert, A. Anwander, and T. R. 
Knösche, “Plausibility - Tracking: A method to evaluate anatomical connectivity and microstructural - properties along fiber pathways,” NeuroImage, vol. 90, pp. 163-178, Apr. - 2014, doi: 10.1016/j.neuroimage.2014.01.002. diff --git a/scripts/.hidden/scil_btensor_metrics.py.help b/scripts/.hidden/scil_btensor_metrics.py.help deleted file mode 100644 index 2cb7853e0..000000000 --- a/scripts/.hidden/scil_btensor_metrics.py.help +++ /dev/null @@ -1,94 +0,0 @@ -usage: scil_btensor_metrics.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals - IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS - [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5} - [{0,1,-0.5,0.5} ...] [--mask MASK] - [--tolerance tol] [--skip_b0_check] - [--fit_iters FIT_ITERS] - [--random_iters RANDOM_ITERS] - [--do_weight_bvals] [--do_weight_pa] - [--do_multiple_s0] [--op OP] [--fa FA] - [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] - [-f] [--not_all] [--md file] [--ufa file] - [--mk_i file] [--mk_a file] [--mk_t file] - -Script to compute microstructure metrics using the DIVIDE method. In order to -operate, the script needs at leats two different types of b-tensor encodings. -Note that custom encodings are not yet supported, so that only the linear -tensor encoding (LTE, b_delta = 1), the planar tensor encoding -(PTE, b_delta = -0.5), the spherical tensor encoding (STE, b_delta = 0) and -the cigar shape tensor encoding (b_delta = 0.5) are available. Moreover, all -of `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the -same number of arguments. Be sure to keep the same order of encodings -throughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT). - -By default, will output all possible files, using default names. Thus, this -script outputs the results from the DIVIDE fit or direct derivatives: -mean diffusivity (MD), isotropic mean kurtosis (mk_i), anisotropic mean -kurtosis (mk_a), total mean kurtosis (mk_t) and finally micro-FA (uFA). -Specific names can be specified using the -file flags specified in the "File flags" section. - -If --not_all is set, only the files specified explicitly by the flags -will be output. The order parameter can also be computed from the uFA and a -precomputed FA, using separate input parameters. - ->>> scil_btensor_metrics.py --in_dwis LTE.nii.gz PTE.nii.gz STE.nii.gz - --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs LTE.bvec PTE.bvec STE.bvec - --in_bdeltas 1 -0.5 0 --mask mask.nii.gz - -IMPORTANT: If the script does not converge to a solution, it is probably due to -noise outside the brain. Thus, it is strongly recommanded to provide a brain -mask with --mask. - -Based on Markus Nilsson, Filip Szczepankiewicz, Björn Lampinen, André Ahlgren, -João P. de Almeida Martins, Samo Lasic, Carl-Fredrik Westin, -and Daniel Topgaard. An open-source framework for analysis of multidimensional -diffusion MRI data implemented in MATLAB. -Proc. Intl. Soc. Mag. Reson. Med. (26), Paris, France, 2018. - -Formerly: scil_compute_divide.py - -options: - -h, --help show this help message and exit - --in_dwis IN_DWIS [IN_DWIS ...] - Path to the input diffusion volume for each b-tensor encoding type. - --in_bvals IN_BVALS [IN_BVALS ...] - Path to the bval file, in FSL format, for each b-tensor encoding type. - --in_bvecs IN_BVECS [IN_BVECS ...] - Path to the bvec file, in FSL format, for each b-tensor encoding type. - --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...] - Value of b_delta for each b-tensor encoding type, in the same order as dwi, bval and bvec inputs. 
- --mask MASK Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction. - --tolerance tol The tolerated gap between the b-values to extract and the current b-value. - [Default: 20] - * Note. We would expect to find at least one b-value in the - range [0, tolerance]. To skip this check, use --skip_b0_check. - --skip_b0_check By default, we supervise that at least one b0 exists in your data - (i.e. b-values below the default --tolerance). Use this option to - allow continuing even if the minimum b-value is suspiciously high. - Use with care, and only if you understand your data. - --fit_iters FIT_ITERS - The number of time the gamma fit will be done [1] - --random_iters RANDOM_ITERS - The number of iterations for the initial parameters search. [50] - --do_weight_bvals If set, does not do a weighting on the bvalues in the gamma fit. - --do_weight_pa If set, does not do a powder averaging weighting in the gamma fit. - --do_multiple_s0 If set, does not take into account multiple baseline signals. - --processes NBR Number of sub-processes to start. - Default: [1] - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - --not_all If set, only saves the files specified using the file flags. (Default: False) - -Order parameter (OP): - --op OP Output filename for the order parameter. The OP will not be output if this is not given. Computation of the OP also requires a precomputed FA map (given using --fa). - --fa FA Path to a FA map. Needed for calculating the OP. - -File flags: - --md file Output filename for the MD. - --ufa file Output filename for the microscopic FA. - --mk_i file Output filename for the isotropic mean kurtosis. - --mk_a file Output filename for the anisotropic mean kurtosis. - --mk_t file Output filename for the total mean kurtosis. diff --git a/scripts/.hidden/scil_bundle_clean_qbx_clusters.py.help b/scripts/.hidden/scil_bundle_clean_qbx_clusters.py.help deleted file mode 100644 index f76af15c7..000000000 --- a/scripts/.hidden/scil_bundle_clean_qbx_clusters.py.help +++ /dev/null @@ -1,54 +0,0 @@ -usage: scil_bundle_clean_qbx_clusters.py [-h] - [--out_accepted_dir OUT_ACCEPTED_DIR] - [--out_rejected_dir OUT_REJECTED_DIR] - [--min_cluster_size MIN_CLUSTER_SIZE] - [--background_opacity BACKGROUND_OPACITY] - [--background_linewidth BACKGROUND_LINEWIDTH] - [--clusters_linewidth CLUSTERS_LINEWIDTH] - [--reference REFERENCE] - [--no_bbox_check] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_bundles [in_bundles ...] - out_accepted out_rejected - - Render clusters sequentially to either accept or reject them based on - visual inspection. Useful for cleaning bundles for RBx, BST or for figures. - The VTK window does not handle well opacity of streamlines, this is a - normal rendering behavior. - Often use in pair with scil_tractogram_qbx.py. - - Key mapping: - - a/A: accept displayed clusters - - r/R: reject displayed clusters - - z/Z: Rewing one element - - c/C: Stop rendering of the background concatenation of streamlines - - q/Q: Early window exist, everything remaining will be rejected - -positional arguments: - in_bundles List of the clusters filename. - out_accepted Filename of the concatenated accepted clusters. - out_rejected Filename of the concatenated rejected clusters. 
- -options: - -h, --help show this help message and exit - --out_accepted_dir OUT_ACCEPTED_DIR - Directory to save all accepted clusters separately. - --out_rejected_dir OUT_REJECTED_DIR - Directory to save all rejected clusters separately. - --min_cluster_size MIN_CLUSTER_SIZE - Minimum cluster size for consideration [1].Must be at least 1. - --background_opacity BACKGROUND_OPACITY - Opacity of the background streamlines.Keep low between 0 and 0.5 [0.1]. - --background_linewidth BACKGROUND_LINEWIDTH - Linewidth of the background streamlines [1]. - --clusters_linewidth CLUSTERS_LINEWIDTH - Linewidth of the current cluster [1]. - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of - tractograms (ignores the presence of invalid streamlines). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_bundle_compute_centroid.py.help b/scripts/.hidden/scil_bundle_compute_centroid.py.help deleted file mode 100644 index 197b4c393..000000000 --- a/scripts/.hidden/scil_bundle_compute_centroid.py.help +++ /dev/null @@ -1,24 +0,0 @@ -usage: scil_bundle_compute_centroid.py [-h] [--nb_points NB_POINTS] - [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_bundle out_centroid - -Compute a single bundle centroid, using an 'infinite' QuickBundles threshold. - -Formerly: scil_compute_centroid.py - -positional arguments: - in_bundle Fiber bundle file. - out_centroid Output centroid streamline filename. - -options: - -h, --help show this help message and exit - --nb_points NB_POINTS - Number of points defining the centroid streamline[20]. - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_bundle_compute_endpoints_map.py.help b/scripts/.hidden/scil_bundle_compute_endpoints_map.py.help deleted file mode 100644 index 1de7346dc..000000000 --- a/scripts/.hidden/scil_bundle_compute_endpoints_map.py.help +++ /dev/null @@ -1,42 +0,0 @@ -usage: scil_bundle_compute_endpoints_map.py [-h] [--swap] [--binary] - [--nb_points NB_POINTS] - [--indent INDENT] [--sort_keys] - [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_bundle endpoints_map_head - endpoints_map_tail - -Computes the endpoint map of a bundle. The endpoint map is simply a count of -the number of streamlines that start or end in each voxel. - -The idea is to estimate the cortical area affected by the bundle (assuming -streamlines start/end in the cortex). - -Note: If the streamlines are not ordered the head/tail are random and not -really two coherent groups. Use the following script to order streamlines: -scil_tractogram_uniformize_endpoints.py - -Formerly: scil_compute_endpoints_map.py - -positional arguments: - in_bundle Fiber bundle filename. - endpoints_map_head Output endpoints map head filename. - endpoints_map_tail Output endpoints map tail filename. - -options: - -h, --help show this help message and exit - --swap Swap head<->tail convention. Can be useful when the reference is not in RAS. - --binary Save outputs as a binary mask instead of a heat map. 
- --nb_points NB_POINTS - Number of points to consider at the extremities of the streamlines. [1] - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Json options: - --indent INDENT Indent for json pretty print. - --sort_keys Sort keys in output json. diff --git a/scripts/.hidden/scil_bundle_diameter.py.help b/scripts/.hidden/scil_bundle_diameter.py.help deleted file mode 100644 index adf2eb4ea..000000000 --- a/scripts/.hidden/scil_bundle_diameter.py.help +++ /dev/null @@ -1,70 +0,0 @@ -usage: scil_bundle_diameter.py [-h] - [--fitting_func {lin_up,lin_down,exp,inv,log}] - [--show_rendering | --save_rendering OUT_FOLDER] - [--wireframe] [--error_coloring] - [--width WIDTH] [--opacity OPACITY] - [--win_dims WIDTH HEIGHT] [--background R G B] - [--reference REFERENCE] [--indent INDENT] - [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f] - in_bundles [in_bundles ...] in_labels - [in_labels ...] - -Script to estimate the diameter of bundle(s) along their length. -See also scil_bundle_shape_measures.py, which prints a quick estimate of -the diameter (volume / length). The computation here is more complex and done -for each section of the bundle. - -The script expects: -- bundles with coherent endpoints from scil_tractogram_uniformize_endpoints.py -- labels maps with around 5-50 points scil_bundle_label_map.py - <5 is not enough, high risk of bad fit - >50 is too much, high risk of bad fit -- bundles that are close to a tube - without major fanning in a single axis - fanning is in 2 directions (uniform dispersion) good approximation - -The scripts prints a JSON file with mean/std to be compatible with tractometry. -WARNING: STD is in fact an ERROR measure from the fit and NOT an STD. - -Since the estimation and fit quality is not always intuitive for some bundles -and the tube with varying diameter is not easy to color/visualize, -the script comes with its own VTK rendering to allow exploration of the data. -(optional). - -Formerly: scil_estimate_bundles_diameter.py - -positional arguments: - in_bundles List of tractography files. - in_labels List of labels maps that match the bundles. - -options: - -h, --help show this help message and exit - --fitting_func {lin_up,lin_down,exp,inv,log} - Function to weigh points using their distance. - [Default: None] - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Visualization options: - --show_rendering Display VTK window (optional). - --save_rendering OUT_FOLDER - Save VTK render in the specified folder (optional) - --wireframe Use wireframe for the tube rendering. - --error_coloring Use the fitting error to color the tube. - --width WIDTH Width of tubes or lines representing streamlines - [Default: 0.2] - --opacity OPACITY Opacity for the streamlines rendered with the tube. - [Default: 0.2] - --win_dims WIDTH HEIGHT - The dimensions for the vtk window. [(1920, 1080)] - --background R G B RBG values [0, 255] of the color of the background. - [Default: [1, 1, 1]] - -Json options: - --indent INDENT Indent for json pretty print. - --sort_keys Sort keys in output json. 
diff --git a/scripts/.hidden/scil_bundle_filter_by_occurence.py.help b/scripts/.hidden/scil_bundle_filter_by_occurence.py.help deleted file mode 100644 index 7c8dc5881..000000000 --- a/scripts/.hidden/scil_bundle_filter_by_occurence.py.help +++ /dev/null @@ -1,38 +0,0 @@ -usage: scil_bundle_filter_by_occurence.py [-h] [--ratio_voxels [RATIO_VOXELS]] - [--ratio_streamlines [RATIO_STREAMLINES]] - [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_bundles [in_bundles ...] - output_prefix - -Use multiple versions of a same bundle and detect the most probable voxels by -using a threshold on the occurence, voxel-wise. With threshold 0.5, this is -a majority vote. This is useful to generate an average representation from -bundles of a given population. - -If streamlines originate from the same tractogram (ex, to compare various -bundle clustering techniques), streamline-wise vote is available to find the -streamlines most often included in the bundle. - -Formerly: scil_perform_majority_vote.py - -positional arguments: - in_bundles Input bundles filename(s). All tractograms must have identical headers. - output_prefix Output prefix. Ex: my_path/voting_. The suffixes will be: streamlines.trk and voxels.nii.gz - -options: - -h, --help show this help message and exit - --ratio_voxels [RATIO_VOXELS] - Threshold on the ratio of bundles with at least one streamine in a - given voxel to consider it as part of the 'gold standard'. Default if set: 0.5. - --ratio_streamlines [RATIO_STREAMLINES] - If all bundles come from the same tractogram, use this to generate - a voting for streamlines too. The associated value is the threshold on the ratio of - bundles including the streamline to consider it as part of the 'gold standard'. [0.5] - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_bundle_generate_priors.py.help b/scripts/.hidden/scil_bundle_generate_priors.py.help deleted file mode 100644 index abd09ab30..000000000 --- a/scripts/.hidden/scil_bundle_generate_priors.py.help +++ /dev/null @@ -1,58 +0,0 @@ -usage: scil_bundle_generate_priors.py [-h] - [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] - [--todi_sigma {0,1,2,3,4}] - [--sf_threshold SF_THRESHOLD] - [--out_prefix OUT_PREFIX] - [--out_dir OUT_DIR] - [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_bundle in_fodf in_mask - -Generation of priors and enhanced-FOD from an example/template bundle. -The bundle must have been cleaned thorougly before use. The E-FOD can then -be used for bundle-specific tractography, but not for FOD metrics. - -Formerly: scil_generate_priors_from_bundle.py - -positional arguments: - in_bundle Input bundle filename. - in_fodf Input FOD filename. - in_mask Mask to constrain the TODI spatial smoothing, - for example a WM mask. - -options: - -h, --help show this help message and exit - --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} - Spherical harmonics basis used for the SH coefficients. - Must be either descoteaux07', 'tournier07', - 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: - 'descoteaux07' : SH basis from the Descoteaux et al. - MRM 2007 paper - 'tournier07' : SH basis from the new Tournier et al. - NeuroImage 2019 paper, as in MRtrix 3. 
- 'descoteaux07_legacy': SH basis from the legacy Dipy implementation - of the Descoteaux et al. MRM 2007 paper - 'tournier07_legacy' : SH basis from the legacy Tournier et al. - NeuroImage 2007 paper. - --todi_sigma {0,1,2,3,4} - Smooth the orientation histogram. - --sf_threshold SF_THRESHOLD - Relative threshold for sf masking (0.0-1.0). - --out_prefix OUT_PREFIX - Add a prefix to all output filename, - default is no prefix. - --out_dir OUT_DIR Output directory for all generated files, - default is current directory. - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - - References: - [1] Rheault, Francois, et al. "Bundle-specific tractography with - incorporated anatomical and orientational priors." - NeuroImage 186 (2019): 382-398 - diff --git a/scripts/.hidden/scil_bundle_label_map.py.help b/scripts/.hidden/scil_bundle_label_map.py.help deleted file mode 100644 index 39484a324..000000000 --- a/scripts/.hidden/scil_bundle_label_map.py.help +++ /dev/null @@ -1,43 +0,0 @@ -usage: scil_bundle_label_map.py [-h] [--nb_pts NB_PTS] [--colormap COLORMAP] - [--new_labelling] [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_bundles [in_bundles ...] in_centroid - out_dir - -Compute the label image (Nifti) from a centroid and tractograms (all -representing the same bundle). The label image represents the coverage of -the bundle, segmented into regions labelled from 0 to --nb_pts, starting from -the head, ending in the tail. - -Each voxel will have the label of its nearest centroid point. - -The number of labels will be the same as the centroid's number of points. - -Formerly: scil_compute_bundle_voxel_label_map.py - -positional arguments: - in_bundles Fiber bundle file. - in_centroid Centroid streamline corresponding to bundle. - out_dir Directory to save all mapping and coloring files: - - correlation_map.nii.gz - - session_x/labels_map.nii.gz - - session_x/distance_map.nii.gz - - session_x/correlation_map.nii.gz - - session_x/labels.trk - - session_x/distance.trk - - session_x/correlation.trk - Where session_x is numbered with each bundle. - -options: - -h, --help show this help message and exit - --nb_pts NB_PTS Number of divisions for the bundles. - Default is the number of points of the centroid. - --colormap COLORMAP Select the colormap for colored trk (data_per_point) [jet]. - --new_labelling Use the new labelling method (multi-centroids). - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_bundle_mean_fixel_afd.py.help b/scripts/.hidden/scil_bundle_mean_fixel_afd.py.help deleted file mode 100644 index 994221db7..000000000 --- a/scripts/.hidden/scil_bundle_mean_fixel_afd.py.help +++ /dev/null @@ -1,50 +0,0 @@ -usage: scil_bundle_mean_fixel_afd.py [-h] [--length_weighting] - [--reference REFERENCE] - [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_bundle in_fodf afd_mean_map - -Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF) -maps along a bundle. 
- -This is the "real" fixel-based fODF amplitude along every streamline -of the bundle provided, averaged at every voxel. - -Please use a bundle file rather than a whole tractogram. - -Formerly: scil_compute_fixel_afd_from_bundles.py - -positional arguments: - in_bundle Path of the bundle file. - in_fodf Path of the fODF volume in spherical harmonics (SH). - afd_mean_map Path of the output mean AFD map. - -options: - -h, --help show this help message and exit - --length_weighting If set, will weigh the AFD values according to segment lengths. [False] - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} - Spherical harmonics basis used for the SH coefficients. - Must be either descoteaux07', 'tournier07', - 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: - 'descoteaux07' : SH basis from the Descoteaux et al. - MRM 2007 paper - 'tournier07' : SH basis from the new Tournier et al. - NeuroImage 2019 paper, as in MRtrix 3. - 'descoteaux07_legacy': SH basis from the legacy Dipy implementation - of the Descoteaux et al. MRM 2007 paper - 'tournier07_legacy' : SH basis from the legacy Tournier et al. - NeuroImage 2007 paper. - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Reference: - [1] Raffelt, D., Tournier, JD., Rose, S., Ridgway, GR., Henderson, R., - Crozier, S., Salvado, O., & Connelly, A. (2012). - Apparent Fibre Density: a novel measure for the analysis of - diffusion-weighted magnetic resonance images. NeuroImage, 59(4), - 3976--3994. diff --git a/scripts/.hidden/scil_bundle_mean_fixel_afd_from_hdf5.py.help b/scripts/.hidden/scil_bundle_mean_fixel_afd_from_hdf5.py.help deleted file mode 100644 index cf289868d..000000000 --- a/scripts/.hidden/scil_bundle_mean_fixel_afd_from_hdf5.py.help +++ /dev/null @@ -1,50 +0,0 @@ -usage: scil_bundle_mean_fixel_afd_from_hdf5.py [-h] [--length_weighting] - [--processes NBR] - [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] - [-v [{DEBUG,INFO,WARNING}]] - [-f] - in_hdf5 in_fodf out_hdf5 - -Compute the mean Apparent Fiber Density (AFD) and mean Radial fODF (radfODF) -maps for every connections within a hdf5 (.h5) file. - -This is the "real" fixel-based fODF amplitude along every streamline -of each connection, averaged at every voxel. - -Please use a hdf5 (.h5) file containing decomposed connections - -Formerly: scil_compute_fixel_afd_from_hdf5.py - -positional arguments: - in_hdf5 HDF5 filename (.h5) containing decomposed connections. - in_fodf Path of the fODF volume in spherical harmonics (SH). - out_hdf5 Path of the output HDF5 filenames (.h5). - -options: - -h, --help show this help message and exit - --length_weighting If set, will weigh the AFD values according to segment lengths. [False] - --processes NBR Number of sub-processes to start. - Default: [1] - --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} - Spherical harmonics basis used for the SH coefficients. - Must be either descoteaux07', 'tournier07', - 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: - 'descoteaux07' : SH basis from the Descoteaux et al. - MRM 2007 paper - 'tournier07' : SH basis from the new Tournier et al. - NeuroImage 2019 paper, as in MRtrix 3. 
- 'descoteaux07_legacy': SH basis from the legacy Dipy implementation - of the Descoteaux et al. MRM 2007 paper - 'tournier07_legacy' : SH basis from the legacy Tournier et al. - NeuroImage 2007 paper. - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Reference: - [1] Raffelt, D., Tournier, JD., Rose, S., Ridgway, GR., Henderson, R., - Crozier, S., Salvado, O., & Connelly, A. (2012). - Apparent Fibre Density: a novel measure for the analysis of - diffusion-weighted magnetic resonance images. NeuroImage, - 59(4), 3976--3994. diff --git a/scripts/.hidden/scil_bundle_mean_fixel_bingham_metric.py.help b/scripts/.hidden/scil_bundle_mean_fixel_bingham_metric.py.help deleted file mode 100644 index 2e4d7f6c7..000000000 --- a/scripts/.hidden/scil_bundle_mean_fixel_bingham_metric.py.help +++ /dev/null @@ -1,48 +0,0 @@ -usage: scil_bundle_mean_fixel_bingham_metric.py [-h] [--length_weighting] - [--max_theta MAX_THETA] - [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] - [-f] - in_bundle in_bingham - in_bingham_metric out_mean_map - -Given a bundle and Bingham coefficients, compute the average Bingham -metric at each voxel intersected by the bundle. Intersected voxels are -found by computing the intersection between the voxel grid and each streamline -in the input tractogram. - -This script behaves like scil_compute_mean_fixel_afd_from_bundles.py for fODFs, -but here for Bingham distributions. These add the unique possibility to capture -fixel-based fiber spread (FS) and fiber fraction (FF). FD from the bingham -should be "equivalent" to the AFD_fixel we are used to. - -Bingham coefficients volume must come from scil_fodf_to_bingham.py -and Bingham metrics comes from scil_bingham_metrics.py. - -Bingham metrics are extracted from Bingham distributions fitted to fODF. There -are as many values per voxel as there are lobes extracted. The values chosen -for a given voxelis the one belonging to the lobe better aligned with the -current streamline segment. - -Please use a bundle file rather than a whole tractogram. - -Formerly: scil_compute_mean_fixel_obe_metric_from_bundles.py - -positional arguments: - in_bundle Path of the bundle file. - in_bingham Path of the Bingham volume. - in_bingham_metric Path of the Bingham metric (FD, FS, or FF) volume. - out_mean_map Path of the output mean map. - -options: - -h, --help show this help message and exit - --length_weighting If set, will weigh the FD values according to segment lengths. - --max_theta MAX_THETA - Maximum angle (in degrees) condition on lobe alignment. [60] - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_bundle_mean_std.py.help b/scripts/.hidden/scil_bundle_mean_std.py.help deleted file mode 100644 index 8a75b6b42..000000000 --- a/scripts/.hidden/scil_bundle_mean_std.py.help +++ /dev/null @@ -1,51 +0,0 @@ -usage: scil_bundle_mean_std.py [-h] [--per_point in_labels | --include_dps] - [--density_weighting] - [--distance_weighting DISTANCE_NII] - [--correlation_weighting CORRELATION_NII] - [--out_json OUT_JSON] [--reference REFERENCE] - [--indent INDENT] [--sort_keys] - [-v [{DEBUG,INFO,WARNING}]] - in_bundle in_metrics [in_metrics ...] 
- -Compute mean and std for each metric. - -- Default: For the whole bundle. This is achieved by averaging the metric - values of all voxels occupied by the bundle. -- Option --per_point: For all streamlines points in the bundle for each metric - combination, along the bundle, i.e. for each point. - **To create label_map and distance_map, see - scil_bundle_label_map.py - -Density weighting modifies the contribution of voxel with lower/higher -streamline count to reduce influence of spurious streamlines. - -Formerly: scil_compute_bundle_mean_std_per_point.py or -scil_compute_bundle_mean_std.py - -positional arguments: - in_bundle Fiber bundle file to compute statistics on. - in_metrics Nifti file to compute statistics on. Probably some tractometry measure(s) such as FA, MD, RD, ... - -options: - -h, --help show this help message and exit - --per_point in_labels - If set, computes the metrics per point instead of on the whole bundle. - You must then give the label map (.nii.gz) of the corresponding fiber bundle. - --include_dps Save values from data_per_streamline. - Currently not offered with option --per_point. - --density_weighting If set, weights statistics by the number of fibers passing through each voxel. - --distance_weighting DISTANCE_NII - If set, weights statistics by the inverse of the distance between a streamline and the centroid. - --correlation_weighting CORRELATION_NII - If set, weight statistics by the correlation strength between longitudinal data. - --out_json OUT_JSON Path of the output file. If not given, the output is simply printed on screen. - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -Json options: - --indent INDENT Indent for json pretty print. - --sort_keys Sort keys in output json. diff --git a/scripts/.hidden/scil_bundle_pairwise_comparison.py.help b/scripts/.hidden/scil_bundle_pairwise_comparison.py.help deleted file mode 100644 index 8c485f23e..000000000 --- a/scripts/.hidden/scil_bundle_pairwise_comparison.py.help +++ /dev/null @@ -1,57 +0,0 @@ -usage: scil_bundle_pairwise_comparison.py [-h] [--streamline_dice] - [--bundle_adjency_no_overlap] - [--disable_streamline_distance] - [--single_compare SINGLE_COMPARE] - [--keep_tmp] [--ratio] - [--processes NBR] - [--reference REFERENCE] - [--indent INDENT] [--sort_keys] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_bundles [in_bundles ...] out_json - -Evaluate pair-wise similarity measures of bundles. -All tractograms must be in the same space (aligned to one reference). - -For the voxel representation, the computed similarity measures are: - bundle_adjacency_voxels, dice_voxels, w_dice_voxels, density_correlation - volume_overlap, volume_overreach -The same measures are also evluated for the endpoints. - -For the streamline representation, the computed similarity measures are: - bundle_adjacency_streamlines, dice_streamlines, streamlines_count_overlap, - streamlines_count_overreach - -Formerly: scil_evaluate_bundles_pairwise_agreement_measures.py - -positional arguments: - in_bundles Path of the input bundles. - out_json Path of the output json file. - -options: - -h, --help show this help message and exit - --streamline_dice Compute streamline-wise dice coefficient. - Tractograms must be identical [False]. - --bundle_adjency_no_overlap - If set, do not count zeros in the average BA. 
- --disable_streamline_distance - Will not compute the streamlines distance - [False]. - --single_compare SINGLE_COMPARE - Compare inputs to this single file. - --keep_tmp Will not delete the tmp folder at the end. - --ratio Compute overlap and overreach as a ratio over the - reference tractogram in a Tractometer-style way. - Can only be used if also using the `single_compare` option. - --processes NBR Number of sub-processes to start. - Default: [1] - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Json options: - --indent INDENT Indent for json pretty print. - --sort_keys Sort keys in output json. diff --git a/scripts/.hidden/scil_bundle_reject_outliers.py.help b/scripts/.hidden/scil_bundle_reject_outliers.py.help deleted file mode 100644 index d624985ae..000000000 --- a/scripts/.hidden/scil_bundle_reject_outliers.py.help +++ /dev/null @@ -1,35 +0,0 @@ -usage: scil_bundle_reject_outliers.py [-h] - [--remaining_bundle REMAINING_BUNDLE] - [--alpha ALPHA] [--display_counts] - [--indent INDENT] [--sort_keys] - [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_bundle out_bundle - -Clean a bundle (inliers/outliers) using hiearchical clustering. -http://archive.ismrm.org/2015/2844.html - -If spurious streamlines are dense, it is possible they will not be recognized -as outliers. Manual cleaning may be required to overcome this limitation. - -positional arguments: - in_bundle Fiber bundle file to remove outliers from. - out_bundle Fiber bundle without outliers. - -options: - -h, --help show this help message and exit - --remaining_bundle REMAINING_BUNDLE - Removed outliers. - --alpha ALPHA Percent of the length of the tree that clusters of individual streamlines will be pruned. [0.6] - --display_counts Print streamline count before and after filtering - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Json options: - --indent INDENT Indent for json pretty print. - --sort_keys Sort keys in output json. diff --git a/scripts/.hidden/scil_bundle_score_many_bundles_one_tractogram.py.help b/scripts/.hidden/scil_bundle_score_many_bundles_one_tractogram.py.help deleted file mode 100644 index fd342a4e5..000000000 --- a/scripts/.hidden/scil_bundle_score_many_bundles_one_tractogram.py.help +++ /dev/null @@ -1,110 +0,0 @@ -usage: scil_bundle_score_many_bundles_one_tractogram.py [-h] [--json_prefix p] - [--gt_dir DIR] - [--indent INDENT] - [--sort_keys] - [--reference REFERENCE] - [--no_bbox_check] - [-v [{DEBUG,INFO,WARNING}]] - [-f] - gt_config bundles_dir - -This script is intended to score all bundles from a single tractogram. Each -valid bundle is compared to its ground truth. -Ex: It was used for the ISMRM 2015 Challenge scoring. - -See also scil_bundle_score_same_bundle_many_segmentations.py to score many -versions of a same bundle, compared to ONE ground truth / gold standard. - -This script is the second part of script scil_score_tractogram, which also -segments the wholebrain tractogram into bundles first. 
- -Here we suppose that the bundles are already segmented and saved as follows: - main_dir/ - segmented_VB/*_VS.trk. - segmented_IB/*_*_IC.trk (optional) - segmented_WPC/*_wpc.trk (optional) - IS.trk OR NC.trk (if segmented_IB is present) - -Config file ------------ -The config file needs to be a json containing a dict of the ground-truth -bundles as keys. The value for each bundle is itself a dictionnary with: - - - gt_mask: expected result. OL and OR metrics will be computed from this.* - -* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will -be created. If it is a nifti file, it will be considered to be a mask. - -Exemple config file: -{ - "Ground_truth_bundle_0": { - "gt_mask": "PATH/bundle0.nii.gz", - } -} - -Formerly: scil_score_bundles.py - -Tractometry ------------ -Global connectivity metrics: - -- Computed by default: - - VS: valid streamlines, belonging to a bundle (i.e. respecting all the - criteria for that bundle; endpoints, limit_mask, gt_mask.). - - IS: invalid streamlines. All other streamlines. IS = IC + NC. - -- Optional: - - WPC: wrong path connections, streamlines connecting correct ROIs but not - respecting the other criteria for that bundle. Such streamlines always - exist but they are only saved separately if specified in the options. - Else, they are merged back with the IS. - By definition. WPC are only computed if "limits masks" are provided. - - IC: invalid connections, streamlines joining an incorrect combination of - ROIs. Use carefully, quality depends on the quality of your ROIs and no - analysis is done on the shape of the streamlines. - - NC: no connections. Invalid streamlines minus invalid connections. - -- Fidelity metrics: - - OL: Overlap. Percentage of ground truth voxels containing streamline(s) - for a given bundle. - - OR: Overreach. Amount of voxels containing streamline(s) when they - shouldn't, for a given bundle. We compute two versions : - OR_pct_vs = divided by the total number of voxel covered by the bundle. - (percentage of the voxels touched by VS). - Values range between 0 and 100%. Values are not defined when we - recovered no streamline for a bundle, but we set the OR_pct_vs to 0 - in that case. - OR_pct_gt = divided by the total size of the ground truth bundle mask. - Values could be higher than 100%. - - f1 score: which is the same as the Dice score. - -positional arguments: - gt_config .json dict configured as specified above. - bundles_dir Directory containing all bundles. - (Ex: Output directory for scil_score_tractogram). - It is expected to contain a file IS.trk and - files segmented_VB/*_VS.trk, with, possibly, files - segmented_WPC/*_wpc.trk and segmented_IC/ - -options: - -h, --help show this help message and exit - --json_prefix p Prefix of the output json file. Ex: 'study_x_'. - Suffix will be results.json. File will be saved inside bundles_dir. - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of - tractograms (ignores the presence of invalid streamlines). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Additions to gt_config: - --gt_dir DIR Root path of the ground truth files listed in the gt_config. - If not set, filenames in the config file are considered - as absolute paths. 
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_bundle_score_same_bundle_many_segmentations.py.help b/scripts/.hidden/scil_bundle_score_same_bundle_many_segmentations.py.help
deleted file mode 100644
index dbee99f10..000000000
--- a/scripts/.hidden/scil_bundle_score_same_bundle_many_segmentations.py.help
+++ /dev/null
@@ -1,62 +0,0 @@
-usage: scil_bundle_score_same_bundle_many_segmentations.py [-h]
-                                                           [--streamlines_measures GOLD_STANDARD_STREAMLINES TRACTOGRAM]
-                                                           [--voxels_measures GOLD_STANDARD_MASK TRACKING MASK]
-                                                           [--processes NBR]
-                                                           [--reference REFERENCE]
-                                                           [--indent INDENT]
-                                                           [--sort_keys]
-                                                           [-v [{DEBUG,INFO,WARNING}]]
-                                                           [-f]
-                                                           in_bundles
-                                                           [in_bundles ...]
-                                                           out_json
-
-This script is intended to score many versions of the same bundle, compared to
-ONE ground truth / gold standard.
-
-See also scil_bundle_score_many_bundles_one_tractogram.py to score all bundles
-from a single tractogram by comparing each valid bundle to its ground truth.
-
-All tractograms must be in the same space (aligned to one reference).
-The measures can be applied to a voxel-wise or streamline-wise representation.
-
-A gold standard must be provided for the desired representation.
-A gold standard would be a segmentation from an expert or a group of experts.
-If only the streamline-wise representation is provided without a voxel-wise
-gold standard, it will be computed from the provided streamlines.
-At least one of the two representations is required.
-
-The gold standard tractogram is the tractogram (whole brain most likely) from
-which the segmentation is performed.
-The gold standard tracking mask is the tracking mask used by the tractography
-algorithm to generate the gold standard tractogram.
-
-The computed binary classification measures are:
-sensitivity, specificity, precision, accuracy, dice, kappa, youden for both
-the streamline and voxel representation (if provided).
-
-Formerly: scil_evaluate_bundles_binary_classification_measures.py
-
-positional arguments:
-  in_bundles            Path of the input bundles.
-  out_json              Path of the output json.
-
-options:
-  -h, --help            show this help message and exit
-  --streamlines_measures GOLD_STANDARD_STREAMLINES TRACTOGRAM
-                        The gold standard bundle and the original tractogram.
-  --voxels_measures GOLD_STANDARD_MASK TRACKING MASK
-                        The gold standard mask and the original tracking mask.
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_bundle_shape_measures.py.help b/scripts/.hidden/scil_bundle_shape_measures.py.help
deleted file mode 100644
index 8ab8dc13b..000000000
--- a/scripts/.hidden/scil_bundle_shape_measures.py.help
+++ /dev/null
@@ -1,64 +0,0 @@
-usage: scil_bundle_shape_measures.py [-h] [--out_json OUT_JSON]
-                                     [--group_statistics] [--no_uniformize]
-                                     [--reference REFERENCE] [--processes NBR]
-                                     [--indent INDENT] [--sort_keys]
-                                     [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                     in_bundles [in_bundles ...]
-
-Evaluate basic measurements of bundle(s).
-
-The computed measures are:
-    - volume_info: volume, volume_endpoints
-    - streamlines_info: streamlines_count, avg_length (in mm or in number of
-      points), average step size, min_length, max_length.
-      ** You may also get this information with scil_tractogram_print_info.py.
-    - shape_info: span, curl, diameter, elongation, surface area,
-      irregularity, end surface area, radius, end surface irregularity,
-      mean_curvature, fractal dimension.
-      ** The diameter, here, is a simple estimation using volume / length.
-      For a more complex calculation, see scil_bundle_diameter.py.
-
-With more than one bundle, the measures are averaged over bundles. All
-tractograms must be in the same space.
-
-The set average contains the average measures of all input bundles. The
-measures that are dependent on the streamline count are weighted by the number
-of streamlines of each bundle. Each of these average measures is computed by
-first summing the product of a measure and the streamline count of each
-bundle, then dividing the sum by the total number of streamlines. Thus, measures
-including length and span are essentially averages of all the streamlines.
-Other streamline-related set measures are computed with other set averages,
-whereas bundle-related measures are computed as an average of all bundles.
-These measures include volume and surface area.
-
-The fractal dimension is dependent on the voxel size and the number of voxels.
-If data comparison is performed, the bundles MUST be in the same resolution.
-
-Formerly: scil_compute_bundle_volume.py or
-scil_evaluate_bundles_individual_measures.py
-
-positional arguments:
-  in_bundles            Path of the input bundles.
-
-options:
-  -h, --help            show this help message and exit
-  --out_json OUT_JSON   Path of the output file. If not given, the output is simply printed on screen.
-  --group_statistics    Show average measures [False].
-  --no_uniformize       Do NOT automatically uniformize endpoints for the endpoint-related metrics.
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
-
-References:
-[1] Fang-Cheng Yeh. 2020.
-    Shape analysis of the human association pathways. NeuroImage.
diff --git a/scripts/.hidden/scil_bundle_uniformize_endpoints.py.help b/scripts/.hidden/scil_bundle_uniformize_endpoints.py.help
deleted file mode 100644
index 75da0ffbe..000000000
--- a/scripts/.hidden/scil_bundle_uniformize_endpoints.py.help
+++ /dev/null
@@ -1,44 +0,0 @@
-usage: scil_bundle_uniformize_endpoints.py [-h]
-                                           (--axis {x,y,z} | --auto | --centroid tractogram | --target_roi TARGET_ROI [TARGET_ROI ...])
-                                           [--swap] [--reference REFERENCE]
-                                           [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                           in_bundle out_bundle
-
-Uniformize streamlines' endpoints according to a defined axis.
-Useful for tractometry or models creation.
-
-The --auto option will automatically calculate the main orientation.
-If the input bundle is poorly defined, it is possible the heuristic will be wrong.
-
-The default is to flip each streamline so their first point's coordinate in the
-defined axis is smaller than their last point (--swap does the opposite).
-
-The --target_roi option will use the barycenter of the target mask to define
-the axis. The target mask can be a binary mask or an atlas. If an atlas is
-used, labels are expected in the form of --target_roi atlas.nii.gz 2 3 5:7.
-
-Formerly: scil_uniformize_streamlines_endpoints.py
-
-positional arguments:
-  in_bundle             Input path of the tractography file.
-  out_bundle            Output path of the uniformized file.
-
-options:
-  -h, --help            show this help message and exit
-  --axis {x,y,z}        Match endpoints of the streamlines along this axis.
-                        SUGGESTION: Commissural = x, Association = y, Projection = z
-  --auto                Match endpoints of the streamlines along an automatically determined axis.
-  --centroid tractogram
-                        Match endpoints of the streamlines to align them to a single reference streamline (centroid).
-  --target_roi TARGET_ROI [TARGET_ROI ...]
-                        Provide a target ROI: either a binary mask or a label map and the labels to use.
-                        Will align heads to be closest to the mask barycenter.
-                        (atlas: if no labels are provided, all labels will be used.)
-  --swap                Swap head <-> tail convention. Can be useful when the reference is not in RAS.
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_bundle_volume_per_label.py.help b/scripts/.hidden/scil_bundle_volume_per_label.py.help
deleted file mode 100644
index 6d28443d7..000000000
--- a/scripts/.hidden/scil_bundle_volume_per_label.py.help
+++ /dev/null
@@ -1,31 +0,0 @@
-usage: scil_bundle_volume_per_label.py [-h] [--indent INDENT] [--sort_keys]
-                                       [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                       voxel_label_map bundle_name
-
-Compute bundle volume per label in mm3. This script supports anisotropic voxel
-resolution. Volume is estimated by counting the number of voxels occupied by
-each label and multiplying it by the volume of a single voxel.
-
-The labels can be obtained by scil_bundle_label_map.py.
-
-This estimation is typically performed at a resolution around 1mm3.
-
-To get the volume and other measures directly from the (whole) bundle, use
-scil_bundle_shape_measures.py.
-
-Formerly: scil_compute_bundle_volume_per_label.py
-
-positional arguments:
-  voxel_label_map       Fiber bundle's voxel label map file.
-  bundle_name           Bundle name.
-
-options:
-  -h, --help            show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_connectivity_compare_populations.py.help b/scripts/.hidden/scil_connectivity_compare_populations.py.help
deleted file mode 100644
index b95c60a3a..000000000
--- a/scripts/.hidden/scil_connectivity_compare_populations.py.help
+++ /dev/null
@@ -1,63 +0,0 @@
-usage: scil_connectivity_compare_populations.py [-h] --in_g1 IN_G1 [IN_G1 ...]
-                                                --in_g2 IN_G2 [IN_G2 ...]
-                                                [--tail {left,right,both}]
-                                                [--paired]
-                                                [--fdr | --bonferroni]
-                                                [--p_threshold THRESH OUT_FILE]
-                                                [--filtering_mask FILTERING_MASK]
-                                                [-v [{DEBUG,INFO,WARNING}]]
-                                                [-f]
-                                                out_pval_matrix
-
-Performs a network-based statistical comparison for populations g1 and g2. The
-output is a matrix of the same size as the input connectivity matrices, with
-p-values at each edge.
-All input matrices must have the same shape (NxN). For paired t-test, both
-groups must have the same number of observations.
-
-For example, if you have streamline count weighted matrices for an MCI and a
-control group and you want to investigate differences in their connectomes:
->>> scil_connectivity_compare_populations.py pval.npy
-        --in_g1 MCI/*_sc.npy --in_g2 CTL/*_sc.npy
-
---filtering_mask will simply multiply all input matrices by the binary mask
-before performing the statistical comparison. Reduces the number of
-statistical tests, useful when using --fdr or --bonferroni.
-
-Formerly: scil_compare_connectivity.py
-
-positional arguments:
-  out_pval_matrix       Output matrix (.npy) containing the edges p-value.
-
-options:
-  -h, --help            show this help message and exit
-  --in_g1 IN_G1 [IN_G1 ...]
-                        List of matrices for the first population (.npy).
-  --in_g2 IN_G2 [IN_G2 ...]
-                        List of matrices for the second population (.npy).
-  --tail {left,right,both}
-                        Enables specification of an alternative hypothesis:
-                        left: mean of g1 < mean of g2,
-                        right: mean of g2 < mean of g1,
-                        both: both means are not equal (default).
-  --paired              Use paired sample t-test instead of population t-test.
-                        --in_g1 and --in_g2 must be ordered the same way.
-  --fdr                 Perform a false discovery rate (FDR) correction for the p-values.
-                        Uses the number of non-zero edges as number of tests (value between 0.01 and 0.1).
-  --bonferroni          Perform a Bonferroni correction for the p-values.
-                        Uses the number of non-zero edges as number of tests.
-  --p_threshold THRESH OUT_FILE
-                        Threshold the final p-value matrix and save the binary matrix (.npy).
-  --filtering_mask FILTERING_MASK
-                        Binary filtering mask (.npy) to apply before computing the measures.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-[1] Rubinov, Mikail, and Olaf Sporns. "Complex network measures of brain
-    connectivity: uses and interpretations." Neuroimage 52.3 (2010):
-    1059-1069.
-[2] Zalesky, Andrew, Alex Fornito, and Edward T. Bullmore. "Network-based
-    statistic: identifying differences in brain networks." Neuroimage 53.4
-    (2010): 1197-1207.
diff --git a/scripts/.hidden/scil_connectivity_compute_matrices.py.help b/scripts/.hidden/scil_connectivity_compute_matrices.py.help
deleted file mode 100644
index 3b5c0c9c8..000000000
--- a/scripts/.hidden/scil_connectivity_compute_matrices.py.help
+++ /dev/null
@@ -1,91 +0,0 @@
-usage: scil_connectivity_compute_matrices.py [-h] [--volume OUT_FILE]
-                                             [--streamline_count OUT_FILE]
-                                             [--length OUT_FILE]
-                                             [--similarity IN_FOLDER OUT_FILE]
-                                             [--maps IN_FOLDER OUT_FILE]
-                                             [--metrics IN_FILE OUT_FILE]
-                                             [--lesion_load IN_FILE OUT_DIR]
-                                             [--min_lesion_vol MIN_LESION_VOL]
-                                             [--density_weighting]
-                                             [--no_self_connection]
-                                             [--include_dps OUT_DIR]
-                                             [--force_labels_list FORCE_LABELS_LIST]
-                                             [--processes NBR]
-                                             [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                             in_hdf5 in_labels
-
-This script computes a variety of measures in the form of connectivity
-matrices. This script is made to follow
-scil_tractogram_segment_bundles_for_connectivity.py and
-uses the same labels list as input.
-
-The script expects a folder containing all relevant bundles following the
-naming convention LABEL1_LABEL2.trk and a text file containing the list of
-labels that should be part of the matrices. The ordering of labels in the
-matrices will follow the same order as the list.
-This script only generates matrices in the form of arrays; it does not visualize
-or reorder the labels (nodes).
-
-The parameter --similarity expects a folder with density maps
-(LABEL1_LABEL2.nii.gz) following the same naming convention as the input
-directory.
-The bundles should be averaged versions in the same space. This will
-compute the weighted-dice between each node and their homologous average
-version.
-
-The parameter --metrics can be used more than once and expects a map (t1, fa,
-etc.) in the same space; each will generate a matrix. The average value in
-the volume occupied by the bundle will be reported in the matrix nodes.
-
-The parameter --maps can be used more than once and expects a folder with
-pre-computed maps (LABEL1_LABEL2.nii.gz) following the same naming convention
-as the input directory. Each will generate a matrix. The average non-zero
-value in the map will be reported in the matrix nodes.
-
-The parameter --lesion_load will compute 3 lesion-related matrices:
-lesion_count.npy, lesion_vol.npy, lesion_sc.npy and put them inside a
-specified folder. They represent the number of lesions, the total volume of
-lesion(s) and the total number of streamlines going through the lesion(s) for
-each connection. Each connection can be seen as a 'bundle' and then something
-similar to scil_analyse_lesion_load.py is run for each 'bundle'.
-
-Formerly: scil_compute_connectivity.py
-
-positional arguments:
-  in_hdf5               Input filename for the hdf5 container (.h5).
-                        Obtained from scil_tractogram_segment_bundles_for_connectivity.py.
-  in_labels             Labels file name (nifti).
-                        This generates a NxN connectivity matrix.
-
-options:
-  -h, --help            show this help message and exit
-  --volume OUT_FILE     Output file for the volume weighted matrix (.npy).
-  --streamline_count OUT_FILE
-                        Output file for the streamline count weighted matrix (.npy).
-  --length OUT_FILE     Output file for the length weighted matrix (.npy).
-  --similarity IN_FOLDER OUT_FILE
-                        Input folder containing the averaged bundle density
-                        maps (.nii.gz) and output file for the similarity weighted matrix (.npy).
-  --maps IN_FOLDER OUT_FILE
-                        Input folder containing pre-computed maps (.nii.gz)
-                        and output file for the weighted matrix (.npy).
-  --metrics IN_FILE OUT_FILE
-                        Input (.nii.gz) and output file (.npy) for a metric weighted matrix.
-  --lesion_load IN_FILE OUT_DIR
-                        Input binary mask (.nii.gz) and output directory for all lesion-related matrices.
-  --min_lesion_vol MIN_LESION_VOL
-                        Minimum lesion volume in mm3 [7].
-  --density_weighting   Use density-weighting for the metric weighted matrix.
-  --no_self_connection  Eliminate the diagonal from the matrices.
-  --include_dps OUT_DIR
-                        Save matrices from data_per_streamline in the output directory.
-                        COMMIT-related values will be summed instead of averaged.
-                        Will always overwrite files.
-  --force_labels_list FORCE_LABELS_LIST
-                        Path to a labels list (.txt) in case of missing labels in the atlas.
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
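
As a rough usage sketch (not taken from the original help; decomposition.h5, labels.nii.gz, fa.nii.gz and the output names are hypothetical placeholders), several matrices can be requested in a single call using only the flags documented above:

    scil_connectivity_compute_matrices.py decomposition.h5 labels.nii.gz \
        --volume vol.npy --streamline_count sc.npy --length len.npy \
        --metrics fa.nii.gz fa.npy --processes 4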
diff --git a/scripts/.hidden/scil_connectivity_compute_pca.py.help b/scripts/.hidden/scil_connectivity_compute_pca.py.help
deleted file mode 100644
index 297cfbf43..000000000
--- a/scripts/.hidden/scil_connectivity_compute_pca.py.help
+++ /dev/null
@@ -1,75 +0,0 @@
-usage: scil_connectivity_compute_pca.py [-h] --metrics METRICS [METRICS ...]
-                                        --list_ids FILE [--not_only_common]
-                                        [--input_connectoflow]
-                                        [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                        in_folder out_folder
-
-Script to compute PCA analysis on diffusion metrics. Output returned is all
-significant principal components (e.g. presenting eigenvalues > 1) in a
-connectivity matrix format. This script can take into account all edges from
-every subject in a population or only non-zero edges across all subjects.
-
-The script can take directly as input a connectoflow output folder. Simply use
-the --input_connectoflow flag. For other types of folder input, the script
-expects a single folder containing all matrices for all subjects.
-Example:
-    [in_folder]
-    |--- sub-01_ad.npy
-    |--- sub-01_md.npy
-    |--- sub-02_ad.npy
-    |--- sub-02_md.npy
-    |--- ...
-
-The plots, tables and principal components matrices will be saved in the
-designated output folder. If you want to move your principal components
-matrices back into your connectoflow output, you can use a
-similar bash command for all principal components:
-for sub in `cat list_id.txt`;
-do
-    cp out_folder/${sub}_PC1.npy connectoflow_output/$sub/Compute_Connectivity/
-done
-
-Interpretation of resulting principal components can be done by evaluating the
-loading values for each metric. A value near 0 means that this metric doesn't
-contribute to this specific component whereas high positive or negative values
-mean a larger contribution. Components can then be labeled based on which
-metric contributes the most. For example, a principal component showing a
-high loading for afd_fixel and near 0 loading for all other metrics can be
-interpreted as axonal density (see Gagnon et al. 2022 for this specific example
-or ref [3] for an introduction to PCA).
-
-EXAMPLE USAGE:
-scil_connectivity_compute_pca.py input_folder/ output_folder/
-    --metrics ad fa md rd [...] --list_ids list_ids.txt
-
-positional arguments:
-  in_folder             Path to the input folder. See explanation above for its expected organization.
-  out_folder            Path to the output folder to export graphs, tables and principal
-                        components matrices.
-
-options:
-  -h, --help            show this help message and exit
-  --metrics METRICS [METRICS ...]
-                        Suffixes of all metrics to include in PCA analysis (ex: ad md fa rd).
-                        They must be immediately followed by the .npy extension.
-  --list_ids FILE       Path to a .txt file containing a list of all ids.
-  --not_only_common     If true, will include all edges from all subjects and not only
-                        common edges (Not recommended)
-  --input_connectoflow  If true, script will assume the input folder is a Connectoflow output.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-[1] Chamberland M, Raven EP, Genc S, Duffy K, Descoteaux M, Parker GD, Tax CMW,
-    Jones DK. Dimensionality reduction of diffusion MRI measures for improved
-    tractometry of the human brain. Neuroimage. 2019 Oct 15;200:89-100.
-    doi: 10.1016/j.neuroimage.2019.06.020. Epub 2019 Jun 20. PMID: 31228638;
-    PMCID: PMC6711466.
-[2] Gagnon A., Grenier G., Bocti C., Gillet V., Lepage J.-F., Baccarelli A. A.,
-    Posner J., Descoteaux M., Takser L. (2022). White matter microstructural
-    variability linked to differential attentional skills and impulsive behavior
-    in a pediatric population. Cerebral Cortex.
-    https://doi.org/10.1093/cercor/bhac180
-[3] https://towardsdatascience.com/what-are-pca-loadings-and-biplots-9a7897f2e559
-
diff --git a/scripts/.hidden/scil_connectivity_filter.py.help b/scripts/.hidden/scil_connectivity_filter.py.help
deleted file mode 100644
index 3349006e3..000000000
--- a/scripts/.hidden/scil_connectivity_filter.py.help
+++ /dev/null
@@ -1,56 +0,0 @@
-usage: scil_connectivity_filter.py [-h] [--lower_than [LOWER_THAN ...]]
-                                   [--greater_than [GREATER_THAN ...]]
-                                   [--keep_condition_count] [--inverse_mask]
-                                   [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                   out_matrix_mask
-
-Script to facilitate filtering of connectivity matrices.
-The same could be achieved through a complex sequence of
-scil_connectivity_math.py calls.
-
-Can be used with any connectivity matrix from
-scil_connectivity_compute_matrices.py.
-
-For example, a simple filtering (Jasmeen style) would be:
-scil_connectivity_filter.py out_mask.npy
-    --greater_than */sc.npy 1 0.90
-    --lower_than */sim.npy 2 0.90
-    --greater_than */len.npy 40 0.90 -v;
-
-This will result in a binary mask where each node with a value of 1 is a
-node where, for at least 90% of the population, there is at least 1 streamline,
-the similarity to the average is within 2 mm, and the average streamline
-length is at least 40 mm.
-
-All operations are strictly > or <; there is no >= or <=.
-
---greater_than or --lower_than expect the same convention:
-    MATRICES_LIST VALUE_THR POPULATION_PERC
-It is strongly recommended (but not enforced) that the same number of
-connectivity matrices is used for each condition.
-
-This script performs an intersection of all conditions, meaning that all
-conditions must be met in order not to be filtered.
-If the user wants to manually handle the requirements, --keep_condition_count
-can be used and manually binarized using scil_connectivity_math.py.
-
-Formerly: scil_filter_connectivity.py
-
-positional arguments:
-  out_matrix_mask       Output mask (matrix) resulting from the provided conditions (.npy).
-
-options:
-  -h, --help            show this help message and exit
-  --lower_than [LOWER_THAN ...]
-                        Lower than condition using the VALUE_THR in at least POPULATION_PERC (from MATRICES_LIST).
-                        See description for more details.
-  --greater_than [GREATER_THAN ...]
-                        Greater than condition using the VALUE_THR in at least POPULATION_PERC (from MATRICES_LIST).
-                        See description for more details.
-  --keep_condition_count
-                        Report the number of condition(s) that pass/fail rather than a binary mask.
-  --inverse_mask        Inverse the final mask. 0 where all conditions are respected and 1 where at least one fails.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
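
As a hedged sketch of the manual binarization mentioned above (the .npy filenames are hypothetical), --keep_condition_count can be combined with the lower_threshold_eq operation of scil_connectivity_math.py, documented later in this patch:

    scil_connectivity_filter.py conditions_count.npy \
        --greater_than */sc.npy 1 0.90 \
        --lower_than */sim.npy 2 0.90 --keep_condition_count
    scil_connectivity_math.py lower_threshold_eq conditions_count.npy 2 final_mask.npy

With two conditions, thresholding the count at 2 keeps only the nodes that pass both.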
diff --git a/scripts/.hidden/scil_connectivity_graph_measures.py.help b/scripts/.hidden/scil_connectivity_graph_measures.py.help
deleted file mode 100644
index 10cab03e9..000000000
--- a/scripts/.hidden/scil_connectivity_graph_measures.py.help
+++ /dev/null
@@ -1,63 +0,0 @@
-usage: scil_connectivity_graph_measures.py [-h]
-                                           [--filtering_mask FILTERING_MASK]
-                                           [--avg_node_wise] [--append_json]
-                                           [--small_world] [--indent INDENT]
-                                           [--sort_keys]
-                                           [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                           in_conn_matrix in_length_matrix
-                                           out_json
-
-Evaluate graph theory measures from connectivity matrices.
-A length weighted and a streamline count weighted matrix are required since
-some measures require one or the other.
-
-This script evaluates the measures one subject at a time. To generate a
-population dictionary (similarly to other scil_connectivity_*.py scripts), use
-the --append_json option as well as using the same output filename.
->>> for i in hcp/*/; do scil_connectivity_graph_measures.py ${i}/sc_prob.npy
-    ${i}/len_prob.npy hcp_prob.json --append_json --avg_node_wise; done
-
-Some measures output one value per node; the default behavior is to report
-them all in a list. To obtain only the average, use the
---avg_node_wise option.
-
-The computed connectivity measures are:
-centrality, modularity, assortativity, participation, clustering,
-nodal_strength, local_efficiency, global_efficiency, density, rich_club,
-path_length, edge_count, omega, sigma
-
-For more details about the measures, please refer to
-- https://sites.google.com/site/bctnet/measures
-- https://github.com/aestrivex/bctpy/wiki
-
-This script is under the GNU GPLv3 license, for more detail please refer to
-https://www.gnu.org/licenses/gpl-3.0.en.html
-
-Formerly: scil_evaluate_connectivity_graph_measures.py
-
-positional arguments:
-  in_conn_matrix        Input connectivity matrix (.npy).
-                        Typically a streamline count weighted matrix.
-  in_length_matrix      Input length weighted matrix (.npy).
-  out_json              Path of the output json.
-
-options:
-  -h, --help            show this help message and exit
-  --filtering_mask FILTERING_MASK
-                        Binary filtering mask to apply before computing the measures.
-  --avg_node_wise       Return a single value for node-wise measures.
-  --append_json         If the file already exists, will append to the dictionary.
-  --small_world         Compute measures related to small-worldness (omega and sigma).
-                        This option is much slower.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
-
-[1] Rubinov, Mikail, and Olaf Sporns. "Complex network measures of brain
-    connectivity: uses and interpretations." Neuroimage 52.3 (2010):
-    1059-1069.
diff --git a/scripts/.hidden/scil_connectivity_hdf5_average_density_map.py.help b/scripts/.hidden/scil_connectivity_hdf5_average_density_map.py.help
deleted file mode 100644
index 71a633c5a..000000000
--- a/scripts/.hidden/scil_connectivity_hdf5_average_density_map.py.help
+++ /dev/null
@@ -1,36 +0,0 @@
-usage: scil_connectivity_hdf5_average_density_map.py [-h] [--binary]
-                                                     [--processes NBR]
-                                                     [-v [{DEBUG,INFO,WARNING}]]
-                                                     [-f]
-                                                     in_hdf5 [in_hdf5 ...]
-                                                     out_dir
-
-Compute a density map for each connection from a hdf5 file.
-Typically used after scil_tractogram_segment_bundles_for_connectivity.py in
-order to obtain the average density map of each connection to allow the use
-of --similarity in scil_connectivity_compute_matrices.py.
-
-This script is parallelized, but will run much slower on non-SSD storage if
-too many processes are used. The output is a directory containing the
-thousands of connections:
-out_dir/
-    |-- LABEL1_LABEL1.nii.gz
-    |-- LABEL1_LABEL2.nii.gz
-    |-- [...]
-    |-- LABEL90_LABEL90.nii.gz
-
-Formerly: scil_compute_hdf5_average_density_map.py
-
-positional arguments:
-  in_hdf5               List of HDF5 filenames (.h5) from scil_tractogram_segment_bundles_for_connectivity.py.
-  out_dir               Path of the output directory.
-
-options:
-  -h, --help            show this help message and exit
-  --binary              Binarize density maps before the population average.
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_connectivity_math.py.help b/scripts/.hidden/scil_connectivity_math.py.help
deleted file mode 100644
index e4419c2fe..000000000
--- a/scripts/.hidden/scil_connectivity_math.py.help
+++ /dev/null
@@ -1,150 +0,0 @@
-usage: scil_connectivity_math.py [-h] [--data_type DATA_TYPE]
-                                 [--exclude_background]
-                                 [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                 {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference}
-                                 in_matrices [in_matrices ...] out_matrix
-
-Performs an operation on a list of matrices. The supported operations are
-listed below.
-
-Some operations such as multiplication or addition accept float values as
-parameters instead of matrices.
-> scil_connectivity_math.py multiplication mat.npy 10 mult_10.npy
-
-    lower_threshold: MAT THRESHOLD
-        All values below the threshold will be set to zero.
-        All values above the threshold will be set to one.
-
-    upper_threshold: MAT THRESHOLD
-        All values below the threshold will be set to one.
-        All values above the threshold will be set to zero.
-        Equivalent to lower_threshold followed by an inversion.
-
-    lower_threshold_eq: MAT THRESHOLD
-        All values below the threshold will be set to zero.
-        All values above or equal to the threshold will be set to one.
-
-    upper_threshold_eq: MAT THRESHOLD
-        All values below or equal to the threshold will be set to one.
-        All values above the threshold will be set to zero.
-        Equivalent to lower_threshold followed by an inversion.
-
-    lower_threshold_otsu: MAT
-        All values below or equal to the Otsu threshold will be set to zero.
-        All values above the Otsu threshold will be set to one.
-        (Otsu's method is an algorithm to perform automatic matrix thresholding
-        of the background.)
-
-    upper_threshold_otsu: MAT
-        All values below the Otsu threshold will be set to one.
-        All values above or equal to the Otsu threshold will be set to zero.
-        Equivalent to lower_threshold_otsu followed by an inversion.
-
-    lower_clip: MAT THRESHOLD
-        All values below the threshold will be set to threshold.
-
-    upper_clip: MAT THRESHOLD
-        All values above the threshold will be set to threshold.
-
-    absolute_value: MAT
-        All negative values will become positive.
-
-    round: MAT
-        Round all decimal values to the closest integer.
-
-    ceil: MAT
-        Ceil all decimal values to the next integer.
-
-    floor: MAT
-        Floor all decimal values to the previous integer.
-
-    normalize_sum: MAT
-        Normalize the matrix so the sum of all values is one.
-
-    normalize_max: MAT
-        Normalize the matrix so the maximum value is one.
-
-    log_10: MAT
-        Apply a log (base 10) to all non-zero values of a matrix.
-
-    log_e: MAT
-        Apply a natural log to all non-zero values of a matrix.
-
-    convert: MAT
-        Perform no operation, but simply change the data type.
-
-    invert: MAT
-        Operation on a binary matrix to interchange 0s and 1s in a binary mask.
-
-    addition: MATs
-        Add multiple matrices together.
-
-    subtraction: MAT_1 MAT_2
-        Subtract the first matrix by the second (MAT_1 - MAT_2).
-
-    multiplication: MATs
-        Multiply multiple matrices together (danger of underflow and overflow).
-
-    division: MAT_1 MAT_2
-        Divide the first matrix by the second (danger of underflow and overflow).
-        Zero values are ignored and excluded from the operation.
-
-    mean: MATs
-        Compute the mean of matrices.
-        If a single 4D matrix is provided, average along the last dimension.
-
-    std: MATs
-        Compute the standard deviation over multiple matrices.
-        If a single 4D matrix is provided, compute the STD along the last
-        dimension.
-
-    correlation: MATs
-        Computes the correlation of the 3x3x3 neighborhood of each voxel, for
-        all pairs of input matrices. The final matrix is the average correlation
-        (through all pairs).
-        For a given pair of matrices:
-        - Background is considered as 0. May lead to very high correlations
-        close to the border of the background regions, or very poor ones if the
-        background in both matrices differs.
-        - Images are zero-padded. For the same reason as above, this may lead to
-        very high correlations if you have data close to the border of the
-        matrix.
-        - NaN values (if a voxel's neighborhood is entirely uniform; std 0) are
-        replaced by
-            - 0 if at least one neighborhood entirely contained background.
-            - 1 if the voxel's neighborhoods are uniform in both matrices.
-            - 0 if the voxel's neighborhood is uniform in one matrix, but not
-            the other.
-
-        UPDATE AS OF VERSION 2.0: Random noise was previously added in the
-        process to help avoid NaN values. Now replaced by either 0 or 1 as
-        explained above.
-
-    union: MATs
-        Operation on binary matrices to keep voxels that are non-zero in at
-        least one file.
-
-    intersection: MATs
-        Operation on binary matrices to keep voxels that are non-zero
-        in all files.
-
-    difference: MAT_1 MAT_2
-        Operation on binary matrices to keep voxels from the first file that are
-        not in the second file (non-zero).
-
-
-positional arguments:
-  {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference}
-                        The type of operation to be performed on the matrices.
-  in_matrices           The list of matrices files or parameters.
-  out_matrix            Output matrix path.
-
-options:
-  -h, --help            show this help message and exit
-  --data_type DATA_TYPE
-                        Data type of the output image. Use the format: uint8, float16, int32.
-  --exclude_background  Does not affect the background of the original matrices.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_connectivity_normalize.py.help b/scripts/.hidden/scil_connectivity_normalize.py.help
deleted file mode 100644
index 9731dcb81..000000000
--- a/scripts/.hidden/scil_connectivity_normalize.py.help
+++ /dev/null
@@ -1,76 +0,0 @@
-usage: scil_connectivity_normalize.py [-h]
-                                      [--length LENGTH_MATRIX | --inverse_length LENGTH_MATRIX]
-                                      [--bundle_volume VOLUME_MATRIX]
-                                      [--parcel_volume ATLAS LABELS_LIST | --parcel_surface ATLAS LABELS_LIST]
-                                      [--max_at_one | --sum_to_one | --log_10]
-                                      [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                      in_matrix out_matrix
-
-Normalize a connectivity matrix coming from
-scil_tractogram_segment_bundles_for_connectivity.py.
-3 categories of normalization are available:
--- Edge attributes
- - length: Multiply each edge by the average bundle length.
-   Compensate for far away connections when using interface seeding.
-   Cannot be used with inverse_length.
-
- - inverse_length: Divide each edge by the average bundle length.
-   Compensate for big connections when using white matter seeding.
-   Cannot be used with length.
-
- - bundle_volume: Divide each edge by the average bundle volume.
-   Compensate for big connections when using white matter seeding.
-
--- Node attributes (Mutually exclusive)
- - parcel_volume: Divide each edge by the sum of the node volumes.
-   Compensate for the likelihood of ending in the node.
-   Compensate for seeding bias when using interface seeding.
-
- - parcel_surface: Divide each edge by the sum of the node surfaces.
-   Compensate for the likelihood of ending in the node.
-   Compensate for seeding bias when using interface seeding.
-
--- Matrix scaling (Mutually exclusive)
- - max_at_one: Maximum value of the matrix will be set to one.
- - sum_to_one: Ensure the sum of all edge weights is one.
- - log_10: Apply a base 10 logarithm to all edge weights.
-
-The volume and length matrices should come from the
-scil_tractogram_segment_bundles_for_connectivity.py script.
-
-A review of these types of normalization is available in:
-Colon-Perez, Luis M., et al. "Dimensionless, scale-invariant, edge weight
-metric for the study of complex structural networks." PLOS ONE 10.7 (2015).
-
-However, the edge weighting proposed in this publication is not
-implemented.
-
-Formerly: scil_normalize_connectivity.py
-
-positional arguments:
-  in_matrix             Input connectivity matrix. This is typically a streamline_count matrix (.npy).
-  out_matrix            Output normalized matrix (.npy).
-
-options:
-  -h, --help            show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Edge-wise options:
-  --length LENGTH_MATRIX
-                        Length matrix used for edge-wise multiplication.
-  --inverse_length LENGTH_MATRIX
-                        Length matrix used for edge-wise division.
-  --bundle_volume VOLUME_MATRIX
-                        Volume matrix used for edge-wise division.
-  --parcel_volume ATLAS LABELS_LIST
-                        Atlas and labels list for edge-wise division.
-  --parcel_surface ATLAS LABELS_LIST
-                        Atlas and labels list for edge-wise division.
-
-Scaling options:
-  --max_at_one          Scale matrix with maximum value at one.
-  --sum_to_one          Scale matrix with sum of all elements at one.
-  --log_10              Apply a base 10 logarithm to the matrix.
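
A plausible invocation combining one option from each category (a sketch only; sc.npy, atlas.nii.gz and labels_list.txt are hypothetical), since the groups above are only mutually exclusive internally:

    scil_connectivity_normalize.py sc.npy sc_norm.npy \
        --parcel_volume atlas.nii.gz labels_list.txt --max_at_one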
diff --git a/scripts/.hidden/scil_connectivity_pairwise_agreement.py.help b/scripts/.hidden/scil_connectivity_pairwise_agreement.py.help
deleted file mode 100644
index 182cf6a11..000000000
--- a/scripts/.hidden/scil_connectivity_pairwise_agreement.py.help
+++ /dev/null
@@ -1,33 +0,0 @@
-usage: scil_connectivity_pairwise_agreement.py [-h] [--single_compare matrix]
-                                               [--normalize] [--indent INDENT]
-                                               [--sort_keys]
-                                               [-v [{DEBUG,INFO,WARNING}]]
-                                               [-f]
-                                               in_matrices [in_matrices ...]
-                                               out_json
-
-Evaluate pair-wise similarity measures of connectivity matrices.
-
-The computed similarity measures are:
-sum of squared differences and Pearson correlation coefficient
-
-Formerly: scil_evaluate_connectivity_pairwaise_agreement_measures.py
-
-positional arguments:
-  in_matrices           Path of the input matrices.
-  out_json              Path of the output json file.
-
-options:
-  -h, --help            show this help message and exit
-  --single_compare matrix
-                        Compare inputs to this single file.
-                        (Else, compute all pairs in in_matrices).
-  --normalize           If set, will normalize all matrices from zero to one.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_connectivity_print_filenames.py.help b/scripts/.hidden/scil_connectivity_print_filenames.py.help
deleted file mode 100644
index 575fa9fec..000000000
--- a/scripts/.hidden/scil_connectivity_print_filenames.py.help
+++ /dev/null
@@ -1,32 +0,0 @@
-usage: scil_connectivity_print_filenames.py [-h] [-v [{DEBUG,INFO,WARNING}]]
-                                            [-f]
-                                            in_matrix labels_list out_txt
-
-Output the list of filenames using the coordinates from a binary connectivity
-matrix. Typically used to move around files that are considered valid after
-the scil_connectivity_filter.py script.
-
-Example:
-# Keep connections with more than 1000 streamlines for 100% of a population
-scil_connectivity_filter.py filtering_mask.npy
-    --greater_than */streamlines_count.npy 1000 1.0
-scil_connectivity_print_filenames.py filtering_mask.npy
-    labels_list.txt pass.txt
-for file in $(cat pass.txt);
-    do mv ${SOMEWHERE}/${file} ${SOMEWHERE_ELSE}/;
-done
-
-Formerly: scil_print_connectivity_filenames.py
-
-positional arguments:
-  in_matrix             Binary matrix in numpy (.npy) format.
-                        Typically from scil_connectivity_filter.py
-  labels_list           List saved by the decomposition script.
-  out_txt               Output text file containing all filenames.
-
-options:
-  -h, --help            show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_connectivity_reorder_rois.py.help b/scripts/.hidden/scil_connectivity_reorder_rois.py.help
deleted file mode 100644
index ad23d8da1..000000000
--- a/scripts/.hidden/scil_connectivity_reorder_rois.py.help
+++ /dev/null
@@ -1,51 +0,0 @@
-usage: scil_connectivity_reorder_rois.py [-h]
-                                         (--in_ordering IN_ORDERING | --optimal_leaf_ordering OUT_FILE)
-                                         [--out_suffix OUT_SUFFIX]
-                                         [--out_dir OUT_DIR]
-                                         [--labels_list LABELS_LIST]
-                                         [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                         in_matrices [in_matrices ...]
-
-Re-order one or many connectivity matrices using a text file format.
-The first row is the (x) and the second row the (y); values must be space separated.
-The resulting matrix does not have to be square (supports an unequal number of
-x and y).
-
-The values refer to the coordinates (starting at 0) in the matrix, but if the
---labels_list parameter is used, the values will refer to the label which will
-be converted to the appropriate coordinates. This file must be the same as the
-one provided to scil_tractogram_segment_bundles_for_connectivity.py.
-
-To subsequently use scil_visualize_connectivity.py with a lookup table, you
-must use a label-based reordering json and use --labels_list.
-
-You can also use the Optimal Leaf Ordering (OLO) algorithm to transform a
-sparse matrix into an ordering that reduces the matrix bandwidth. The output
-file can then be re-used with --in_ordering. Only one input can be used with
-this option; we recommend an average streamline count or volume matrix.
-
-Formerly: scil_reorder_connectivity.py
-
-positional arguments:
-  in_matrices           Connectivity matrices in .npy or .txt format.
-
-options:
-  -h, --help            show this help message and exit
-  --in_ordering IN_ORDERING
-                        Txt file with the first row as x and second as y.
-  --optimal_leaf_ordering OUT_FILE
-                        Output a text file with an ordering that aligns structures along the diagonal.
-  --out_suffix OUT_SUFFIX
-                        Suffix for the output matrix filename.
-  --out_dir OUT_DIR     Output directory for the re-ordered matrices.
-  --labels_list LABELS_LIST
-                        List saved by the decomposition script,
-                        --in_ordering must contain labels rather than coordinates (.txt).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-[1] Rubinov, Mikail, and Olaf Sporns. "Complex network measures of brain
-    connectivity: uses and interpretations." Neuroimage 52.3 (2010):
-    1059-1069.
diff --git a/scripts/.hidden/scil_denoising_nlmeans.py.help b/scripts/.hidden/scil_denoising_nlmeans.py.help
deleted file mode 100644
index 69680f495..000000000
--- a/scripts/.hidden/scil_denoising_nlmeans.py.help
+++ /dev/null
@@ -1,28 +0,0 @@
-usage: scil_denoising_nlmeans.py [-h] [--mask] [--sigma float] [--log LOGFILE]
-                                 [--processes NBR] [-v [{DEBUG,INFO,WARNING}]]
-                                 [-f]
-                                 in_image out_image number_coils
-
-Script to denoise a dataset with the Non Local Means algorithm.
-
-Formerly: scil_run_nlmeans.py
-
-positional arguments:
-  in_image              Path of the image file to denoise.
-  out_image             Path to save the denoised image file.
-  number_coils          Number of receiver coils of the scanner.
-                        Use number_coils=1 in the case of a SENSE (GE, Philips) reconstruction and
-                        number_coils >= 1 for GRAPPA reconstruction (Siemens). number_coils=4 works well for the 1.5T
-                        in Sherbrooke. Use number_coils=0 if the noise is considered Gaussian distributed.
-
-options:
-  -h, --help            show this help message and exit
-  --mask                Path to a binary mask. Only the data inside the mask will be used for computations
-  --sigma float         The standard deviation of the noise to use instead of computing it automatically.
-  --log LOGFILE         If supplied, name of the text file to store the logs.
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
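
Since the DKI help below recommends denoising before kurtosis fitting, a minimal two-step sketch could look as follows (filenames are hypothetical; number_coils=1 assumes a SENSE reconstruction as described above):

    scil_denoising_nlmeans.py dwi.nii.gz dwi_denoised.nii.gz 1 --processes 4
    scil_dki_metrics.py dwi_denoised.nii.gz dwi.bval dwi.bvec --mask brain_mask.nii.gz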
diff --git a/scripts/.hidden/scil_dki_metrics.py.help b/scripts/.hidden/scil_dki_metrics.py.help
deleted file mode 100644
index e831c70ce..000000000
--- a/scripts/.hidden/scil_dki_metrics.py.help
+++ /dev/null
@@ -1,105 +0,0 @@
-usage: scil_dki_metrics.py [-h] [--mask MASK] [--tolerance tol]
-                           [--skip_b0_check] [--min_k MIN_K] [--max_k MAX_K]
-                           [--smooth SMOOTH] [--not_all] [--ak file]
-                           [--mk file] [--rk file] [--msk file]
-                           [--dki_fa file] [--dki_md file] [--dki_ad file]
-                           [--dki_rd file] [--dki_residual file] [--msd file]
-                           [-v [{DEBUG,INFO,WARNING}]] [-f]
-                           in_dwi in_bval in_bvec
-
-Script to compute the Diffusion Kurtosis Imaging (DKI) and Mean Signal DKI
-(MSDKI) metrics. DKI is a multi-shell diffusion model. The input DWI needs
-to be multi-shell, i.e. multi-bvalued.
-
-Since the diffusion kurtosis model involves the estimation of a large number
-of parameters and since the non-Gaussian components of the diffusion signal
-are more sensitive to artefacts, you should really denoise your DWI volume
-before using this DKI script (e.g. scil_denoising_nlmeans.py). Moreover, to
-remove biases due to fiber dispersion, fiber crossings and other mesoscopic
-properties of the underlying tissue, MSDKI does a powder-average of DWI for all
-directions, thus removing the orientational dependencies and creating an
-alternative mean kurtosis map.
-
-DKI is also known to be vulnerable to artefacted voxels induced by the
-low radial diffusivities of aligned white matter (CC, CST voxels). Since it is
-very hard to capture non-Gaussian information due to the low decays in the
-radial direction, its kurtosis estimates have very low robustness.
-Noisy kurtosis estimates tend to be negative and their absolute values can be
-orders of magnitude higher than the typical kurtosis values. Consequently,
-these negative kurtosis values will heavily propagate to the mean and radial
-kurtosis metrics. This is well-reported in [Rafael Henriques MSc thesis 2012,
-chapter 3]. Two ways to overcome this issue: i) compute the kurtosis values
-from powder-averaged MSDKI, and ii) perform 3D Gaussian smoothing. On
-powder-averaged signal decays, you don't have this low diffusivity issue and
-your kurtosis estimates have much higher precision (additionally they are
-independent of the fODF).
-
-By default, will output all available metrics, using default names. Specific
-names can be specified using the metrics flags that are listed in the "Metrics
-files flags" section. If --not_all is set, only the metrics specified
-explicitly by the flags will be output.
-
-This script directly comes from the DIPY example gallery and references
-therein.
-[1] examples_built/reconst_dki/#example-reconst-dki
-[2] examples_built/reconst_msdki/#example-reconst-msdki
-
-Formerly: scil_compute_kurtosis_metrics.py
-
-positional arguments:
-  in_dwi                Path of the input multi-shell DWI dataset.
-  in_bval               Path of the b-value file, in FSL format.
-  in_bvec               Path of the b-vector file, in FSL format.
-
-options:
-  -h, --help            show this help message and exit
-  --mask MASK           Path to a binary mask.
-                        Only data inside the mask will be used for computations and reconstruction.
-                        [Default: None]
-  --tolerance tol       The tolerated gap between the b-values to extract and the current b-value.
-                        [Default: 20]
-                        * Note. We would expect to find at least one b-value in the
-                        range [0, tolerance]. To skip this check, use --skip_b0_check.
-  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
-                        (i.e. b-values below the default --tolerance). Use this option to
-                        allow continuing even if the minimum b-value is suspiciously high.
-                        Use with care, and only if you understand your data.
-  --min_k MIN_K         Minimum kurtosis value in the output maps
-                        (ak, mk, rk). In theory, -3/7 is the min kurtosis
-                        limit for regions that consist of water confined
-                        to spherical pores (see DIPY example and
-                        documentation) [Default: 0.0].
-  --max_k MAX_K         Maximum kurtosis value in the output maps
-                        (ak, mk, rk). In theory, 10 is the max kurtosis
-                        limit for regions that consist of water confined
-                        to spherical pores (see DIPY example and
-                        documentation) [Default: 3.0].
-  --smooth SMOOTH       Smooth input DWI with a 3D Gaussian filter with
-                        full-width-half-max (fwhm). Kurtosis fitting is
-                        sensitive and outliers occur easily. According to
-                        tests on HCP, CB_Brain, Penthera3T, this smoothing
-                        is thus turned ON by default with fwhm=2.5.
-                        [Default: 2.5].
-  --not_all             If set, will only save the metrics explicitly
-                        specified using the other metrics flags.
-                        [Default: not set].
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Metrics files flags:
-  --ak file             Output filename for the axial kurtosis.
-  --mk file             Output filename for the mean kurtosis.
-  --rk file             Output filename for the radial kurtosis.
-  --msk file            Output filename for the mean signal kurtosis.
-  --dki_fa file         Output filename for the fractional anisotropy from DKI.
-  --dki_md file         Output filename for the mean diffusivity from DKI.
-  --dki_ad file         Output filename for the axial diffusivity from DKI.
-  --dki_rd file         Output filename for the radial diffusivity from DKI.
-
-Quality control files flags:
-  --dki_residual file   Output filename for the map of the residual of the tensor fit.
-                        Note. In previous versions, the resulting map was normalized.
-                        It is not anymore.
-  --msd file            Output filename for the mean signal diffusion (powder-average).
diff --git a/scripts/.hidden/scil_dti_convert_tensors.py.help b/scripts/.hidden/scil_dti_convert_tensors.py.help
deleted file mode 100644
index fff01c191..000000000
--- a/scripts/.hidden/scil_dti_convert_tensors.py.help
+++ /dev/null
@@ -1,37 +0,0 @@
-usage: scil_dti_convert_tensors.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                   in_file out_file in_format out_format
-
-Conversion of tensors (the 6 values from the triangular matrix) between various
-software standards. We cannot discover the input format type; the user must know
-how the tensors were created.
-
-    Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]
-    Shape: [i, j , k, 6].
-    Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639
-
-    MRTRIX's order is : [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]
-    Shape: [i, j , k, 6].
-    Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html
-
-    ANTS's order ('nifti format') is : [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].
-    Shape: [i, j , k, 1, 6] (Careful, file is 5D).
-    Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software
-
-    FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]
-    Shape: [i, j , k, 6].
-    Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide
-    (Also used for the Fibernavigator)
-
-
-positional arguments:
-  in_file               Input tensors filename.
-  out_file              Output tensors filename.
-  in_format             Input format. Choices: ['fsl', 'nifti', 'mrtrix', 'dipy']
-  out_format            Output format. Choices: ['fsl', 'nifti', 'mrtrix', 'dipy']
-
-options:
-  -h, --help            show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dti_metrics.py.help b/scripts/.hidden/scil_dti_metrics.py.help
deleted file mode 100644
index 098e0891d..000000000
--- a/scripts/.hidden/scil_dti_metrics.py.help
+++ /dev/null
@@ -1,101 +0,0 @@
-usage: scil_dti_metrics.py [-h] [-f] [--mask MASK] [--method method_name]
-                           [--not_all] [--ad file] [--evecs file]
-                           [--evals file] [--fa file] [--ga file] [--md file]
-                           [--mode file] [--norm file] [--rgb file]
-                           [--rd file] [--tensor file]
-                           [--tensor_format {fsl,nifti,mrtrix,dipy}]
-                           [--non-physical file] [--pulsation string]
-                           [--residual file] [--b0_threshold thr]
-                           [--skip_b0_check] [-v [{DEBUG,INFO,WARNING}]]
-                           in_dwi in_bval in_bvec
-
-Script to compute all of the Diffusion Tensor Imaging (DTI) metrics.
-
-By default, will output all available metrics, using default names. Specific
-names can be specified using the metrics flags that are listed in the "Metrics
-files flags" section.
-
-If --not_all is set, only the metrics specified explicitly by the flags
-will be output. The available metrics are:
-
-fractional anisotropy (FA), geodesic anisotropy (GA), axial diffusivity (AD),
-radial diffusivity (RD), mean diffusivity (MD), mode, red-green-blue colored
-FA (rgb), principal tensor e-vector and tensor coefficients (dxx, dxy, dxz,
-dyy, dyz, dzz).
-
-For all the quality control metrics such as residual, physically implausible
-signals, pulsation and misalignment artifacts, see
-[J-D Tournier, S. Mori, A. Leemans. Diffusion Tensor Imaging and Beyond.
-MRM 2011].
-
-Formerly: scil_compute_dti_metrics.py
-
-positional arguments:
-  in_dwi                Path of the input diffusion volume.
-  in_bval               Path of the bvals file, in FSL format.
-  in_bvec               Path of the bvecs file, in FSL format.
-
-options:
-  -h, --help            show this help message and exit
-  -f                    Force overwriting of the output files.
-  --mask MASK           Path to a binary mask.
-                        Only data inside the mask will be used for computations and reconstruction. (Default: None)
-  --method method_name  Tensor fit method.
-                        WLS for weighted least squares
-                        LS for ordinary least squares
-                        NLLS for non-linear least-squares
-                        restore for RESTORE robust tensor fitting. (Default: WLS)
-  --not_all             If set, will only save the metrics explicitly specified using the other metrics flags. (Default: not set).
-  --b0_threshold thr    Threshold under which b-values are considered to be b0s.
-                        [Default: 20]
-                        * Note. We would expect to find at least one b-value in the
-                        range [0, b0_threshold]. To skip this check, use --skip_b0_check.
-  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
-                        (i.e. b-values below the default --b0_threshold). Use this option to
-                        allow continuing even if the minimum b-value is suspiciously high.
-                        If no b-value is found below the threshold, the script will continue
-                        with your minimal b-value as new --b0_threshold.
-                        Use with care, and only if you understand your data.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-
-Metrics files flags:
-  --ad file             Output filename for the axial diffusivity.
-  --evecs file          Output filename for the eigenvectors of the tensor.
-  --evals file          Output filename for the eigenvalues of the tensor.
-  --fa file             Output filename for the fractional anisotropy.
-  --ga file             Output filename for the geodesic anisotropy.
-  --md file             Output filename for the mean diffusivity.
-  --mode file           Output filename for the mode.
-  --norm file           Output filename for the tensor norm.
-  --rgb file            Output filename for the colored fractional anisotropy.
-  --rd file             Output filename for the radial diffusivity.
-  --tensor file         Output filename for the tensor coefficients.
-  --tensor_format {fsl,nifti,mrtrix,dipy}
-                        Format used for the tensors saved in --tensor file. (default: fsl)
-                        Dipy's order is [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]
-                        Shape: [i, j , k, 6].
-                        Ref: https://github.com/dipy/dipy/blob/master/dipy/reconst/dti.py#L1639
-
-                        MRTRIX's order is : [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz]
-                        Shape: [i, j , k, 6].
-                        Ref: https://mrtrix.readthedocs.io/en/dev/reference/commands/dwi2tensor.html
-
-                        ANTS's order ('nifti format') is : [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].
-                        Shape: [i, j , k, 1, 6] (Careful, file is 5D).
-                        Ref: https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software
-
-                        FSL's order is [Dxx, Dxy, Dxz, Dyy, Dyz, Dzz]
-                        Shape: [i, j , k, 6].
-                        Ref: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide
-                        (Also used for the Fibernavigator)
-
-Quality control files flags:
-  --non-physical file   Output filename for the voxels with physically implausible signals
-                        where the mean of b=0 images is below one or more diffusion-weighted images.
-  --pulsation string    Standard deviation map across all diffusion-weighted images and across b=0 images if more than one is available.
-                        Shows pulsation and misalignment artifacts.
-  --residual file       Output filename for the map of the residual of the tensor fit.
diff --git a/scripts/.hidden/scil_dwi_apply_bias_field.py.help b/scripts/.hidden/scil_dwi_apply_bias_field.py.help
deleted file mode 100644
index 777f5a146..000000000
--- a/scripts/.hidden/scil_dwi_apply_bias_field.py.help
+++ /dev/null
@@ -1,24 +0,0 @@
-usage: scil_dwi_apply_bias_field.py [-h] [--mask MASK]
-                                    [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                    in_dwi in_bias_field out_name
-
-Apply bias field correction to DWI. This script doesn't compute the bias
-field itself. It ONLY applies an existing bias field. Please use the ANTs
-N4BiasFieldCorrection executable to compute the bias field.
-
-Formerly: scil_apply_bias_field_on_dwi.py
-
-positional arguments:
-  in_dwi                DWI Nifti image.
-  in_bias_field         Bias field Nifti image.
-  out_name              Corrected DWI Nifti image.
-
-options:
-  -h, --help            show this help message and exit
-  --mask MASK           Apply bias field correction only in the region defined by the mask.
-                        If this is not given, the bias field is still applied, but only in
-                        non-background data (i.e. where the DWI is not 0).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
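
A hedged end-to-end sketch (filenames are hypothetical; the N4BiasFieldCorrection call assumes standard ANTs syntax, where -o [corrected,bias] also writes the estimated field, here estimated on a b0 image):

    N4BiasFieldCorrection -d 3 -i b0.nii.gz -x brain_mask.nii.gz \
        -o [b0_corrected.nii.gz,bias_field.nii.gz]
    scil_dwi_apply_bias_field.py dwi.nii.gz bias_field.nii.gz dwi_n4.nii.gz \
        --mask brain_mask.nii.gz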
diff --git a/scripts/.hidden/scil_dwi_compute_snr.py.help b/scripts/.hidden/scil_dwi_compute_snr.py.help
deleted file mode 100644
index be4e1a6fb..000000000
--- a/scripts/.hidden/scil_dwi_compute_snr.py.help
+++ /dev/null
@@ -1,59 +0,0 @@
-usage: scil_dwi_compute_snr.py [-h]
-                               [--noise_mask NOISE_MASK | --noise_map NOISE_MAP]
-                               [--b0_thr B0_THR] [--out_basename OUT_BASENAME]
-                               [--split_shells] [--indent INDENT]
-                               [--sort_keys] [-v [{DEBUG,INFO,WARNING}]] [-f]
-                               in_dwi in_bval in_bvec in_mask
-
-Script to compute signal to noise ratio (SNR) in a region of interest (ROI)
-of a DWI volume.
-
-It will compute the SNR for all DWI volumes of the input image separately.
-The output will contain the SNR, which is the ratio
-mean(signal) / std(noise).
-The mean of the signal is computed inside the mask.
-The standard deviation of the noise is estimated inside the noise_mask,
-or inside the same mask if a noise_map is provided.
-If neither is supplied, it will be estimated using the data outside the brain,
-computed with Dipy's median_otsu.
-
-If verbose is True, the SNR for every DWI volume will be output.
-
-This works best in a well-defined ROI such as the corpus callosum.
-It is heavily dependent on the ROI and its quality.
-
-We highly recommend using a noise_map if you can acquire one.
-See refs [1, 2] that describe the noise map acquisition.
-[1] St-Jean, et al (2016). Non Local Spatial and Angular Matching...
-    https://doi.org/10.1016/j.media.2016.02.010
-[2] Reymbaut, et al (2021). Magic DIAMOND...
-    https://doi.org/10.1016/j.media.2021.101988
-
-Formerly: scil_snr_in_roi.py
-
-positional arguments:
-  in_dwi                Path of the input diffusion volume.
-  in_bval               Path of the bvals file, in FSL format.
-  in_bvec               Path of the bvecs file, in FSL format.
-  in_mask               Binary mask of the region used to estimate SNR.
-
-options:
-  -h, --help            show this help message and exit
-  --b0_thr B0_THR       All b-values with values less than or equal to b0_thr are considered as b0s i.e. without diffusion weighting. [0.0]
-  --out_basename OUT_BASENAME
-                        Path and prefix for the various saved files.
-  --split_shells        SNR will be split into shells.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Masks options:
-  --noise_mask NOISE_MASK
-                        Binary mask used to estimate the noise from the DWI.
-  --noise_map NOISE_MAP
-                        Noise map.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_dwi_concatenate.py.help b/scripts/.hidden/scil_dwi_concatenate.py.help
deleted file mode 100644
index a63cde9d5..000000000
--- a/scripts/.hidden/scil_dwi_concatenate.py.help
+++ /dev/null
@@ -1,31 +0,0 @@
-usage: scil_dwi_concatenate.py [-h] [--in_dwis IN_DWIS [IN_DWIS ...]]
-                               [--in_bvals IN_BVALS [IN_BVALS ...]]
-                               [--in_bvecs IN_BVECS [IN_BVECS ...]]
-                               [--data_type DATA_TYPE]
-                               [-v [{DEBUG,INFO,WARNING}]] [-f]
-                               out_dwi out_bval out_bvec
-
-Concatenate DWI, bval and bvecs together. Files must be specified in matching
-order. Default data type will be the same as the first input DWI.
-
-Formerly: scil_concatenate_dwi.py
-
-positional arguments:
-  out_dwi               The name of the output DWI file.
-  out_bval              The name of the output b-values file (.bval).
-  out_bvec              The name of the output b-vectors file (.bvec).
-
-options:
-  -h, --help            show this help message and exit
-  --in_dwis IN_DWIS [IN_DWIS ...]
-                        The DWI files (.nii) to concatenate.
-  --in_bvals IN_BVALS [IN_BVALS ...]
-                        The b-values files in FSL format (.bval).
-  --in_bvecs IN_BVECS [IN_BVECS ...]
-                        The b-vectors files in FSL format (.bvec).
-  --data_type DATA_TYPE
-                        Data type of the output image. Use the format: uint8, int16, int/float32, int/float64.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dwi_convert_FDF.py.help b/scripts/.hidden/scil_dwi_convert_FDF.py.help
deleted file mode 100644
index 19e401845..000000000
--- a/scripts/.hidden/scil_dwi_convert_FDF.py.help
+++ /dev/null
@@ -1,31 +0,0 @@
-usage: scil_dwi_convert_FDF.py [-h] [--bval BVAL] [--bvec BVEC]
-                               [--flip dimension [dimension ...]]
-                               [--swap dimension [dimension ...]]
-                               [-v [{DEBUG,INFO,WARNING}]] [-f]
-                               in_b0_path in_dwi_path out_path
-
-Converts a Varian FDF file or directory to a nifti file.
-If the procpar contains diffusion information, it will be saved as bval and
-bvec in the same folder as the output file.
-
-ex: scil_dwi_convert_FDF.py semsdw/b0_folder/ semsdw/dwi_folder/ dwi.nii.gz --bval dwi.bval --bvec dwi.bvec -f
-
-Formerly: scil_convert_fdf.py
-
-positional arguments:
-  in_b0_path            Path to the b0 FDF file or folder to convert.
-  in_dwi_path           Path to the DWI FDF file or folder to convert.
-  out_path              Path to the nifti file to write on disk.
-
-options:
-  -h, --help            show this help message and exit
-  --bval BVAL           Path to the bval file to write on disk.
-  --bvec BVEC           Path to the bvec file to write on disk.
-  --flip dimension [dimension ...]
-                        The axes you want to flip. eg: to flip the x and y axes use: x y. [None]
-  --swap dimension [dimension ...]
-                        The axes you want to swap. eg: to swap the x and y axes use: x y. [None]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dwi_detect_volume_outliers.py.help b/scripts/.hidden/scil_dwi_detect_volume_outliers.py.help
deleted file mode 100644
index 8e55b96e5..000000000
--- a/scripts/.hidden/scil_dwi_detect_volume_outliers.py.help
+++ /dev/null
@@ -1,39 +0,0 @@
-usage: scil_dwi_detect_volume_outliers.py [-h] [--std_scale STD_SCALE]
-                                          [--b0_threshold thr]
-                                          [--skip_b0_check]
-                                          [-v [{DEBUG,INFO,WARNING}]]
-                                          in_dwi in_bval in_bvec
-
-This script simply finds the 3 closest angular neighbors of each direction
-(per shell) and computes the voxel-wise correlation.
-If the angles or correlations to neighbors are below the shell average (by
-args.std_scale x STD), it will flag the volume as a potential outlier.
-
-This script supports multi-shell data, but each shell is independent and
-detected using the --b0_threshold parameter.
-
-This script can be run before any processing to identify potential problems
-before launching pre-processing.
-
-positional arguments:
-  in_dwi                The DWI file (.nii) to check.
-  in_bval               The b-values file in FSL format (.bval).
-  in_bvec               The b-vectors file in FSL format (.bvec).
-
-options:
-  -h, --help            show this help message and exit
-  --std_scale STD_SCALE
-                        How many standard deviations from the mean are required to be considered an outlier. [2.0]
-  --b0_threshold thr    Threshold under which b-values are considered to be b0s.
-                        [Default: 20]
-                        * Note. We would expect to find at least one b-value in the
-                        range [0, b0_threshold]. To skip this check, use --skip_b0_check.
-  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
-                        (i.e. b-values below the default --b0_threshold). Use this option to
-                        allow continuing even if the minimum b-value is suspiciously high.
-                        If no b-value is found below the threshold, the script will continue
-                        with your minimal b-value as new --b0_threshold.
-                        Use with care, and only if you understand your data.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
diff --git a/scripts/.hidden/scil_dwi_extract_b0.py.help b/scripts/.hidden/scil_dwi_extract_b0.py.help
deleted file mode 100644
index 35b655d14..000000000
--- a/scripts/.hidden/scil_dwi_extract_b0.py.help
+++ /dev/null
@@ -1,46 +0,0 @@
-usage: scil_dwi_extract_b0.py [-h]
-                              [--all | --mean | --cluster-mean | --cluster-first]
-                              [--block-size INT] [--single-image]
-                              [--b0_threshold thr] [--skip_b0_check]
-                              [-v [{DEBUG,INFO,WARNING}]] [-f]
-                              in_dwi in_bval in_bvec out_b0
-
-Extract B0s from DWI, based on the bval and bvec information.
-
-The default behavior is to save the first b0 of the series.
-
-Formerly: scil_extract_b0.py
-
-positional arguments:
-  in_dwi                DWI Nifti image.
-  in_bval               b-values filename, in FSL format (.bval).
-  in_bvec               b-vectors filename, in FSL format (.bvec).
-  out_b0                Output b0 file(s).
-
-options:
-  -h, --help            show this help message and exit
-  --block-size INT, -s INT
-                        Load the data using this block size. Useful
-                        when the data is too large to be loaded in memory.
-  --single-image        If the output b0 volume has multiple time points, only outputs a single
-                        image instead of a numbered series of images.
-  --b0_threshold thr    Threshold under which b-values are considered to be b0s.
-                        [Default: 20]
-                        * Note. We would expect to find at least one b-value in the
-                        range [0, b0_threshold]. To skip this check, use --skip_b0_check.
-  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
-                        (i.e. b-values below the default --b0_threshold). Use this option to
-                        allow continuing even if the minimum b-value is suspiciously high.
-                        If no b-value is found below the threshold, the script will continue
-                        with your minimal b-value as new --b0_threshold.
-                        Use with care, and only if you understand your data.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Options in the case of multiple b0s:
-  --all                 Extract all b0s. Index number will be appended to the output file.
-  --mean                Extract mean b0.
-  --cluster-mean        Extract mean of each continuous cluster of b0s.
-  --cluster-first       Extract first b0 of each continuous cluster of b0s.
diff --git a/scripts/.hidden/scil_dwi_extract_shell.py.help b/scripts/.hidden/scil_dwi_extract_shell.py.help
deleted file mode 100644
index 25edd5c89..000000000
--- a/scripts/.hidden/scil_dwi_extract_shell.py.help
+++ /dev/null
@@ -1,45 +0,0 @@
-usage: scil_dwi_extract_shell.py [-h] [--out_indices OUT_INDICES]
-                                 [--block-size INT] [--tolerance INT]
-                                 [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                 in_dwi in_bval in_bvec in_bvals_to_extract
-                                 [in_bvals_to_extract ...] out_dwi out_bval
-                                 out_bvec
-
-Extracts the DWI volumes that are on specific b-value shells. Many shells
-can be extracted at once by specifying multiple b-values. The extracted
-volumes are in the same order as in the original file.
-
-If the b-values of a shell are not all identical, use the --tolerance
-argument to adjust the accepted interval. For example, a b-value of 2000
-and a tolerance of 20 will extract all volumes with b-values from 1980 to
-2020.
-
-Files that are too large to be loaded in memory can still be processed by
-setting the --block-size argument. A block size of X means that X DWI volumes
-are loaded at a time for processing.
-
-Formerly: scil_extract_dwi_shell.py
-
-positional arguments:
-  in_dwi                The DW image file to split.
-  in_bval               The b-values file in FSL format (.bval).
-  in_bvec               The b-vectors file in FSL format (.bvec).
-  in_bvals_to_extract   The list of b-values to extract. For example 0 2000.
-  out_dwi               The name of the output DWI file.
-  out_bval              The name of the output b-value file (.bval).
-  out_bvec              The name of the output b-vector file (.bvec).
-
-options:
-  -h, --help            show this help message and exit
-  --out_indices OUT_INDICES
-                        Optional filename for the valid indices in the input DWI volume.
-  --block-size INT, -s INT
-                        Loads the data using this block size. Useful
-                        when the data is too large to be loaded in memory.
-  --tolerance INT, -t INT
-                        The tolerated gap between the b-values to extract
-                        and the actual b-values. [20]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dwi_powder_average.py.help b/scripts/.hidden/scil_dwi_powder_average.py.help
deleted file mode 100644
index 213159c11..000000000
--- a/scripts/.hidden/scil_dwi_powder_average.py.help
+++ /dev/null
@@ -1,40 +0,0 @@
-usage: scil_dwi_powder_average.py [-h] [-f] [--mask file] [--b0_thr B0_THR]
-                                  [--shells SHELLS [SHELLS ...]]
-                                  [--shell_thr SHELL_THR]
-                                  [-v [{DEBUG,INFO,WARNING}]]
-                                  in_dwi in_bval out_avg
-
-Script to compute the powder average (mean diffusion weighted image) from a
-set of diffusion images.
-
-By default, will output an average image calculated from all images with a
-non-zero bvalue.
-
-Specify --shells to output an image for specific shells only.
-
-The script currently does not take into account the diffusion gradient
-directions being averaged.
-
-Formerly: scil_compute_powder_average.py
-
-positional arguments:
-  in_dwi                Path of the input diffusion volume.
-  in_bval               Path of the bvals file, in FSL format.
-  out_avg               Path of the output file.
-
-options:
-  -h, --help            show this help message and exit
-  -f                    Force overwriting of the output files.
-  --mask file           Path to a binary mask.
-                        Only data inside the mask will be used for the powder avg. (Default: None)
-  --b0_thr B0_THR       Exclude b0 volumes (with a bvalue less than the specified threshold)
-                        from the powder average. (Default: remove volumes with bvalue < 50)
-  --shells SHELLS [SHELLS ...]
-                        b-values (shells) to include in the powder average, passed as a list
-                        (e.g. --shells 1000 2000). If not specified, will include all volumes with a non-zero bvalue.
-  --shell_thr SHELL_THR
-                        Include volumes with bvalue +- the specified threshold.
-                        (Default: 50)
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
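-
-Example (a minimal sketch; filenames and the chosen shell are hypothetical):
->>> scil_dwi_powder_average.py dwi.nii.gz dwi.bval pwd_avg.nii.gz --shells 1000 --mask brain_mask.nii.gz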
diff --git a/scripts/.hidden/scil_dwi_prepare_eddy_command.py.help b/scripts/.hidden/scil_dwi_prepare_eddy_command.py.help
deleted file mode 100644
index 10d0c9580..000000000
--- a/scripts/.hidden/scil_dwi_prepare_eddy_command.py.help
+++ /dev/null
@@ -1,64 +0,0 @@
-usage: scil_dwi_prepare_eddy_command.py [-h] [--n_reverse N_REVERSE]
-                                        [--topup TOPUP]
-                                        [--topup_params TOPUP_PARAMS]
-                                        [--eddy_cmd {eddy_openmp,eddy_cuda,eddy_cuda8.0,eddy_cuda9.1,eddy_cuda10.2,eddy,eddy_cpu}]
-                                        [--b0_thr B0_THR]
-                                        [--encoding_direction {x,y,z}]
-                                        [--readout READOUT]
-                                        [--slice_drop_correction]
-                                        [--lsr_resampling]
-                                        [--out_directory OUT_DIRECTORY]
-                                        [--out_prefix OUT_PREFIX]
-                                        [--out_script] [--fix_seed]
-                                        [--eddy_options EDDY_OPTIONS]
-                                        [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                        in_dwi in_bvals in_bvecs in_mask
-
-Prepare a typical command for eddy and create the necessary files. When using
-multiple acquisitions and/or opposite phase directions, images, b-values and
-b-vectors should be merged together using scil_dwi_concatenate.py. If using
-topup prior to calling this script, images should be concatenated in the same
-order as the b0s used with prepare_topup.
-
-Formerly: scil_prepare_eddy_command.py
-
-positional arguments:
-  in_dwi                Input DWI Nifti image. If using multiple acquisitions and/or opposite phase directions, please merge in the same order as for prepare_topup using scil_dwi_concatenate.py.
-  in_bvals              Input b-values file in FSL format.
-  in_bvecs              Input b-vectors file in FSL format.
-  in_mask               Binary brain mask.
-
-options:
-  -h, --help            show this help message and exit
-  --n_reverse N_REVERSE
-                        Number of reverse phase volumes included in the DWI image [0].
-  --topup TOPUP         Topup output name. If given, apply topup during eddy.
-                        Should be the same as --out_prefix from scil_dwi_prepare_topup_command.py.
-  --topup_params TOPUP_PARAMS
-                        Parameters file (typically named acqparams) used to run topup.
-  --eddy_cmd {eddy_openmp,eddy_cuda,eddy_cuda8.0,eddy_cuda9.1,eddy_cuda10.2,eddy,eddy_cpu}
-                        Eddy command [eddy_openmp].
-  --b0_thr B0_THR       All b-values with values less than or equal to b0_thr are considered
-                        as b0s i.e. without diffusion weighting [20].
-  --encoding_direction {x,y,z}
-                        Acquisition direction, default is AP-PA [y].
-  --readout READOUT     Total readout time from the DICOM metadata [0.062].
-  --slice_drop_correction
-                        If set, will activate eddy's outlier correction,
-                        which includes slice drop correction.
-  --lsr_resampling      Perform least-square resampling, allowing eddy to combine forward and reverse phase acquisitions for better reconstruction. Only works if directions and b-values are identical in both phase directions.
-  --out_directory OUT_DIRECTORY
-                        Output directory for eddy files [.].
-  --out_prefix OUT_PREFIX
-                        Prefix of the eddy-corrected DWI [dwi_eddy_corrected].
-  --out_script          If set, will output a .sh script (eddy.sh).
-                        Else, will output the lines to the terminal [False].
-  --fix_seed            If set, will use the fixed seed strategy for eddy.
-                        Enhances reproducibility.
-  --eddy_options EDDY_OPTIONS
-                        Additional options you want to use to run eddy.
-                        Add these options using quotes (i.e. "--ol_nstd=6 --mb=4").
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
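-
-Example (a minimal sketch; filenames and the number of reverse volumes are
-hypothetical, assuming topup was prepared first with its default prefix):
->>> scil_dwi_prepare_eddy_command.py dwi_all.nii.gz all.bval all.bvec brain_mask.nii.gz --topup topup_results --n_reverse 6 --out_script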
diff --git a/scripts/.hidden/scil_dwi_prepare_topup_command.py.help b/scripts/.hidden/scil_dwi_prepare_topup_command.py.help
deleted file mode 100644
index b23857556..000000000
--- a/scripts/.hidden/scil_dwi_prepare_topup_command.py.help
+++ /dev/null
@@ -1,44 +0,0 @@
-usage: scil_dwi_prepare_topup_command.py [-h] [--config CONFIG] [--synb0]
-                                         [--encoding_direction {x,y,z}]
-                                         [--readout READOUT]
-                                         [--out_b0s OUT_B0S]
-                                         [--out_directory OUT_DIRECTORY]
-                                         [--out_prefix OUT_PREFIX]
-                                         [--out_params OUT_PARAMS]
-                                         [--out_script]
-                                         [--topup_options TOPUP_OPTIONS]
-                                         [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                         in_forward_b0 in_reverse_b0
-
-Prepare a typical command for topup and create the necessary files.
-The reversed b0 must be in a different file.
-
-Formerly: scil_prepare_topup_command.py
-
-positional arguments:
-  in_forward_b0         Input b0 Nifti image with forward phase encoding.
-  in_reverse_b0         Input b0 Nifti image with reversed phase encoding.
-
-options:
-  -h, --help            show this help message and exit
-  --config CONFIG       Topup config file [b02b0.cnf].
-  --synb0               If set, will use the SyNb0 custom acqparams file.
-  --encoding_direction {x,y,z}
-                        Acquisition direction of the forward b0 image, default is AP [y].
-  --readout READOUT     Total readout time from the DICOM metadata [0.062].
-  --out_b0s OUT_B0S     Output fused b0 file [fused_b0s.nii.gz].
-  --out_directory OUT_DIRECTORY
-                        Output directory for topup files [.].
-  --out_prefix OUT_PREFIX
-                        Prefix of the topup results [topup_results].
-  --out_params OUT_PARAMS
-                        Filename for the acquisition parameters file [acqparams.txt].
-  --out_script          If set, will output a .sh script (topup.sh).
-                        Else, will output the lines to the terminal [False].
-  --topup_options TOPUP_OPTIONS
-                        Additional options you want to use to run topup.
-                        Add these options using quotes (i.e. "--fwhm=6 --miter=4").
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dwi_reorder_philips.py.help b/scripts/.hidden/scil_dwi_reorder_philips.py.help
deleted file mode 100644
index 8c01e8740..000000000
--- a/scripts/.hidden/scil_dwi_reorder_philips.py.help
+++ /dev/null
@@ -1,24 +0,0 @@
-usage: scil_dwi_reorder_philips.py [-h] [--json JSON]
-                                   [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                   in_dwi in_bval in_bvec in_table
-                                   out_basename
-
-Re-orders the gradients according to the original table (Philips).
-This script is not needed for version 5.6 and higher.
-
-Formerly: scil_reorder_dwi_philips.py
-
-positional arguments:
-  in_dwi                Input dwi file.
-  in_bval               Input bval FSL format.
-  in_bvec               Input bvec FSL format.
-  in_table              Original Philips table - first line is skipped.
-  out_basename          Basename of the output files.
-
-options:
-  -h, --help            show this help message and exit
-  --json JSON           If you give a json file, it will check if you need to reorder your Philips DWI.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dwi_split_by_indices.py.help b/scripts/.hidden/scil_dwi_split_by_indices.py.help
deleted file mode 100644
index 562f2d0aa..000000000
--- a/scripts/.hidden/scil_dwi_split_by_indices.py.help
+++ /dev/null
@@ -1,28 +0,0 @@
-usage: scil_dwi_split_by_indices.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                    in_dwi in_bval in_bvec out_basename
-                                    split_indices [split_indices ...]
-
-Splits the DWI image at certain indices along the last dimension (b-values).
-Many indices can be given at once by specifying multiple values. The split
-volumes are in the same order as in the original file. Also outputs the
-corresponding .bval and .bvec files.
-
-This script can be useful for splitting images at places where a b-value
-extraction does not work. For instance, if one wants to split the x first
-b-1500s from the rest of the b-1500s in an image, simply put x as an index.
-
-Formerly: scil_split_image.py
-
-positional arguments:
-  in_dwi                The DW image file to split.
-  in_bval               The b-values file in FSL format (.bval).
-  in_bvec               The b-vectors file in FSL format (.bvec).
-  out_basename          The basename of the output files. Index numbers will be appended to out_basename. For example, if split_indices were 3 10, the files would be saved as out_basename_0_2, out_basename_3_10, out_basename_11_20, where the size of the last dimension is 21 in this example.
-  split_indices         The list of indices where to split the image. For example 3 10. This would split the image in three parts, such as [:3], [3:10], [10:]. Indices must be in increasing order.
-
-options:
-  -h, --help            show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_dwi_to_sh.py.help b/scripts/.hidden/scil_dwi_to_sh.py.help
deleted file mode 100644
index d5f63057a..000000000
--- a/scripts/.hidden/scil_dwi_to_sh.py.help
+++ /dev/null
@@ -1,50 +0,0 @@
-usage: scil_dwi_to_sh.py [-h] [--sh_order SH_ORDER]
-                         [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
-                         [--smooth SMOOTH] [--use_attenuation] [--mask MASK]
-                         [--b0_threshold thr] [--skip_b0_check]
-                         [-v [{DEBUG,INFO,WARNING}]] [-f]
-                         in_dwi in_bval in_bvec out_sh
-
-Script to compute the SH coefficients directly on the raw DWI signal.
-
-Formerly: scil_compute_sh_from_signal.py
-
-positional arguments:
-  in_dwi                Path of the dwi volume.
-  in_bval               Path of the b-value file, in FSL format.
-  in_bvec               Path of the b-vector file, in FSL format.
-  out_sh                Name of the output SH file to save.
-
-options:
-  -h, --help            show this help message and exit
-  --sh_order SH_ORDER   SH order to fit (int). [4]
-  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
-                        Spherical harmonics basis used for the SH coefficients.
-                        Must be either 'descoteaux07', 'tournier07',
-                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
-                            'descoteaux07'       : SH basis from the Descoteaux et al.
-                                                   MRM 2007 paper
-                            'tournier07'         : SH basis from the new Tournier et al.
-                                                   NeuroImage 2019 paper, as in MRtrix 3.
-                            'descoteaux07_legacy': SH basis from the legacy Dipy implementation
-                                                   of the Descoteaux et al. MRM 2007 paper
-                            'tournier07_legacy'  : SH basis from the legacy Tournier et al.
-                                                   NeuroImage 2007 paper.
-  --smooth SMOOTH       Lambda-regularization coefficient in the SH fit (float). [0.006]
-  --use_attenuation     If set, will use signal attenuation before fitting the SH (i.e. divide by the b0).
-  --mask MASK           Path to a binary mask.
-                        Only data inside the mask will be used for computations and reconstruction.
-  --b0_threshold thr    Threshold under which b-values are considered to be b0s.
-                        [Default: 20]
-                        * Note. We would expect to find at least one b-value in the
-                        range [0, b0_threshold]. To skip this check, use --skip_b0_check.
-  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
-                        (i.e. b-values below the default --b0_threshold). Use this option to
-                        allow continuing even if the minimum b-value is suspiciously high.
-                        If no b-value is found below the threshold, the script will continue
-                        with your minimal b-value as new --b0_threshold.
-                        Use with care, and only if you understand your data.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_fodf_max_in_ventricles.py.help b/scripts/.hidden/scil_fodf_max_in_ventricles.py.help
deleted file mode 100644
index b439b0690..000000000
--- a/scripts/.hidden/scil_fodf_max_in_ventricles.py.help
+++ /dev/null
@@ -1,56 +0,0 @@
-usage: scil_fodf_max_in_ventricles.py [-h] [--fa_threshold FA_THRESHOLD]
-                                      [--md_threshold MD_THRESHOLD]
-                                      [--max_value_output file]
-                                      [--mask_output file] [--small_dims]
-                                      [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
-                                      [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                      fODFs FA MD
-
-Script to compute the maximum fODF in the ventricles. The ventricles are
-estimated from an MD and FA threshold.
-
-This allows clipping the noise of the fODF using an absolute threshold.
-
-Formerly: scil_compute_fodf_max_in_ventricles.py
-
-positional arguments:
-  fODFs                 Path of the fODF volume in spherical harmonics (SH).
-  FA                    Path to the FA volume.
-  MD                    Path to the mean diffusivity (MD) volume.
-
-options:
-  -h, --help            show this help message and exit
-  --fa_threshold FA_THRESHOLD
-                        Maximal threshold of FA (voxels under that threshold are considered
-                        for evaluation). [0.1]
-  --md_threshold MD_THRESHOLD
-                        Minimal threshold of MD in mm2/s (voxels above that threshold are
-                        considered for evaluation). [0.003]
-  --max_value_output file
-                        Output path for the text file containing the value. If not set, the
-                        file will not be saved.
-  --mask_output file    Output path for the ventricle mask. If not set, the mask
-                        will not be saved.
-  --small_dims          If set, takes the full range of data to search the max fodf amplitude
-                        in ventricles. Useful when the data has small dimensions.
-  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
-                        Spherical harmonics basis used for the SH coefficients.
-                        Must be either 'descoteaux07', 'tournier07',
-                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
-                            'descoteaux07'       : SH basis from the Descoteaux et al.
-                                                   MRM 2007 paper
-                            'tournier07'         : SH basis from the new Tournier et al.
-                                                   NeuroImage 2019 paper, as in MRtrix 3.
-                            'descoteaux07_legacy': SH basis from the legacy Dipy implementation
-                                                   of the Descoteaux et al. MRM 2007 paper
-                            'tournier07_legacy'  : SH basis from the legacy Tournier et al.
-                                                   NeuroImage 2007 paper.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-[1] Dell'Acqua, Flavio, et al. "Can spherical deconvolution provide more
-    information than fiber orientations? Hindrance modulated orientational
-    anisotropy, a true-tract specific index to characterize white matter
-    diffusion." Human brain mapping 34.10 (2013): 2464-2483.
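-
-Example (a minimal sketch; filenames are hypothetical). The printed maximum is
-typically used to set the --at threshold of scil_fodf_metrics.py:
->>> scil_fodf_max_in_ventricles.py fodf.nii.gz fa.nii.gz md.nii.gz --max_value_output vent_max.txt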
diff --git a/scripts/.hidden/scil_fodf_memsmt.py.help b/scripts/.hidden/scil_fodf_memsmt.py.help
deleted file mode 100644
index a83299d57..000000000
--- a/scripts/.hidden/scil_fodf_memsmt.py.help
+++ /dev/null
@@ -1,99 +0,0 @@
-usage: scil_fodf_memsmt.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals
-                           IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS
-                           [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}
-                           [{0,1,-0.5,0.5} ...] [--sh_order int] [--mask MASK]
-                           [--tolerance tol] [--skip_b0_check]
-                           [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
-                           [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f]
-                           [--not_all] [--wm_out_fODF file]
-                           [--gm_out_fODF file] [--csf_out_fODF file]
-                           [--vf file] [--vf_rgb file]
-                           in_wm_frf in_gm_frf in_csf_frf
-
-Script to compute multi-encoding multi-shell multi-tissue (memsmt)
-Constrained Spherical Deconvolution ODFs.
-
-In order to operate, the script only needs the data from one type of b-tensor
-encoding. However, giving only a spherical one will not produce good fODFs, as
-it only probes spherical shapes. As for planar encoding, it should technically
-work alone, but seems to be very sensitive to noise and is yet to be properly
-documented. We thus suggest always using at least the linear encoding, which
-will be equivalent to standard multi-shell multi-tissue if used alone, in
-combination with other encodings. Note that custom encodings are not yet
-supported, so only the linear tensor encoding (LTE, b_delta = 1), the
-planar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding
-(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are
-available.
-
-All of `--in_dwis`, `--in_bvals`, `--in_bvecs` and `--in_bdeltas` must have the
-same number of arguments. Be sure to keep the same order of encodings
-throughout all these inputs and to set `--in_bdeltas` accordingly (IMPORTANT).
-
-By default, will output all possible files, using default names.
-Specific names can be specified using the file flags specified in the
-"File flags" section.
-
-If --not_all is set, only the files specified explicitly by the flags
-will be output.
-
->>> scil_fodf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz
-    PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs
-    LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz
-
-Based on P. Karan et al., Bridging the gap between constrained spherical
-deconvolution and diffusional variance decomposition via tensor-valued
-diffusion MRI. Medical Image Analysis (2022)
-
-Formerly: scil_compute_memsmt_fodf.py
-
-positional arguments:
-  in_wm_frf             Text file of WM response function.
-  in_gm_frf             Text file of GM response function.
-  in_csf_frf            Text file of CSF response function.
-
-options:
-  -h, --help            show this help message and exit
-  --in_dwis IN_DWIS [IN_DWIS ...]
-                        Path to the input diffusion volume for each b-tensor encoding type.
-  --in_bvals IN_BVALS [IN_BVALS ...]
-                        Path to the bval file, in FSL format, for each b-tensor encoding type.
-  --in_bvecs IN_BVECS [IN_BVECS ...]
-                        Path to the bvec file, in FSL format, for each b-tensor encoding type.
-  --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]
-                        Value of b_delta for each b-tensor encoding type, in the same order as dwi, bval and bvec inputs.
-  --sh_order int        SH order used for the CSD. (Default: 8)
-  --mask MASK           Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.
-  --tolerance tol       The tolerated gap between the b-values to extract and the current b-value.
-                        [Default: 20]
-                        * Note. We would expect to find at least one b-value in the
-                        range [0, tolerance]. To skip this check, use --skip_b0_check.
-  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
-                        (i.e. b-values below the default --tolerance). Use this option to
-                        allow continuing even if the minimum b-value is suspiciously high.
-                        Use with care, and only if you understand your data.
-  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
-                        Spherical harmonics basis used for the SH coefficients.
-                        Must be either 'descoteaux07', 'tournier07',
-                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
-                            'descoteaux07'       : SH basis from the Descoteaux et al.
-                                                   MRM 2007 paper
-                            'tournier07'         : SH basis from the new Tournier et al.
-                                                   NeuroImage 2019 paper, as in MRtrix 3.
-                            'descoteaux07_legacy': SH basis from the legacy Dipy implementation
-                                                   of the Descoteaux et al. MRM 2007 paper
-                            'tournier07_legacy'  : SH basis from the legacy Tournier et al.
-                                                   NeuroImage 2007 paper.
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-  --not_all             If set, only saves the files specified using the file flags. (Default: False)
-
-File flags:
-  --wm_out_fODF file    Output filename for the WM fODF coefficients.
-  --gm_out_fODF file    Output filename for the GM fODF coefficients.
-  --csf_out_fODF file   Output filename for the CSF fODF coefficients.
-  --vf file             Output filename for the volume fractions map.
-  --vf_rgb file         Output filename for the volume fractions map in rgb.
diff --git a/scripts/.hidden/scil_fodf_metrics.py.help b/scripts/.hidden/scil_fodf_metrics.py.help
deleted file mode 100644
index 17b3aa757..000000000
--- a/scripts/.hidden/scil_fodf_metrics.py.help
+++ /dev/null
@@ -1,88 +0,0 @@
-usage: scil_fodf_metrics.py [-h] [--sphere string] [--mask] [--at A_THRESHOLD]
-                            [--rt R_THRESHOLD] [--abs_peaks_and_values]
-                            [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
-                            [-v [{DEBUG,INFO,WARNING}]] [--processes NBR] [-f]
-                            [--not_all] [--afd_max file] [--afd_total file]
-                            [--afd_sum file] [--nufo file] [--rgb file]
-                            [--peaks file] [--peak_values file]
-                            [--peak_indices file]
-                            in_fODF
-
-Script to compute the maximum Apparent Fiber Density (AFD), the fiber ODF
-orientations, values and indices (peaks, peak_values, peak_indices), the Number
-of Fiber Orientations (NuFO) maps from fiber ODFs, and the RGB map.
-
-AFD_max map is the maximal fODF amplitude for each voxel.
-
-NuFO is the number of maxima of the fODF with an ABSOLUTE amplitude above
-the threshold set using --at, AND an amplitude above the RELATIVE threshold
-set using --rt.
-
-The --at argument should be set to a value which is 1.5 times the maximal
-value of the fODF in the ventricles. This can be obtained with the
-scil_fodf_max_in_ventricles.py script.
-
-If the --abs_peaks_and_values argument is set, the peaks are all normalized
-and the peak_values are equal to the actual fODF amplitude of the peaks. By
-default, the script max-normalizes the peak_values for each voxel and
-multiplies the peaks by peak_values.
-
-By default, will output all possible files, using default names. Specific names
-can be specified using the file flags specified in the "File flags" section.
-
-If --not_all is set, only the files specified explicitly by the flags will be
-output.
-
-See [Raffelt et al. NeuroImage 2012] and [Dell'Acqua et al HBM 2013] for the
-definitions.
-
-Formerly: scil_compute_fodf_metrics.py
-
-positional arguments:
-  in_fODF               Path of the fODF volume in spherical harmonics (SH).
-
-options:
-  -h, --help            show this help message and exit
-  --sphere string       Discrete sphere to use in the processing [repulsion724].
-  --mask                Path to a binary mask. Only the data inside the mask
-                        will be used for computations and reconstruction [None].
-  --at A_THRESHOLD      Absolute threshold on fODF amplitude. This value should be set to
-                        approximately 1.5 to 2 times the maximum fODF amplitude in isotropic voxels
-                        (i.e. ventricles).
-                        Use scil_fodf_max_in_ventricles.py to find the maximal value.
-                        See [Dell'Acqua et al HBM 2013] [0.0].
-  --rt R_THRESHOLD      Relative threshold on fODF amplitude in percentage [0.1].
-  --abs_peaks_and_values
-                        If set, the peak_values are not max-normalized for each voxel,
-                        but rather they keep the actual fODF amplitude of the peaks.
-                        Also, the peaks are given as unit directions instead of being proportional to peak_values. [False]
-  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
-                        Spherical harmonics basis used for the SH coefficients.
-                        Must be either 'descoteaux07', 'tournier07',
-                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
-                            'descoteaux07'       : SH basis from the Descoteaux et al.
-                                                   MRM 2007 paper
-                            'tournier07'         : SH basis from the new Tournier et al.
-                                                   NeuroImage 2019 paper, as in MRtrix 3.
-                            'descoteaux07_legacy': SH basis from the legacy Dipy implementation
-                                                   of the Descoteaux et al. MRM 2007 paper
-                            'tournier07_legacy'  : SH basis from the legacy Tournier et al.
-                                                   NeuroImage 2007 paper.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  -f                    Force overwriting of the output files.
-  --not_all             If set, only saves the files specified using the file flags [False].
-
-File flags:
-  --afd_max file        Output filename for the AFD_max map.
-  --afd_total file      Output filename for the AFD_total map (SH coeff = 0).
-  --afd_sum file        Output filename for the sum of all peak contributions
-                        (sum of fODF lobes on the sphere).
-  --nufo file           Output filename for the NuFO map.
-  --rgb file            Output filename for the RGB map.
-  --peaks file          Output filename for the extracted peaks.
-  --peak_values file    Output filename for the extracted peaks values.
-  --peak_indices file   Output filename for the generated peaks indices on the sphere.
diff --git a/scripts/.hidden/scil_fodf_msmt.py.help b/scripts/.hidden/scil_fodf_msmt.py.help
deleted file mode 100644
index a1b20c06c..000000000
--- a/scripts/.hidden/scil_fodf_msmt.py.help
+++ /dev/null
@@ -1,71 +0,0 @@
-usage: scil_fodf_msmt.py [-h] [--sh_order int] [--mask] [--tolerance tol]
-                         [--skip_b0_check]
-                         [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
-                         [--processes NBR] [--not_all] [--wm_out_fODF file]
-                         [--gm_out_fODF file] [--csf_out_fODF file]
-                         [--vf file] [--vf_rgb file]
-                         [-v [{DEBUG,INFO,WARNING}]] [-f]
-                         in_dwi in_bval in_bvec in_wm_frf in_gm_frf in_csf_frf
-
-Script to compute Multi-shell Multi-tissue Constrained Spherical Deconvolution
-ODFs.
-
-By default, will output all possible files, using default names.
-Specific names can be specified using the file flags specified in the
-"File flags" section.
-
-If --not_all is set, only the files specified explicitly by the flags
-will be output.
-
-Based on B. Jeurissen et al., Multi-tissue constrained spherical
-deconvolution for improved analysis of multi-shell diffusion
-MRI data. Neuroimage (2014)
-
-Formerly: scil_compute_msmt_fodf.py
-
-positional arguments:
-  in_dwi                Path of the input diffusion volume.
-  in_bval               Path of the bval file, in FSL format.
-  in_bvec               Path of the bvec file, in FSL format.
-  in_wm_frf             Text file of WM response function.
-  in_gm_frf             Text file of GM response function.
-  in_csf_frf            Text file of CSF response function.
-
-options:
-  -h, --help            show this help message and exit
-  --sh_order int        SH order used for the CSD. (Default: 8)
-  --mask                Path to a binary mask. Only the data inside the mask will be used for computations and reconstruction.
-  --tolerance tol       The tolerated gap between the b-values to extract and the current b-value.
-                        [Default: 20]
-                        * Note. We would expect to find at least one b-value in the
-                        range [0, tolerance]. To skip this check, use --skip_b0_check.
-  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
-                        (i.e. b-values below the default --tolerance). Use this option to
-                        allow continuing even if the minimum b-value is suspiciously high.
-                        Use with care, and only if you understand your data.
-  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
-                        Spherical harmonics basis used for the SH coefficients.
-                        Must be either 'descoteaux07', 'tournier07',
-                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
-                            'descoteaux07'       : SH basis from the Descoteaux et al.
-                                                   MRM 2007 paper
-                            'tournier07'         : SH basis from the new Tournier et al.
-                                                   NeuroImage 2019 paper, as in MRtrix 3.
-                            'descoteaux07_legacy': SH basis from the legacy Dipy implementation
-                                                   of the Descoteaux et al. MRM 2007 paper
-                            'tournier07_legacy'  : SH basis from the legacy Tournier et al.
-                                                   NeuroImage 2007 paper.
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  --not_all             If set, only saves the files specified using the file flags. (Default: False)
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-File flags:
-  --wm_out_fODF file    Output filename for the WM fODF coefficients.
-  --gm_out_fODF file    Output filename for the GM fODF coefficients.
-  --csf_out_fODF file   Output filename for the CSF fODF coefficients.
-  --vf file             Output filename for the volume fractions map.
-  --vf_rgb file         Output filename for the volume fractions map in rgb.
diff --git a/scripts/.hidden/scil_fodf_ssst.py.help b/scripts/.hidden/scil_fodf_ssst.py.help
deleted file mode 100644
index 6542f859f..000000000
--- a/scripts/.hidden/scil_fodf_ssst.py.help
+++ /dev/null
@@ -1,52 +0,0 @@
-usage: scil_fodf_ssst.py [-h] [--sh_order int] [--mask] [--b0_threshold thr]
-                         [--skip_b0_check]
-                         [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
-                         [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] [-f]
-                         in_dwi in_bval in_bvec frf_file out_fODF
-
-Script to compute Constrained Spherical Deconvolution (CSD) fiber ODFs.
-
-See [Tournier et al. NeuroImage 2007]
-
-Formerly: scil_compute_ssst_fodf.py
-
-positional arguments:
-  in_dwi                Path of the input diffusion volume.
-  in_bval               Path of the bvals file, in FSL format.
-  in_bvec               Path of the bvecs file, in FSL format.
-  frf_file              Path of the FRF file.
-  out_fODF              Output path for the fiber ODF coefficients.
-
-options:
-  -h, --help            show this help message and exit
-  --sh_order int        SH order used for the CSD. (Default: 8)
-  --mask                Path to a binary mask. Only the data inside the mask will be used
-                        for computations and reconstruction.
-  --b0_threshold thr    Threshold under which b-values are considered to be b0s.
-                        [Default: 20]
-                        * Note. We would expect to find at least one b-value in the
-                        range [0, b0_threshold]. To skip this check, use --skip_b0_check.
-  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
-                        (i.e. b-values below the default --b0_threshold). Use this option to
-                        allow continuing even if the minimum b-value is suspiciously high.
-                        If no b-value is found below the threshold, the script will continue
-                        with your minimal b-value as new --b0_threshold.
-                        Use with care, and only if you understand your data.
-  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
-                        Spherical harmonics basis used for the SH coefficients.
-                        Must be either 'descoteaux07', 'tournier07',
-                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
-                            'descoteaux07'       : SH basis from the Descoteaux et al.
-                                                   MRM 2007 paper
-                            'tournier07'         : SH basis from the new Tournier et al.
-                                                   NeuroImage 2019 paper, as in MRtrix 3.
-                            'descoteaux07_legacy': SH basis from the legacy Dipy implementation
-                                                   of the Descoteaux et al. MRM 2007 paper
-                            'tournier07_legacy'  : SH basis from the legacy Tournier et al.
-                                                   NeuroImage 2007 paper.
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_fodf_to_bingham.py.help b/scripts/.hidden/scil_fodf_to_bingham.py.help
deleted file mode 100644
index 9e16278d1..000000000
--- a/scripts/.hidden/scil_fodf_to_bingham.py.help
+++ /dev/null
@@ -1,51 +0,0 @@
-usage: scil_fodf_to_bingham.py [-h] [--max_lobes MAX_LOBES] [--at AT]
-                               [--rt RT] [--min_sep_angle MIN_SEP_ANGLE]
-                               [--max_fit_angle MAX_FIT_ANGLE] [--mask MASK]
-                               [-v [{DEBUG,INFO,WARNING}]] [--processes NBR]
-                               [-f]
-                               in_sh out_bingham
-
-Script for fitting a Bingham distribution to each fODF lobe, as described
-in [1].
-
-The Bingham fit is saved, with each Bingham distribution described by 7
-coefficients (for example, for a maximum number of lobes of 5, the number
-of coefficients is 7 x 5 = 35 -- less than the number of coefficients for
-SH of maximum order 8).
-
-Using 12 threads, the execution takes approximately 30 minutes for a brain with
-1mm isotropic resolution.
-
-Formerly: scil_fit_bingham_to_fodf.py
-
-positional arguments:
-  in_sh                 Input SH image.
-  out_bingham           Output Bingham functions image.
-
-options:
-  -h, --help            show this help message and exit
-  --max_lobes MAX_LOBES
-                        Maximum number of lobes per voxel to extract. [5]
-  --at AT               Absolute threshold for peaks extraction. [0.0]
-  --rt RT               Relative threshold for peaks extraction. [0.1]
-  --min_sep_angle MIN_SEP_ANGLE
-                        Minimum separation angle between two peaks. [25.0]
-  --max_fit_angle MAX_FIT_ANGLE
-                        Maximum distance in degrees around a peak direction for fitting the Bingham function. [15.0]
-  --mask MASK           Optional mask file. Only SH inside the mask are fitted.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  -f                    Force overwriting of the output files.
-
-[1] T. W. Riffert, J. Schreiber, A. Anwander, and T. R. Knösche, “Beyond
-    fractional anisotropy: Extraction of bundle-specific structural metrics
-    from crossing fiber models,” NeuroImage, vol. 100, pp. 176-191, Oct. 2014,
-    doi: 10.1016/j.neuroimage.2014.06.015.
-
-[2] J. Schreiber, T. Riffert, A. Anwander, and T. R. Knösche, “Plausibility
-    Tracking: A method to evaluate anatomical connectivity and microstructural
-    properties along fiber pathways,” NeuroImage, vol. 90, pp. 163-178, Apr.
-    2014, doi: 10.1016/j.neuroimage.2014.01.002.
diff --git a/scripts/.hidden/scil_freewater_maps.py.help b/scripts/.hidden/scil_freewater_maps.py.help
deleted file mode 100644
index 5c51f8740..000000000
--- a/scripts/.hidden/scil_freewater_maps.py.help
+++ /dev/null
@@ -1,58 +0,0 @@
-usage: scil_freewater_maps.py [-h] [--mask MASK] [--out_dir OUT_DIR]
-                              [--b_thr B_THR] [--para_diff PARA_DIFF]
-                              [--iso_diff ISO_DIFF]
-                              [--perp_diff_min PERP_DIFF_MIN]
-                              [--perp_diff_max PERP_DIFF_MAX]
-                              [--lambda1 LAMBDA1] [--lambda2 LAMBDA2]
-                              [--save_kernels DIRECTORY | --load_kernels DIRECTORY]
-                              [--compute_only] [--mouse] [--processes NBR]
-                              [-v [{DEBUG,INFO,WARNING}]] [-f]
-                              in_dwi in_bval in_bvec
-
-Compute Free Water maps [1] using AMICO.
-This script supports both single- and multi-shell data.
-
-Formerly: scil_compute_freewater.py
-
-positional arguments:
-  in_dwi                DWI file.
-  in_bval               b-values filename, in FSL format (.bval).
-  in_bvec               b-vectors filename, in FSL format (.bvec).
-
-options:
-  -h, --help            show this help message and exit
-  --mask MASK           Brain mask filename.
-  --out_dir OUT_DIR     Output directory for the Free Water results. [results]
-  --b_thr B_THR         Limit value to consider that a b-value is on an
-                        existing shell. Above this limit, the b-value is
-                        placed on a new shell. This includes b0 values.
-  --mouse               If set, use the mouse fitting profile.
-  --processes NBR       Number of sub-processes to start. Default: [1]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided
-                        level. Default level is warning, default when using -v
-                        is info.
-  -f                    Force overwriting of the output files.
-
-Model options:
-  --para_diff PARA_DIFF
-                        Axial diffusivity (AD) in the CC. [0.0015]
-  --iso_diff ISO_DIFF   Mean diffusivity (MD) in ventricles. [0.003]
-  --perp_diff_min PERP_DIFF_MIN
-                        Radial diffusivity (RD) minimum. [0.0001]
-  --perp_diff_max PERP_DIFF_MAX
-                        Radial diffusivity (RD) maximum. [0.0007]
-  --lambda1 LAMBDA1     First regularization parameter. [0.0]
-  --lambda2 LAMBDA2     Second regularization parameter. [0.25]
-
-Kernels options:
-  --save_kernels DIRECTORY
-                        Output directory for the COMMIT kernels.
-  --load_kernels DIRECTORY
-                        Input directory where the COMMIT kernels are located.
-  --compute_only        Compute kernels only; --save_kernels must be used.
-
-Reference:
-  [1] Pasternak O, Sochen N, Gur Y, Intrator N, Assaf Y.
-      Free water elimination and mapping from diffusion MRI.
-      Magn Reson Med. 62 (3) (2009) 717-730.
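-
-Example (a minimal sketch; filenames are hypothetical):
->>> scil_freewater_maps.py dwi.nii.gz dwi.bval dwi.bvec --mask brain_mask.nii.gz --out_dir fw_results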
diff --git a/scripts/.hidden/scil_freewater_priors.py.help b/scripts/.hidden/scil_freewater_priors.py.help
deleted file mode 100644
index fce34c0e3..000000000
--- a/scripts/.hidden/scil_freewater_priors.py.help
+++ /dev/null
@@ -1,71 +0,0 @@
-usage: scil_freewater_priors.py [-h]
-                                [--fa_min_single_fiber FA_MIN_SINGLE_FIBER]
-                                [--fa_max_ventricles FA_MAX_VENTRICLES]
-                                [--md_min_ventricles MD_MIN_VENTRICLES]
-                                [--roi_radius ROI_RADIUS]
-                                [--roi_center pos pos pos]
-                                [--out_txt_1fiber_para FILE]
-                                [--out_txt_1fiber_perp FILE]
-                                [--out_mask_1fiber FILE]
-                                [--out_txt_ventricles FILE]
-                                [--out_mask_ventricles FILE]
-                                [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                in_FA in_AD in_RD in_MD
-
-Compute the axial (para_diff), radial (perp_diff), and mean (iso_diff)
-diffusivity priors for NODDI.
-
-Formerly: scil_compute_NODDI_priors.py
-
-positional arguments:
-  in_FA                 Path to the FA volume.
-  in_AD                 Path to the axial diffusivity (AD) volume.
-  in_RD                 Path to the radial diffusivity (RD) volume.
-  in_MD                 Path to the mean diffusivity (MD) volume.
-
-options:
-  -h, --help            show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Metrics options:
-  --fa_min_single_fiber FA_MIN_SINGLE_FIBER
-                        Minimal threshold of FA (voxels above that threshold are considered in
-                        the single fiber mask). [0.7]
-  --fa_max_ventricles FA_MAX_VENTRICLES
-                        Maximal threshold of FA (voxels under that threshold are considered in
-                        the ventricles). [0.1]
-  --md_min_ventricles MD_MIN_VENTRICLES
-                        Minimal threshold of MD in mm2/s (voxels above that threshold are considered
-                        in the ventricles). [0.003]
-
-Regions options:
-  --roi_radius ROI_RADIUS
-                        Radius of the region used to estimate the priors. The roi will be a cube spanning
-                        from ROI_CENTER in each direction. [20]
-  --roi_center pos pos pos
-                        Center of the roi of size roi_radius used to estimate the priors; a 3-value coordinate.
-                        If not set, uses the center of the 3D volume.
-
-Outputs:
-  --out_txt_1fiber_para FILE
-                        Output path for the text file containing the single fiber average value of AD.
-                        If not set, the file will not be saved.
-  --out_txt_1fiber_perp FILE
-                        Output path for the text file containing the single fiber average value of RD.
-                        If not set, the file will not be saved.
-  --out_mask_1fiber FILE
-                        Output path for the single fiber mask. If not set, the mask will not be saved.
-  --out_txt_ventricles FILE
-                        Output path for the text file containing the ventricles average value of MD.
-                        If not set, the file will not be saved.
-  --out_mask_ventricles FILE
-                        Output path for the ventricle mask.
-                        If not set, the mask will not be saved.
-
-Reference:
-  [1] Zhang H, Schneider T, Wheeler-Kingshott CA, Alexander DC.
-      NODDI: practical in vivo neurite orientation dispersion and density
-      imaging of the human brain. NeuroImage. 2012 Jul 16;61:1000-16.
diff --git a/scripts/.hidden/scil_frf_mean.py.help b/scripts/.hidden/scil_frf_mean.py.help
deleted file mode 100644
index f2c6d410b..000000000
--- a/scripts/.hidden/scil_frf_mean.py.help
+++ /dev/null
@@ -1,22 +0,0 @@
-usage: scil_frf_mean.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
-                        list [list ...] file
-
-Compute the mean Fiber Response Function from a set of individually
-computed Response Functions.
-
-The FRF files are obtained from scil_frf_ssst.py, scil_frf_msmt.py in the
-case of multi-shell data, or scil_frf_memsmt.py in the case of multi-encoding
-multi-shell data.
-
-Formerly: scil_compute_mean_frf.py
-
-positional arguments:
-  list                  List of FRF filepaths.
-  file                  Path of the output mean FRF file.
-
-options:
-  -h, --help            show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_frf_memsmt.py.help b/scripts/.hidden/scil_frf_memsmt.py.help
deleted file mode 100644
index 070ad5f23..000000000
--- a/scripts/.hidden/scil_frf_memsmt.py.help
+++ /dev/null
@@ -1,122 +0,0 @@
-usage: scil_frf_memsmt.py [-h] --in_dwis IN_DWIS [IN_DWIS ...] --in_bvals
-                          IN_BVALS [IN_BVALS ...] --in_bvecs IN_BVECS
-                          [IN_BVECS ...] --in_bdeltas {0,1,-0.5,0.5}
-                          [{0,1,-0.5,0.5} ...] [--mask MASK]
-                          [--mask_wm MASK_WM] [--mask_gm MASK_GM]
-                          [--mask_csf MASK_CSF] [--fa_thr_wm FA_THR_WM]
-                          [--fa_thr_gm FA_THR_GM] [--fa_thr_csf FA_THR_CSF]
-                          [--md_thr_gm MD_THR_GM] [--md_thr_csf MD_THR_CSF]
-                          [--min_nvox MIN_NVOX] [--tolerance tol]
-                          [--skip_b0_check] [--dti_bval_limit DTI_BVAL_LIMIT]
-                          [--roi_radii ROI_RADII [ROI_RADII ...]]
-                          [--roi_center tuple(3) tuple(3) tuple(3)]
-                          [--wm_frf_mask file] [--gm_frf_mask file]
-                          [--csf_frf_mask file] [-v [{DEBUG,INFO,WARNING}]]
-                          [-f]
-                          out_wm_frf out_gm_frf out_csf_frf
-
-Script to estimate response functions for multi-encoding multi-shell
-multi-tissue (memsmt) constrained spherical deconvolution. In order to operate,
-the script only needs the data from one type of b-tensor encoding. However,
-giving only a spherical one will not produce good fiber response functions, as
-it only probes spherical shapes. As for planar encoding, it should technically
-work alone, but seems to be very sensitive to noise and is yet to be properly
-documented. We thus suggest always using at least the linear encoding, which
-will be equivalent to standard multi-shell multi-tissue if used alone, in
-combination with other encodings. Note that custom encodings are not yet
-supported, so only the linear tensor encoding (LTE, b_delta = 1), the
-planar tensor encoding (PTE, b_delta = -0.5), the spherical tensor encoding
-(STE, b_delta = 0) and the cigar shape tensor encoding (b_delta = 0.5) are
-available. Moreover, all of `--in_dwis`, `--in_bvals`, `--in_bvecs` and
-`--in_bdeltas` must have the same number of arguments. Be sure to keep the
-same order of encodings throughout all these inputs and to set `--in_bdeltas`
-accordingly (IMPORTANT).
-
-The script computes a response function for white-matter (wm),
-gray-matter (gm), csf and the mean b=0.
-
-In the wm, we compute the response function in each voxel where
-the FA is above threshold_fa_wm.
-
-In the gm (or csf), we compute the response function in each voxel where
-the FA is below threshold_fa_gm (or threshold_fa_csf) and where
-the MD is below threshold_md_gm (or threshold_md_csf).
-
->>> scil_frf_memsmt.py wm_frf.txt gm_frf.txt csf_frf.txt --in_dwis LTE.nii.gz
-    PTE.nii.gz STE.nii.gz --in_bvals LTE.bval PTE.bval STE.bval --in_bvecs
-    LTE.bvec PTE.bvec STE.bvec --in_bdeltas 1 -0.5 0 --mask mask.nii.gz
-
-Based on P. Karan et al., Bridging the gap between constrained spherical
-deconvolution and diffusional variance decomposition via tensor-valued
-diffusion MRI. Medical Image Analysis (2022)
-
-Formerly: scil_compute_memsmt_frf.py
-
-positional arguments:
-  out_wm_frf            Path to the output WM frf file, in .txt format.
-  out_gm_frf            Path to the output GM frf file, in .txt format.
-  out_csf_frf           Path to the output CSF frf file, in .txt format.
-
-options:
-  -h, --help            show this help message and exit
-  --in_dwis IN_DWIS [IN_DWIS ...]
-                        Path to the input diffusion volume for each b-tensor encoding type.
-  --in_bvals IN_BVALS [IN_BVALS ...]
-                        Path to the bval file, in FSL format, for each b-tensor encoding type.
-  --in_bvecs IN_BVECS [IN_BVECS ...]
-                        Path to the bvec file, in FSL format, for each b-tensor encoding type.
-  --in_bdeltas {0,1,-0.5,0.5} [{0,1,-0.5,0.5} ...]
-                        Value of b_delta for each b-tensor encoding type, in the same order as
-                        dwi, bval and bvec inputs.
-  --mask MASK           Path to a binary mask. Only the data inside the mask will be used for
-                        computations and reconstruction. Useful if no tissue masks are available.
-  --mask_wm MASK_WM     Path to the input WM mask file, used to improve the final WM frf mask.
-  --mask_gm MASK_GM     Path to the input GM mask file, used to improve the final GM frf mask.
-  --mask_csf MASK_CSF   Path to the input CSF mask file, used to improve the final CSF frf mask.
-  --fa_thr_wm FA_THR_WM
-                        If supplied, use this threshold to select single WM fiber voxels from
-                        the FA inside the WM mask defined by mask_wm.
-                        Each voxel above this threshold will be selected. [0.7]
-  --fa_thr_gm FA_THR_GM
-                        If supplied, use this threshold to select GM voxels from the FA inside
-                        the GM mask defined by mask_gm.
-                        Each voxel below this threshold will be selected. [0.2]
-  --fa_thr_csf FA_THR_CSF
-                        If supplied, use this threshold to select CSF voxels from the FA inside
-                        the CSF mask defined by mask_csf.
-                        Each voxel below this threshold will be selected. [0.1]
-  --md_thr_gm MD_THR_GM
-                        If supplied, use this threshold to select GM voxels from the MD inside
-                        the GM mask defined by mask_gm.
-                        Each voxel below this threshold will be selected. [0.0007]
-  --md_thr_csf MD_THR_CSF
-                        If supplied, use this threshold to select CSF voxels from the MD inside
-                        the CSF mask defined by mask_csf.
-                        Each voxel below this threshold will be selected. [0.003]
-  --min_nvox MIN_NVOX   Minimal number of voxels needed for each tissue mask in order to
-                        proceed to frf estimation. [100]
-  --tolerance tol       The tolerated gap between the b-values to extract and the current b-value.
-                        [Default: 20]
-                        * Note. We would expect to find at least one b-value in the
-                        range [0, tolerance]. To skip this check, use --skip_b0_check.
-  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
-                        (i.e. b-values below the default --tolerance). Use this option to
-                        allow continuing even if the minimum b-value is suspiciously high.
-                        Use with care, and only if you understand your data.
-  --dti_bval_limit DTI_BVAL_LIMIT
-                        The highest b-value taken for the DTI model. [1200]
-  --roi_radii ROI_RADII [ROI_RADII ...]
-                        If supplied, use those radii to select a cuboid roi to estimate the
-                        response functions. The roi will be a cuboid spanning from the middle of
-                        the volume in each direction with the different radii. The type is either
-                        an int (e.g. --roi_radii 10) or an array-like (3,) (e.g. --roi_radii 20 30 10). [[20]]
-  --roi_center tuple(3) tuple(3) tuple(3)
-                        If supplied, use this center to span the cuboid roi using roi_radii.
-                        [center of the 3D volume] (e.g. --roi_center 66 79 79)
-  --wm_frf_mask file    Path to the output WM frf mask file, the voxels used to compute the WM frf.
-  --gm_frf_mask file    Path to the output GM frf mask file, the voxels used to compute the GM frf.
-  --csf_frf_mask file   Path to the output CSF frf mask file, the voxels used to compute the CSF frf.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_frf_msmt.py.help b/scripts/.hidden/scil_frf_msmt.py.help
deleted file mode 100644
index 300813235..000000000
--- a/scripts/.hidden/scil_frf_msmt.py.help
+++ /dev/null
@@ -1,114 +0,0 @@
-usage: scil_frf_msmt.py [-h] [--mask MASK] [--mask_wm MASK_WM]
-                        [--mask_gm MASK_GM] [--mask_csf MASK_CSF]
-                        [--fa_thr_wm FA_THR_WM] [--fa_thr_gm FA_THR_GM]
-                        [--fa_thr_csf FA_THR_CSF] [--md_thr_gm MD_THR_GM]
-                        [--md_thr_csf MD_THR_CSF] [--min_nvox MIN_NVOX]
-                        [--tolerance TOLERANCE] [--skip_b0_check]
-                        [--dti_bval_limit DTI_BVAL_LIMIT]
-                        [--roi_radii ROI_RADII [ROI_RADII ...]]
-                        [--roi_center tuple(3) tuple(3) tuple(3)]
-                        [--wm_frf_mask file] [--gm_frf_mask file]
-                        [--csf_frf_mask file] [-v [{DEBUG,INFO,WARNING}]] [-f]
-                        in_dwi in_bval in_bvec out_wm_frf out_gm_frf
-                        out_csf_frf
-
-Compute response functions for multi-shell multi-tissue (MSMT) constrained
-spherical deconvolution from DWI data.
-
-The script computes a response function for white-matter (wm),
-gray-matter (gm), csf and the mean b=0.
-
-  In the wm, we compute the response function in each voxel where the FA is
-  above threshold_fa_wm.
-
-  In the gm (or csf), we compute the response function in each voxel where
-  the FA is below threshold_fa_gm (or threshold_fa_csf) and where the MD
-  is below threshold_md_gm (or threshold_md_csf).
-
-We output one response function file for each tissue, containing the response
-function for each b-value (arranged by lines). These are saved as the diagonal
-of the axis-symmetric diffusion tensor (3 e-values) and a mean b0 value.
-For example, a typical wm_frf is [15e-4, 4e-4, 4e-4, 700], where the tensor
-e-values are (15,4,4)x10^-4 mm^2/s and the mean b0 is 700.
-
-Based on B. Jeurissen et al., Multi-tissue constrained spherical deconvolution
-for improved analysis of multi-shell diffusion MRI data. Neuroimage (2014)
-
-Formerly: scil_compute_msmt_frf.py
-
-positional arguments:
-  in_dwi                Path to the input diffusion volume.
-  in_bval               Path to the bval file, in FSL format.
-  in_bvec               Path to the bvec file, in FSL format.
-  out_wm_frf            Path to the output WM frf file, in .txt format.
-  out_gm_frf            Path to the output GM frf file, in .txt format.
-  out_csf_frf           Path to the output CSF frf file, in .txt format.
-
-options:
-  -h, --help            show this help message and exit
-  --mask MASK           Path to a binary mask. Only the data inside the mask
-                        will be used for computations and reconstruction.
-                        Useful if no tissue masks are available.
-  --mask_wm MASK_WM     Path to the input WM mask file, used to improve the
-                        final WM frf mask.
-  --mask_gm MASK_GM     Path to the input GM mask file, used to improve the
-                        final GM frf mask.
-  --mask_csf MASK_CSF   Path to the input CSF mask file, used to improve the
-                        final CSF frf mask.
-  --fa_thr_wm FA_THR_WM
-                        If supplied, use this threshold to select single WM
-                        fiber voxels from the FA inside the WM mask defined by
-                        mask_wm. Each voxel above this threshold will be
-                        selected. [0.7]
-  --fa_thr_gm FA_THR_GM
-                        If supplied, use this threshold to select GM voxels
-                        from the FA inside the GM mask defined by mask_gm.
-                        Each voxel below this threshold will be selected.
-                        [0.2]
-  --fa_thr_csf FA_THR_CSF
-                        If supplied, use this threshold to select CSF voxels
-                        from the FA inside the CSF mask defined by mask_csf.
-                        Each voxel below this threshold will be selected.
-                        [0.1]
-  --md_thr_gm MD_THR_GM
-                        If supplied, use this threshold to select GM voxels
-                        from the MD inside the GM mask defined by mask_gm.
-                        Each voxel below this threshold will be selected.
-                        [0.0007]
-  --md_thr_csf MD_THR_CSF
-                        If supplied, use this threshold to select CSF voxels
-                        from the MD inside the CSF mask defined by mask_csf.
-                        Each voxel below this threshold will be selected.
-                        [0.003]
-  --min_nvox MIN_NVOX   Minimal number of voxels needed for each tissue mask
-                        in order to proceed to frf estimation. [100]
-  --tolerance TOLERANCE
-                        The tolerated gap between the b-values to extract and
-                        the current b-value. [20]
-  --skip_b0_check       By default, we supervise that at least one b0 exists
-                        in your data (i.e. b-values below the default
-                        --tolerance). Use this option to allow continuing even
-                        if the minimum b-value is suspiciously high. Use with
-                        care, and only if you understand your data.
-  --dti_bval_limit DTI_BVAL_LIMIT
-                        The highest b-value taken for the DTI model. [1200]
-  --roi_radii ROI_RADII [ROI_RADII ...]
-                        If supplied, use those radii to select a cuboid roi to
-                        estimate the response functions. The roi will be a
-                        cuboid spanning from the middle of the volume in each
-                        direction with the different radii. The type is either
-                        an int (e.g. --roi_radii 10) or an array-like (3,)
-                        (e.g. --roi_radii 20 30 10). [[20]]
-  --roi_center tuple(3) tuple(3) tuple(3)
-                        If supplied, use this center to span the cuboid roi
-                        using roi_radii. [center of the 3D volume] (e.g.
-                        --roi_center 66 79 79)
-  --wm_frf_mask file    Path to the output WM frf mask file, the voxels used
-                        to compute the WM frf.
-  --gm_frf_mask file    Path to the output GM frf mask file, the voxels used
-                        to compute the GM frf.
-  --csf_frf_mask file   Path to the output CSF frf mask file, the voxels used
-                        to compute the CSF frf.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided
-                        level. Default level is warning, default when using -v
-                        is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_frf_set_diffusivities.py.help b/scripts/.hidden/scil_frf_set_diffusivities.py.help
deleted file mode 100644
index cd534bf52..000000000
--- a/scripts/.hidden/scil_frf_set_diffusivities.py.help
+++ /dev/null
@@ -1,30 +0,0 @@
-usage: scil_frf_set_diffusivities.py [-h] [--no_factor]
-                                     [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                     input new_frf output
-
-Replace the fiber response function in the FRF file.
-Use this script when you want to use a fixed response function
-and keep the mean b0.
-
-The FRF file is obtained from scil_frf_ssst.py or scil_frf_msmt.py in the case
-of multi-shell data.
-
-Formerly: scil_set_response_function.py
-
-positional arguments:
-  input                 Path of the FRF file.
-  new_frf               New response function given as a tuple. We will replace the
-                        response function in frf_file with this fiber response
-                        function x 10**-4 (e.g. 15,4,4).
-                        If multi-shell, write the first shell, then the second shell,
-                        and the third, etc. (e.g. 15,4,4,13,5,5,12,5,5).
-  output                Path of the new FRF file.
-
-options:
-  -h, --help            show this help message and exit
-  --no_factor           If supplied, the fiber response function is
-                        evaluated without the x 10**-4 factor. [False].
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
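For reference, a minimal invocation sketch consistent with the scil_frf_set_diffusivities.py help above (frf.txt and frf_fixed.txt are illustrative file names; the tuple follows the single-shell 15,4,4 form described in the help):

>>> scil_frf_set_diffusivities.py frf.txt 15,4,4 frf_fixed.txt

Per the help text, this replaces the stored eigenvalues with (15,4,4) x 10**-4 while keeping the mean b0 from frf.txt.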
diff --git a/scripts/.hidden/scil_frf_ssst.py.help b/scripts/.hidden/scil_frf_ssst.py.help
deleted file mode 100644
index e71476102..000000000
--- a/scripts/.hidden/scil_frf_ssst.py.help
+++ /dev/null
@@ -1,61 +0,0 @@
-usage: scil_frf_ssst.py [-h] [--mask MASK] [--mask_wm MASK_WM]
-                        [--fa_thresh FA_THRESH]
-                        [--min_fa_thresh MIN_FA_THRESH] [--min_nvox MIN_NVOX]
-                        [--roi_radii ROI_RADII [ROI_RADII ...]]
-                        [--roi_center tuple(3) tuple(3) tuple(3)]
-                        [--b0_threshold thr] [--skip_b0_check]
-                        [-v [{DEBUG,INFO,WARNING}]] [-f]
-                        in_dwi in_bval in_bvec frf_file
-
-Compute a single Fiber Response Function from a DWI.
-
-A DTI fit is made, and voxels containing a single fiber population are
-found using a threshold on the FA.
-
-Formerly: scil_compute_ssst_frf.py
-
-positional arguments:
-  in_dwi                Path of the input diffusion volume.
-  in_bval               Path of the bvals file, in FSL format.
-  in_bvec               Path of the bvecs file, in FSL format.
-  frf_file              Path to the output FRF file, in .txt format, saved by Numpy.
-
-options:
-  -h, --help            show this help message and exit
-  --mask MASK           Path to a binary mask. Only the data inside the mask will be used
-                        for computations and reconstruction. Useful if no white matter mask
-                        is available.
-  --mask_wm MASK_WM     Path to a binary white matter mask. Only the data inside this mask
-                        and above the threshold defined by --fa_thresh will be used to estimate the
-                        fiber response function.
-  --fa_thresh FA_THRESH
-                        If supplied, use this threshold as the initial threshold to select
-                        single fiber voxels. [0.7]
-  --min_fa_thresh MIN_FA_THRESH
-                        If supplied, this is the minimal value that will be tried when looking
-                        for single fiber voxels. [0.5]
-  --min_nvox MIN_NVOX   Minimal number of voxels that need to be identified as single fiber voxels
-                        in the automatic estimation. [300]
-  --roi_radii ROI_RADII [ROI_RADII ...]
-                        If supplied, use those radii to select a cuboid roi to estimate the
-                        response functions. The roi will be a cuboid spanning from the middle of
-                        the volume in each direction with the different radii. The type is either
-                        an int (e.g. --roi_radii 10) or an array-like (3,) (e.g. --roi_radii 20 30 10). [[20]]
-  --roi_center tuple(3) tuple(3) tuple(3)
-                        If supplied, use this center to span the roi of size roi_radius. [center of the 3D volume]
-  --b0_threshold thr    Threshold under which b-values are considered to be b0s.
-                        [Default: 20]
-                        * Note. We would expect to find at least one b-value in the
-                        range [0, b0_threshold]. To skip this check, use --skip_b0_check.
-  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
-                        (i.e. b-values below the default --b0_threshold). Use this option to
-                        allow continuing even if the minimum b-value is suspiciously high.
-                        If no b-value is found below the threshold, the script will continue
-                        with your minimal b-value as new --b0_threshold.
-                        Use with care, and only if you understand your data.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-References: [1] Tournier et al. NeuroImage 2007
diff --git a/scripts/.hidden/scil_get_version.py.help b/scripts/.hidden/scil_get_version.py.help
deleted file mode 100644
index 062cc2c1a..000000000
--- a/scripts/.hidden/scil_get_version.py.help
+++ /dev/null
@@ -1,16 +0,0 @@
-usage: scil_get_version.py [-h] [--show_dependencies]
-                           [-v [{DEBUG,INFO,WARNING}]]
-
-Gives you information about your current scilpy installation.
-This is useful for non-developers, as it gives the information
-needed to reproduce your results, or to help with debugging.
-
-If you are experiencing a bug, please run this script and
-send the output to the scilpy developers.
-
-options:
-  -h, --help            show this help message and exit
-  --show_dependencies   Show the dependencies of scilpy.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
diff --git a/scripts/.hidden/scil_gradients_apply_transform.py.help b/scripts/.hidden/scil_gradients_apply_transform.py.help
deleted file mode 100644
index 82a99e372..000000000
--- a/scripts/.hidden/scil_gradients_apply_transform.py.help
+++ /dev/null
@@ -1,21 +0,0 @@
-usage: scil_gradients_apply_transform.py [-h] [--inverse]
-                                         [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                         in_bvecs in_transfo out_bvecs
-
-Transform bvecs using an affine/rigid transformation.
-
-Formerly: scil_apply_transform_to_bvecs.py.
-
-positional arguments:
-  in_bvecs              Path of the bvec file, in FSL format.
-  in_transfo            Path of the file containing the 4x4
-                        transformation matrix (.txt, .npy or .mat).
-  out_bvecs             Output filename of the transformed bvecs.
-
-options:
-  -h, --help            show this help message and exit
-  --inverse             Apply the inverse transformation.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_gradients_convert.py.help b/scripts/.hidden/scil_gradients_convert.py.help
deleted file mode 100644
index ffec51279..000000000
--- a/scripts/.hidden/scil_gradients_convert.py.help
+++ /dev/null
@@ -1,22 +0,0 @@
-usage: scil_gradients_convert.py [-h] (--input_fsl | --input_mrtrix) [-f]
-                                 [-v [{DEBUG,INFO,WARNING}]]
-                                 GRADIENT_FILE(S) [GRADIENT_FILE(S) ...]
-                                 output
-
-Script to convert gradient tables between FSL and MRtrix formats.
-
-Formerly: scil_convert_gradients_mrtrix_to_fsl.py or
-scil_convert_gradients_fsl_to_mrtrix.py
-
-positional arguments:
-  GRADIENT_FILE(S)      Path(s) to the gradient file(s). Either FSL (.bval, .bvec) or MRtrix (.b).
-  output                Basename of output without extension. Extension(s) will be added automatically (.b for MRtrix, .bval/.bvec for FSL).
-
-options:
-  -h, --help            show this help message and exit
-  --input_fsl           FSL format.
-  --input_mrtrix        MRtrix format.
-  -f                    Force overwriting of the output files.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
diff --git a/scripts/.hidden/scil_gradients_generate_sampling.py.help b/scripts/.hidden/scil_gradients_generate_sampling.py.help
deleted file mode 100644
index e079dd95d..000000000
--- a/scripts/.hidden/scil_gradients_generate_sampling.py.help
+++ /dev/null
@@ -1,67 +0,0 @@
-usage: scil_gradients_generate_sampling.py [-h] [--eddy] [--duty]
-                                           [--no_b0_start NO_B0_START | --b0_every B0_EVERY]
-                                           [--b0_end] [--b0_value B0_VALUE]
-                                           [--b0_philips]
-                                           (--bvals bvals [bvals ...] | --b_lin_max B_LIN_MAX | --q_lin_max Q_LIN_MAX)
-                                           (--fsl | --mrtrix)
-                                           [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                           nb_samples_per_shell
-                                           [nb_samples_per_shell ...]
-                                           out_basename
-
-Generate multi-shell gradient sampling with various processing options. Helps
-accelerate gradients, optimize duty cycle and avoid artefacts.
-
-Multi-shell gradient sampling is generated as in [1]. The bvecs are then
-flipped to maximize spread for eddy current correction, b0s are interleaved at
-equal spacing and the non-b0 samples are finally shuffled to minimize the total
-diffusion gradient amplitude over a few TR.
-
-Formerly: scil_generate_gradient_sampling.py
-
-positional arguments:
-  nb_samples_per_shell  Number of samples on each non b0 shell.
-                        If multishell, provide a number per shell.
-  out_basename          Gradient sampling output basename (don't include extension).
-                        Please add options --fsl and/or --mrtrix below.
-
-options:
-  -h, --help            show this help message and exit
-  --eddy                If set, we apply eddy optimization.
-                        B-vectors are flipped to be well spread without symmetry.
-  --duty                If set, we apply duty cycle optimization.
-                        B-vectors are shuffled to reduce consecutive collinearity in the samples. [False]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-b0 acquisitions:
-  Default if you add no option is to have a b0 at the start.
-
-  --no_b0_start NO_B0_START
-                        If set, do not add a b0 at the beginning.
-  --b0_every B0_EVERY   Interleave a b0 every n=b0_every values. Starts after the first b0
-                        (cannot be used with --no_b0_start). Must be an integer >= 1.
-  --b0_end              If set, adds a b0 as last sample.
-  --b0_value B0_VALUE   b-value of the b0s. [0.0]
-  --b0_philips          If set, replaces values of b0s bvecs by existing bvecs for Philips handling.
-
-Non-b0 acquisitions:
-  --bvals bvals [bvals ...]
-                        bval of each non-b0 shell.
-  --b_lin_max B_LIN_MAX
-                        b-max for linear bval distribution in *b*.
-  --q_lin_max Q_LIN_MAX
-                        b-max for linear bval distribution in *q*;
-                        the square root of b-values will be linearly distributed.
-
-Save as:
-  --fsl                 Save in FSL format (.bvec/.bval).
-  --mrtrix              Save in MRtrix format (.b).
-
-References: [1] Emmanuel Caruyer, Christophe Lenglet, Guillermo Sapiro,
-Rachid Deriche. Design of multishell gradient sampling with uniform coverage
-in diffusion MRI. Magnetic Resonance in Medicine, Wiley, 2013, 69 (6),
-pp. 1534-1540.
-
diff --git a/scripts/.hidden/scil_gradients_modify_axes.py.help b/scripts/.hidden/scil_gradients_modify_axes.py.help
deleted file mode 100644
index 71b163f23..000000000
--- a/scripts/.hidden/scil_gradients_modify_axes.py.help
+++ /dev/null
@@ -1,28 +0,0 @@
-usage: scil_gradients_modify_axes.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                     in_gradient_sampling_file
-                                     out_gradient_sampling_file
-                                     {1,2,3,-1,-2,-3} {1,2,3,-1,-2,-3}
-                                     {1,2,3,-1,-2,-3}
-
-Flip (ex, x --> -x) or swap (ex, x <-> y) chosen axes of the gradient sampling
-matrix. Result will be saved in the same format as the input gradient sampling
-file.
-
-Formerly: scil_flip_gradients.py or scil_swap_gradient_axis.py
-
-positional arguments:
-  in_gradient_sampling_file
-                        Path to gradient sampling file. (.bvec or .b)
-  out_gradient_sampling_file
-                        Where to save the flipped gradient sampling file. Extension
-                        (.bvec or .b) must be the same as in_gradient_sampling_file.
-  {1,2,3,-1,-2,-3}      The final order of the axes, compared to original order: x=1 y=2 z=3.
-                        Ex: to only flip y: 1 -2 3.
-                        Ex: to only swap x and y: 2 1 3.
-                        Ex: to first flip x, then permute all three axes: 3 -1 2.
-
-options:
-  -h, --help            show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
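For reference, a minimal invocation sketch consistent with the scil_gradients_modify_axes.py help above (file names are illustrative); following the help's own axis convention, 1 -2 3 flips the y axis only:

>>> scil_gradients_modify_axes.py dwi.bvec dwi_flipped.bvec 1 -2 3

The output extension (.bvec here) must match the input file, as noted in the help.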
diff --git a/scripts/.hidden/scil_gradients_round_bvals.py.help b/scripts/.hidden/scil_gradients_round_bvals.py.help
deleted file mode 100644
index 030942a65..000000000
--- a/scripts/.hidden/scil_gradients_round_bvals.py.help
+++ /dev/null
@@ -1,33 +0,0 @@
-usage: scil_gradients_round_bvals.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                     in_bval shells [shells ...] out_bval
-                                     tolerance
-
-Select b-values on specific b-value shells.
-
-With the --tolerance argument, this is useful for sampling schemes where
-b-values of a shell are not all identical. Adjust the tolerance to vary the
-accepted interval around the targeted b-value.
-
-For example, a b-value of 2000 and a tolerance of 20 will select all b-values
-between [1980, 2020] and round them to the value of 2000.
-
->>> scil_gradients_round_bvals.py bvals 0 1000 2000 newbvals --tolerance 20
-
-Formerly: scil_resample_bvals.py
-
-positional arguments:
-  in_bval               The b-values in FSL format.
-  shells                The list of expected shells. For example 0 1000 2000.
-                        All b-values in the b_val file should correspond to one given shell (up to the tolerance).
-  out_bval              The name of the output b-values.
-  tolerance             The tolerated gap between the b-values to extract and the
-                        actual b-values. Expecting an integer value. Comparison is
-                        strict: a b-value of 1010 with a tolerance of 10 is NOT
-                        included in shell 1000. Suggestion: 20.
-
-options:
-  -h, --help            show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_gradients_validate_correct.py.help b/scripts/.hidden/scil_gradients_validate_correct.py.help
deleted file mode 100644
index 19f2ce9d1..000000000
--- a/scripts/.hidden/scil_gradients_validate_correct.py.help
+++ /dev/null
@@ -1,48 +0,0 @@
-usage: scil_gradients_validate_correct.py [-h] [--mask MASK]
-                                          [--fa_threshold FA_THRESHOLD]
-                                          [--column_wise]
-                                          [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                          in_bvec in_peaks in_FA out_bvec
-
-Detect sign flips and/or axes swaps in the gradients table from a fiber
-coherence index [1]. The script takes as input the principal direction(s)
-at each voxel, the b-vectors and the fractional anisotropy map and outputs
-a corrected b-vectors file.
-
-A typical pipeline could be:
->>> scil_dti_metrics.py dwi.nii.gz bval bvec --not_all --fa fa.nii.gz
-    --evecs peaks.nii.gz
->>> scil_gradients_validate_correct.py bvec peaks_v1.nii.gz fa.nii.gz bvec_corr
-
-Note that peaks_v1.nii.gz is the file containing the direction associated
-with the highest eigenvalue at each voxel.
-
-It is also possible to use a file containing multiple principal directions per
-voxel, given that they are sorted by decreasing amplitude. In that case, the
-first direction (with the highest amplitude) will be chosen for validation.
-Only 4D data is supported, so the directions must be stored in a single
-dimension. For example, peaks.nii.gz from scil_fodf_metrics.py could be used.
-
-Formerly: scil_validate_and_correct_bvecs.py
-
-positional arguments:
-  in_bvec               Path to bvec file.
-  in_peaks              Path to peaks file.
-  in_FA                 Path to the fractional anisotropy file.
-  out_bvec              Path to corrected bvec file (FSL format).
-
-options:
-  -h, --help            show this help message and exit
-  --mask MASK           Path to an optional mask. If set, FA and Peaks will only be used inside the mask.
-  --fa_threshold FA_THRESHOLD
-                        FA threshold. Only voxels with FA higher than fa_threshold will be considered. [0.2]
-  --column_wise         Specify if input peaks are column-wise (..., 3, N) instead of row-wise (..., N, 3).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-[1] Schilling KG, Yeh FC, Nath V, Hansen C, Williams O, Resnick S, Anderson AW,
-Landman BA. A fiber coherence index for quality control of B-table orientation
-in diffusion MRI scans. Magn Reson Imaging. 2019 May;58:82-89.
-doi: 10.1016/j.mri.2019.01.018.
diff --git a/scripts/.hidden/scil_gradients_validate_correct_eddy.py.help b/scripts/.hidden/scil_gradients_validate_correct_eddy.py.help
deleted file mode 100644
index 953b3d527..000000000
--- a/scripts/.hidden/scil_gradients_validate_correct_eddy.py.help
+++ /dev/null
@@ -1,25 +0,0 @@
-usage: scil_gradients_validate_correct_eddy.py [-h]
-                                               [-v [{DEBUG,INFO,WARNING}]]
-                                               [-f]
-                                               in_bvec in_bval nb_dirs
-                                               out_bvec out_bval
-
-Validate and correct gradients from eddy outputs.
-With full AP-PA eddy outputs, a full bvec/bval pair (2x the number of
-directions) is produced, which does not fit with the output dwi
-(1x the number of directions).
-
-Formerly: scil_validate_and_correct_eddy_gradients.py
-
-positional arguments:
-  in_bvec               In bvec file.
-  in_bval               In bval file.
-  nb_dirs               Number of directions per DWI.
-  out_bvec              Out bvec file.
-  out_bval              Out bval file.
-
-options:
-  -h, --help            show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_header_print_info.py.help b/scripts/.hidden/scil_header_print_info.py.help
deleted file mode 100644
index baa2ca705..000000000
--- a/scripts/.hidden/scil_header_print_info.py.help
+++ /dev/null
@@ -1,20 +0,0 @@
-usage: scil_header_print_info.py [-h] [--keys KEYS [KEYS ...]]
-                                 [--print_affine] [-v [{DEBUG,INFO,WARNING}]]
-                                 in_file
-
-Print the raw header from the provided file or only the specified keys.
-Supports trk, nii and mgz files.
-
-Formerly: scil_print_header.py
-
-positional arguments:
-  in_file               Input file (trk, nii and mgz).
-
-options:
-  -h, --help            show this help message and exit
-  --keys KEYS [KEYS ...]
-                        Print only the specified keys.
-  --print_affine        Print nibabel's affine.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
diff --git a/scripts/.hidden/scil_header_validate_compatibility.py.help b/scripts/.hidden/scil_header_validate_compatibility.py.help
deleted file mode 100644
index 28dc2ae4c..000000000
--- a/scripts/.hidden/scil_header_validate_compatibility.py.help
+++ /dev/null
@@ -1,22 +0,0 @@
-usage: scil_header_validate_compatibility.py [-h] [--reference REFERENCE]
-                                             [-v [{DEBUG,INFO,WARNING}]]
-                                             in_files [in_files ...]
-
-Will compare all input files against the first one for the compatibility
-of their spatial attributes.
-
-Spatial attributes are: affine, dimensions, voxel sizes and voxel order.
-
-Formerly: scil_verify_space_attributes_compatibility.py
-
-positional arguments:
-  in_files              List of files to compare (trk, tck and nii/nii.gz).
-
-options:
-  -h, --help            show this help message and exit
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
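For reference, a minimal invocation sketch consistent with the scil_header_validate_compatibility.py help above (file names are illustrative); --reference is only needed when a tck/vtk/fib/dpy file is among the inputs:

>>> scil_header_validate_compatibility.py dwi.nii.gz bundle.trk tracking.tck --reference dwi.nii.gz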
diff --git a/scripts/.hidden/scil_json_convert_entries_to_xlsx.py.help b/scripts/.hidden/scil_json_convert_entries_to_xlsx.py.help
deleted file mode 100644
index 552987b90..000000000
--- a/scripts/.hidden/scil_json_convert_entries_to_xlsx.py.help
+++ /dev/null
@@ -1,29 +0,0 @@
-usage: scil_json_convert_entries_to_xlsx.py [-h] [--no_sort_subs]
-                                            [--no_sort_bundles]
-                                            [--ignore_bundles FILE]
-                                            [--stats_over_population]
-                                            [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                            in_json out_xlsx
-
-Convert a final aggregated json file to an Excel spreadsheet.
-Typically used during the tractometry pipeline.
-
-Formerly: scil_convert_json_to_xlsx.py
-
-positional arguments:
-  in_json               File containing the json stats (.json).
-  out_xlsx              Output Excel file for the stats (.xlsx).
-
-options:
-  -h, --help            show this help message and exit
-  --no_sort_subs        If set, subjects won't be sorted alphabetically.
-  --no_sort_bundles     If set, bundles won't be sorted alphabetically.
-  --ignore_bundles FILE
-                        Path to a text file containing a list of bundles to ignore (.txt).
-                        One bundle, corresponding to keys in the json, per line.
-  --stats_over_population
-                        If set, consider the input stats to be over an entire population and not subject-based.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_json_harmonize_entries.py.help b/scripts/.hidden/scil_json_harmonize_entries.py.help
deleted file mode 100644
index 42dc105d6..000000000
--- a/scripts/.hidden/scil_json_harmonize_entries.py.help
+++ /dev/null
@@ -1,31 +0,0 @@
-usage: scil_json_harmonize_entries.py [-h] [--indent INDENT] [--sort_keys]
-                                      [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                      in_file out_file
-
-This script will harmonize a json file by adding missing keys and values
-that differ between the different layers of the dictionary.
-
-This is used only (for now) in the Aggregate_All_* portion of tractometry-flow,
-to counter the problem of missing bundles/metrics/lesions between subjects.
-
-The most common use case is when specific subjects have missing bundles,
-which will cause a pandas array to be incomplete, and thus crash. Finding
-the union of all bundles/metrics/lesions allows creating a complete json
-(but with NaN for missing values).
-
-Formerly: scil_harmonize_json.py
-
-positional arguments:
-  in_file               Input file (json).
-  out_file              Output file (json).
-
-options:
-  -h, --help            show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_json_merge_entries.py.help b/scripts/.hidden/scil_json_merge_entries.py.help
deleted file mode 100644
index 704c245d7..000000000
--- a/scripts/.hidden/scil_json_merge_entries.py.help
+++ /dev/null
@@ -1,55 +0,0 @@
-usage: scil_json_merge_entries.py [-h] [--keep_separate] [--no_list]
-                                  [--add_parent_key ADD_PARENT_KEY]
-                                  [--remove_parent_key] [--recursive]
-                                  [--average_last_layer] [--indent INDENT]
-                                  [--sort_keys] [-v [{DEBUG,INFO,WARNING}]]
-                                  [-f]
-                                  in_json [in_json ...] out_json
-
-Merge multiple json files into a single one.
-Typically used during the tractometry pipeline.
-
-Without options, it will simply merge all entries at the top level; the top
-level must not have any conflicting keys.
-
---keep_separate option will add a parent for each file, its basename will
-become the key.
-
---no_list option will merge all entries at the top level; if there is a
-conflict, the lowest level will be extended with the new values (if list) or
-added (if value).
-
---add_parent_key option will add a parent key before merging all entries.
-
---remove_parent_key option will remove the parent key before merging all
-entries.
-
---recursive option will merge all entries (scalar) at the lowest layers as a
-list.
-
---average_last_layer option will average all entries (scalar) at the lowest
-layers, but instead of creating a list it creates a mean/std level.
-
-Formerly: scil_merge_json.py
-
-positional arguments:
-  in_json               List of json files to merge (.json).
-  out_json              Output json file (.json).
-
-options:
-  -h, --help            show this help message and exit
-  --keep_separate       Merge entries as separate keys based on filename.
-  --no_list             Merge entries knowing there is no conflict.
-  --add_parent_key ADD_PARENT_KEY
-                        Merge all entries under a single parent.
-  --remove_parent_key   Merge ignoring parent key (e.g. for population).
-  --recursive           Merge all entries at the lowest layers.
-  --average_last_layer  Average all entries at the lowest layers.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_labels_combine.py.help b/scripts/.hidden/scil_labels_combine.py.help
deleted file mode 100644
index 3476d2a3a..000000000
--- a/scripts/.hidden/scil_labels_combine.py.help
+++ /dev/null
@@ -1,48 +0,0 @@
-usage: scil_labels_combine.py [-h] --volume_ids VOLUME_IDS [VOLUME_IDS ...]
-                              [--out_labels_ids OUT_LABELS_IDS [OUT_LABELS_IDS ...]
-                              | --unique | --group_in_m]
-                              [--background BACKGROUND] [--merge_groups]
-                              [-v [{DEBUG,INFO,WARNING}]] [-f]
-                              output
-
-Script to combine labels from multiple volumes. If there is overlap, it will
-overwrite them based on the input order.
-
-  >>> scil_labels_combine.py out_labels.nii.gz
-          --volume_ids animal_labels.nii 20
-          --volume_ids DKT_labels.nii.gz 44 53
-          --out_labels_ids 20 44 53
-  >>> scil_labels_combine.py slf_labels.nii.gz
-          --volume_ids a2009s_aseg.nii.gz all
-          --volume_ids clean/s1__DKT.nii.gz 1028 2028
-
-Formerly: scil_combine_labels.py.
-
-positional arguments:
-  output                Combined labels volume output.
-
-options:
-  -h, --help            show this help message and exit
-  --volume_ids VOLUME_IDS [VOLUME_IDS ...]
-                        List of volumes directly followed by their labels:
-                        --volume_ids atlasA id1a id2a
-                        --volume_ids atlasB id1b id2b ...
-                        "all" can be used instead of id numbers.
-  --out_labels_ids OUT_LABELS_IDS [OUT_LABELS_IDS ...]
-                        List of labels indices for output images.
-  --unique              If set, output ids with unique labels, excluding the first background value.
-  --group_in_m          Add (x * 10 000) to each volume's labels, where x is the input volume order number.
-  --background BACKGROUND
-                        Background id, excluded from output [0];
-                        the value is used as the output background value.
-  --merge_groups        Each group from the --volume_ids option will be merged as a single label.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-  References:
-      [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,
-          Evans A.C. and Descoteaux M. OHBM 2019.
-          Surface integration for connectome analysis in age prediction.
-
diff --git a/scripts/.hidden/scil_labels_dilate.py.help b/scripts/.hidden/scil_labels_dilate.py.help
deleted file mode 100644
index ccfb81a56..000000000
--- a/scripts/.hidden/scil_labels_dilate.py.help
+++ /dev/null
@@ -1,51 +0,0 @@
-usage: scil_labels_dilate.py [-h] [--distance DISTANCE]
-                             [--labels_to_dilate LABELS_TO_DILATE [LABELS_TO_DILATE ...]]
-                             [--labels_to_fill LABELS_TO_FILL [LABELS_TO_FILL ...]]
-                             [--labels_not_to_dilate LABELS_NOT_TO_DILATE [LABELS_NOT_TO_DILATE ...]]
-                             [--mask MASK] [--processes NBR]
-                             [-v [{DEBUG,INFO,WARNING}]] [-f]
-                             in_file out_file
-
-Dilate regions (with or without masking) from a labeled volume:
-- "labels_to_dilate" are regions that will dilate over
-  "labels_to_fill" if close enough to them ("distance").
-- "labels_to_dilate", by default (None), will be all
-  non-"labels_to_fill" and non-"labels_not_to_dilate".
-- "labels_not_to_dilate" will not be changed, but will not dilate.
-- "mask" is where the dilation is allowed (constrained),
-  in addition to "background_label" (logical AND).
-
->>> scil_labels_dilate.py wmparc_t1.nii.gz wmparc_dil.nii.gz \
-    --labels_to_fill 0 5001 5002 \
-    --labels_not_to_dilate 4 43 10 11 12 49 50 51
-
-Formerly: scil_dilate_labels.py
-
-positional arguments:
-  in_file               Path of the volume (nii or nii.gz).
-  out_file              Output filename of the dilated labels.
-
-options:
-  -h, --help            show this help message and exit
-  --distance DISTANCE   Maximal distance to dilate (in mm) [2.0].
-  --labels_to_dilate LABELS_TO_DILATE [LABELS_TO_DILATE ...]
-                        Label list to dilate. By default it dilates all
-                        labels not in labels_to_fill nor in labels_not_to_dilate.
-  --labels_to_fill LABELS_TO_FILL [LABELS_TO_FILL ...]
-                        Background id / labels to be filled [[0]],
-                        the first one is given as output background value.
-  --labels_not_to_dilate LABELS_NOT_TO_DILATE [LABELS_NOT_TO_DILATE ...]
-                        Label list not to dilate.
-  --mask MASK           Only dilate values inside the mask.
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-  References:
-      [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,
-          Evans A.C. and Descoteaux M. OHBM 2019.
-          Surface integration for connectome analysis in age prediction.
-
diff --git a/scripts/.hidden/scil_labels_remove.py.help b/scripts/.hidden/scil_labels_remove.py.help
deleted file mode 100644
index 68a09bcaf..000000000
--- a/scripts/.hidden/scil_labels_remove.py.help
+++ /dev/null
@@ -1,31 +0,0 @@
-usage: scil_labels_remove.py [-h] -i INDICES [INDICES ...]
-                             [--background BACKGROUND]
-                             [-v [{DEBUG,INFO,WARNING}]] [-f]
-                             in_labels out_labels
-
-Script to remove specific labels from an atlas volume.
-
-  >>> scil_labels_remove.py DKT_labels.nii out_labels.nii.gz -i 5001 5002
-
-Formerly: scil_remove_labels.py
-
-positional arguments:
-  in_labels             Input labels volume.
-  out_labels            Output labels volume.
-
-options:
-  -h, --help            show this help message and exit
-  -i INDICES [INDICES ...], --indices INDICES [INDICES ...]
-                        List of labels indices to remove.
-  --background BACKGROUND
-                        Integer used for removed labels [0].
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-  References:
-      [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,
-          Evans A.C. and Descoteaux M. OHBM 2019.
-          Surface integration for connectome analysis in age prediction.
-
diff --git a/scripts/.hidden/scil_labels_split_volume_by_ids.py.help b/scripts/.hidden/scil_labels_split_volume_by_ids.py.help
deleted file mode 100644
index a563eb3e4..000000000
--- a/scripts/.hidden/scil_labels_split_volume_by_ids.py.help
+++ /dev/null
@@ -1,32 +0,0 @@
-usage: scil_labels_split_volume_by_ids.py [-h] [--out_dir OUT_DIR]
-                                          [--out_prefix OUT_PREFIX]
-                                          [-r min max min max]
-                                          [--background BACKGROUND]
-                                          [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                          in_labels
-
-Split a label image into multiple images where the name of the output images
-is the id of the label (ex. 35.nii.gz, 36.nii.gz, ...). If the --range option
-is not provided, all labels of the image are extracted. The label 0 is
-considered as the background and is ignored.
-
-IMPORTANT: your label image must be of an integer type.
-
-Formerly: scil_split_volume_by_ids.py
-
-positional arguments:
-  in_labels             Path of the input label file, in a format supported by Nibabel.
-
-options:
-  -h, --help            show this help message and exit
-  --out_dir OUT_DIR     Put all output images in a specific directory.
-  --out_prefix OUT_PREFIX
-                        Prefix to be used for each output image.
-  -r min max min max, --range min max min max
-                        Specifies a subset of labels to split, formatted as min max. Ex: -r 3 5 will give files _3, _4, _5.
-  --background BACKGROUND
-                        Background value. Will not be saved as a separate label. Default: 0.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_labels_split_volume_from_lut.py.help b/scripts/.hidden/scil_labels_split_volume_from_lut.py.help
deleted file mode 100644
index ee43eb4be..000000000
--- a/scripts/.hidden/scil_labels_split_volume_from_lut.py.help
+++ /dev/null
@@ -1,31 +0,0 @@
-usage: scil_labels_split_volume_from_lut.py [-h] [--out_dir OUT_DIR]
-                                            [--out_prefix OUT_PREFIX]
-                                            (--scilpy_lut {freesurfer_subcortical,dk_aggregate_structures,freesurfer_desikan_killiany} | --custom_lut CUSTOM_LUT)
-                                            [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                            in_label
-
-Split a label image into multiple images where the name of the output images
-is taken from a lookup table (ex: left-lateral-occipital.nii.gz,
-right-thalamus.nii.gz, ...). Only the labels included in the lookup table
-are extracted.
-
-IMPORTANT: your label image must be of an integer type.
-
-Formerly: scil_split_volume_by_labels.py
-
-positional arguments:
-  in_label              Path of the input label file, in a format supported by Nibabel.
-
-options:
-  -h, --help            show this help message and exit
-  --out_dir OUT_DIR     Put all output images in a specific directory.
-  --out_prefix OUT_PREFIX
-                        Prefix to be used for each output image.
-  --scilpy_lut {freesurfer_subcortical,dk_aggregate_structures,freesurfer_desikan_killiany}
-                        Lookup table, in the file scilpy/data/LUT, used to name the output files.
-  --custom_lut CUSTOM_LUT
-                        Path of the lookup table file, used to name the output files.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_lesions_info.py.help b/scripts/.hidden/scil_lesions_info.py.help
deleted file mode 100644
index 70b783114..000000000
--- a/scripts/.hidden/scil_lesions_info.py.help
+++ /dev/null
@@ -1,50 +0,0 @@
-usage: scil_lesions_info.py [-h]
-                            [--bundle BUNDLE | --bundle_mask BUNDLE_MASK | --bundle_labels_map BUNDLE_LABELS_MAP]
-                            [--min_lesion_vol MIN_LESION_VOL]
-                            [--out_lesion_atlas FILE]
-                            [--out_lesion_stats FILE]
-                            [--out_streamlines_stats FILE] [--indent INDENT]
-                            [--sort_keys] [--reference REFERENCE]
-                            [-v [{DEBUG,INFO,WARNING}]] [-f]
-                            in_lesion out_json
-
-This script will output information about lesion load in bundle(s).
-The input can either be streamlines, a binary bundle map, or a bundle voxel
-label map.
-
-To be considered a valid lesion, the lesion volume must be at least
-min_lesion_vol mm3. This avoids the detection of thousands of single-voxel
-lesions if an automatic lesion segmentation tool is used.
-
-Formerly: scil_analyse_lesions_load.py
-
-positional arguments:
-  in_lesion             Binary mask of the lesion(s) (.nii.gz).
-  out_json              Output file for lesion information (.json).
-
-options:
-  -h, --help            show this help message and exit
-  --bundle BUNDLE       Path of the bundle file (.trk).
-  --bundle_mask BUNDLE_MASK
-                        Path of the bundle binary mask (.nii.gz).
-  --bundle_labels_map BUNDLE_LABELS_MAP
-                        Path of the bundle labels map (.nii.gz).
-  --min_lesion_vol MIN_LESION_VOL
-                        Minimum lesion volume in mm3 [7].
-  --out_lesion_atlas FILE
-                        Save the labelized lesion(s) map (.nii.gz).
-  --out_lesion_stats FILE
-                        Save the lesion-wise volume measure (.json).
-  --out_streamlines_stats FILE
-                        Save the lesion-wise streamline count (.json).
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_mti_adjust_B1_header.py.help b/scripts/.hidden/scil_mti_adjust_B1_header.py.help
deleted file mode 100644
index 8ca74d463..000000000
--- a/scripts/.hidden/scil_mti_adjust_B1_header.py.help
+++ /dev/null
@@ -1,17 +0,0 @@
-usage: scil_mti_adjust_B1_header.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                    in_B1_map out_B1_map in_B1_json
-
-Correct a B1 map header problem by applying the scaling (slope) and setting
-the slope to 1.
-
-positional arguments:
-  in_B1_map             Path to input B1 map file.
-  out_B1_map            Path to output B1 map file.
-  in_B1_json            Json file of the B1 map.
-
-options:
-  -h, --help            show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_mti_maps_MT.py.help b/scripts/.hidden/scil_mti_maps_MT.py.help
deleted file mode 100644
index 0745e3fe3..000000000
--- a/scripts/.hidden/scil_mti_maps_MT.py.help
+++ /dev/null
@@ -1,150 +0,0 @@
-usage: scil_mti_maps_MT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]
-                           --in_positive IN_POSITIVE [IN_POSITIVE ...]
-                           --in_negative IN_NEGATIVE [IN_NEGATIVE ...]
-                           --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]
-                           [--in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]]
-                           [--extended] [--filtering]
-                           [--in_jsons PD_json T1_json | --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time]
-                           [--in_B1_map IN_B1_MAP]
-                           [--B1_correction_method {empiric,model_based}]
-                           [--B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]]
-                           [--B1_nominal B1_NOMINAL]
-                           [--B1_smooth_dims B1_SMOOTH_DIMS]
-                           [-v [{DEBUG,INFO,WARNING}]] [-f]
-                           out_dir
-
-This script computes two myelin indices maps from the Magnetization Transfer
-(MT) images.
-Magnetization Transfer is a contrast mechanism in tissue resulting from the
-proton exchange between non-aqueous protons (from macromolecules and their
-closely associated water molecules, the "bound" pool) and protons in the free
-water pool called aqueous protons. This exchange attenuates the MRI signal,
-introducing microstructure-dependent contrast. MT's effect reflects the
-relative density of macromolecules such as proteins and lipids; it has been
-associated with myelin content in the white matter of the brain.
-
-Different contrasts can be done with an off-resonance pulse saturating the
-protons on non-aqueous molecules, by applying a frequency irradiation. The MT
-maps are obtained using three or four contrasts: a single positive frequency
-image and/or a single negative frequency image, and two unsaturated contrasts
-as reference. These two references should be acquired with predominant PD
-(proton density) and T1 weighting at different excitation flip angles
-(a_PD, a_T1) and repetition times (TR_PD, TR_T1).
-
-Input Data recommendation:
-  - it is recommended to use dcm2niix (v1.0.20200331) to convert data
-    https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331
-  - dcm2niix conversion will create all echo files for each contrast and
-    corresponding json files
-  - all inputs must have a matching json file with the same filename
-  - all contrasts must have the same number of echoes and be coregistered
-    before running the script.
-  - Mask must be coregistered to the echo images
-  - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)
-
-The output consists of an MT_native_maps folder containing the 2 myelin maps:
-  - MTR.nii.gz : Magnetization Transfer Ratio map
-    The MT ratio is a measure reflecting the amount of bound protons.
-  - MTsat.nii.gz : Magnetization Transfer saturation map
-    The MT saturation is a pseudo-quantitative map representing
-    the signal change between the bound and free water pools.
-
-As an option, the Complementary_maps folder contains the following images:
-  - positive.nii.gz : single positive frequency image
-  - negative.nii.gz : single negative frequency image
-  - mtoff_PD.nii.gz : unsaturated proton density weighted image
-  - mtoff_T1.nii.gz : unsaturated T1 weighted image
-  - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image
-  - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image
-  - R1app.nii.gz : Apparent R1 map computed for MTsat.
-  - B1_map.nii.gz : B1 map after correction and smoothing (if given).
-
-The final maps from MT_native_maps can be corrected for B1+ field
-inhomogeneity, using either an empiric method with
-  --in_B1_map option, suffix *B1_corrected is added for each map.
-  --B1_correction_method empiric
-or a model-based method with
-  --in_B1_map option, suffix *B1_corrected is added for each map.
-  --B1_correction_method model_based
-  --B1_fitvalues 1 or 2 .mat files, obtained externally from
-  https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,
-  and given in this order: positive frequency saturation, negative frequency
-  saturation.
-For both methods, the nominal value of the B1 map can be set with
-  --B1_nominal value
-
->>> scil_mti_maps_MT.py path/to/output/directory
-    --in_mtoff_pd path/to/echo*mtoff.nii.gz
-    --in_positive path/to/echo*pos.nii.gz --in_negative path/to/echo*neg.nii.gz
-    --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz
-    --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json
-
-By default, the script uses all the echoes available in the input folder.
-If you want to use a single echo, replace the * with the specific number of
-the echo.
-
-positional arguments:
-  out_dir               Path to output folder.
-
-options:
-  -h, --help            show this help message and exit
-  --out_prefix OUT_PREFIX
-                        Prefix to be used for each output image.
-  --mask MASK           Path to the binary brain mask.
-  --extended            If set, outputs the folder Complementary_maps.
-  --filtering           Gaussian filtering to remove Gibbs ringing. Not recommended.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Contrast maps:
-  Path to echoes corresponding to contrast images. All contrasts must have
-  the same number of echoes and be coregistered. Use * to include all echoes.
-  The in_mtoff_pd input and at least one of in_positive or in_negative are required.
-
-  --in_positive IN_POSITIVE [IN_POSITIVE ...]
-                        Path to all echoes corresponding to the positive frequency
-                        saturation pulse.
-  --in_negative IN_NEGATIVE [IN_NEGATIVE ...]
-                        Path to all echoes corresponding to the negative frequency
-                        saturation pulse.
-  --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]
-                        Path to all echoes corresponding to the predominant PD
-                        (proton density) weighting images with no saturation pulse.
-  --in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]
-                        Path to all echoes corresponding to the predominant T1
-                        weighting images with no saturation pulse. This one is optional,
-                        since it is only needed for the calculation of MTsat.
-                        Acquisition parameters should also be set with this image.
-
-Acquisition parameters:
-  Acquisition parameters required for MTsat and ihMTsat calculation.
-  These are the excitation flip angles (a_PD, a_T1), in DEGREES, and
-  repetition times (TR_PD, TR_T1) of the PD and T1 images, in SECONDS.
-  Can be given through json files (--in_jsons) or directly (--in_acq_parameters).
-
-  --in_jsons PD_json T1_json
-                        Path to MToff PD json file and MToff T1 json file, in that order.
-                        The acquisition parameters will be extracted from these files.
-                        Must come from a Philips acquisition; otherwise, use in_acq_parameters.
-  --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time
-                        Acquisition parameters in that order: flip angle of mtoff_PD,
-                        flip angle of mtoff_T1, repetition time of mtoff_PD,
-                        repetition time of mtoff_T1.
-
-B1 correction:
-  --in_B1_map IN_B1_MAP
-                        Path to B1 map coregistered to MT contrasts.
-  --B1_correction_method {empiric,model_based}
-                        Choice of B1 correction method. Choose between empiric and model-based.
-                        Note that the model-based method requires a B1 fitvalues file.
-                        Both methods will only correct the saturation measures. [empiric]
-  --B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]
-                        Path to B1 fitvalues files obtained externally. Should be one .mat
-                        file per input MT-on image, given in this specific order:
-                        positive frequency saturation, negative frequency saturation.
-  --B1_nominal B1_NOMINAL
-                        Nominal value for the B1 map. For Philips, should be 100. [100]
-  --B1_smooth_dims B1_SMOOTH_DIMS
-                        Dimension of the squared window used for B1 smoothing, in number of voxels. [5]
diff --git a/scripts/.hidden/scil_mti_maps_ihMT.py.help b/scripts/.hidden/scil_mti_maps_ihMT.py.help
deleted file mode 100644
index a0de4d6b9..000000000
--- a/scripts/.hidden/scil_mti_maps_ihMT.py.help
+++ /dev/null
@@ -1,164 +0,0 @@
-usage: scil_mti_maps_ihMT.py [-h] [--out_prefix OUT_PREFIX] [--mask MASK]
-                             --in_altnp IN_ALTNP [IN_ALTNP ...] --in_altpn
-                             IN_ALTPN [IN_ALTPN ...] --in_negative IN_NEGATIVE
-                             [IN_NEGATIVE ...] --in_positive IN_POSITIVE
-                             [IN_POSITIVE ...] --in_mtoff_pd IN_MTOFF_PD
-                             [IN_MTOFF_PD ...]
-                             [--in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]]
-                             [--extended] [--filtering]
-                             [--in_jsons PD_json T1_json | --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time]
-                             [--in_B1_map IN_B1_MAP]
-                             [--B1_correction_method {empiric,model_based}]
-                             [--B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]]
-                             [--B1_nominal B1_NOMINAL]
-                             [--B1_smooth_dims B1_SMOOTH_DIMS]
-                             [-v [{DEBUG,INFO,WARNING}]] [-f]
-                             out_dir
-
-This script computes four myelin indices maps from the Magnetization Transfer
-(MT) and inhomogeneous Magnetization Transfer (ihMT) images. Magnetization
-Transfer is a contrast mechanism in tissue resulting from the proton exchange
-between non-aqueous protons (from macromolecules and their closely associated
-water molecules, the "bound" pool) and protons in the free water pool called
-aqueous protons. This exchange attenuates the MRI signal, introducing
-microstructure-dependent contrast. MT's effect reflects the relative density
-of macromolecules such as proteins and lipids; it has been associated with
-myelin content in the white matter of the brain.
-
-Different contrasts can be done with an off-resonance pulse prior to image
-acquisition (a prepulse), saturating the protons on non-aqueous molecules,
-by applying different frequency irradiation. The two MT maps and two ihMT maps
-are obtained using six contrasts: single positive frequency image, single
-negative frequency image, dual alternating positive/negative frequency image,
-dual alternating negative/positive frequency image (saturated images);
-and two unsaturated contrasts as reference. These two references should be
-acquired with predominant PD (proton density) and T1 weighting at different
-excitation flip angles (a_PD, a_T1) and repetition times (TR_PD, TR_T1).
-
-Input Data recommendation:
-  - it is recommended to use dcm2niix (v1.0.20200331) to convert data
-    https://github.com/rordenlab/dcm2niix/releases/tag/v1.0.20200331
-  - dcm2niix conversion will create all echo files for each contrast and
-    corresponding json files
-  - all contrasts must have the same number of echoes and be coregistered
-    before running the script
-  - Mask must be coregistered to the echo images
-  - ANTs can be used for the registration steps (http://stnava.github.io/ANTs/)
-
-The output consists of an ihMT_native_maps folder containing the 4 myelin maps:
-  - MTR.nii.gz : Magnetization Transfer Ratio map
-  - ihMTR.nii.gz : inhomogeneous Magnetization Transfer Ratio map
-    The (ih)MT ratio is a measure reflecting the amount of bound protons.
-  - MTsat.nii.gz : Magnetization Transfer saturation map
-  - ihMTsat.nii.gz : inhomogeneous Magnetization Transfer saturation map
-    The (ih)MT saturation is a pseudo-quantitative map representing
-    the signal change between the bound and free water pools.
-
-As an option, the Complementary_maps folder contains the following images:
-  - altnp.nii.gz : dual alternating negative and positive frequency image
-  - altpn.nii.gz : dual alternating positive and negative frequency image
-  - positive.nii.gz : single positive frequency image
-  - negative.nii.gz : single negative frequency image
-  - mtoff_PD.nii.gz : unsaturated proton density weighted image
-  - mtoff_T1.nii.gz : unsaturated T1 weighted image
-  - MTsat_d.nii.gz : MTsat computed from the mean dual frequency images
-  - MTsat_sp.nii.gz : MTsat computed from the single positive frequency image
-  - MTsat_sn.nii.gz : MTsat computed from the single negative frequency image
-  - R1app.nii.gz : Apparent R1 map computed for MTsat.
-  - B1_map.nii.gz : B1 map after correction and smoothing (if given).
-
-The final maps from ihMT_native_maps can be corrected for B1+ field
-inhomogeneity, using either an empiric method with
-  --in_B1_map option, suffix *B1_corrected is added for each map.
-  --B1_correction_method empiric
-or a model-based method with
-  --in_B1_map option, suffix *B1_corrected is added for each map.
-  --B1_correction_method model_based
-  --B1_fitvalues 3 .mat files, obtained externally from
-  https://github.com/TardifLab/OptimizeIHMTimaging/tree/master/b1Correction,
-  and given in this order: positive frequency saturation, negative frequency
-  saturation, dual frequency saturation.
-For both methods, the nominal value of the B1 map can be set with
-  --B1_nominal value
-
->>> scil_mti_maps_ihMT.py path/to/output/directory
-    --in_altnp path/to/echo*altnp.nii.gz --in_altpn path/to/echo*altpn.nii.gz
-    --in_mtoff_pd path/to/echo*mtoff.nii.gz
-    --in_negative path/to/echo*neg.nii.gz --in_positive path/to/echo*pos.nii.gz
-    --in_mtoff_t1 path/to/echo*T1w.nii.gz --mask path/to/mask_bin.nii.gz
-    --in_jsons path/to/echo*mtoff.json path/to/echo*T1w.json
-
-By default, the script uses all the echoes available in the input folder.
-If you want to use a single echo, replace the * with the specific number of
-the echo.
-
-positional arguments:
-  out_dir               Path to output folder.
-
-options:
-  -h, --help            show this help message and exit
-  --out_prefix OUT_PREFIX
-                        Prefix to be used for each output image.
-  --mask MASK           Path to the binary brain mask.
-  --extended            If set, outputs the folder Complementary_maps.
-  --filtering           Gaussian filtering to remove Gibbs ringing. Not recommended.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Contrast maps:
-  Path to echoes corresponding to contrast images. All contrasts must have
-  the same number of echoes and be coregistered. Use * to include all echoes.
-
-  --in_altnp IN_ALTNP [IN_ALTNP ...]
-                        Path to all echoes corresponding to the alternation of
-                        negative and positive frequency saturation pulse.
-  --in_altpn IN_ALTPN [IN_ALTPN ...]
-                        Path to all echoes corresponding to the alternation of
-                        positive and negative frequency saturation pulse.
-  --in_negative IN_NEGATIVE [IN_NEGATIVE ...]
-                        Path to all echoes corresponding to the negative frequency
-                        saturation pulse.
-  --in_positive IN_POSITIVE [IN_POSITIVE ...]
-                        Path to all echoes corresponding to the positive frequency
-                        saturation pulse.
-  --in_mtoff_pd IN_MTOFF_PD [IN_MTOFF_PD ...]
-                        Path to all echoes corresponding to the predominant PD
-                        (proton density) weighting images with no saturation pulse.
-  --in_mtoff_t1 IN_MTOFF_T1 [IN_MTOFF_T1 ...]
-                        Path to all echoes corresponding to the predominant T1
-                        weighting images with no saturation pulse. This one is optional,
-                        since it is only needed for the calculation of MTsat and ihMTsat.
-                        Acquisition parameters should also be set with this image.
-
-Acquisition parameters:
-  Acquisition parameters required for MTsat and ihMTsat calculation.
-  These are the excitation flip angles (a_PD, a_T1), in DEGREES, and
-  repetition times (TR_PD, TR_T1) of the PD and T1 images, in SECONDS.
-  Can be given through json files (--in_jsons) or directly (--in_acq_parameters).
-
-  --in_jsons PD_json T1_json
-                        Path to MToff PD json file and MToff T1 json file, in that order.
-                        The acquisition parameters will be extracted from these files.
-                        Must come from a Philips acquisition; otherwise, use in_acq_parameters.
-  --in_acq_parameters PD flip angle T1 flip angle PD repetition time T1 repetition time
-                        Acquisition parameters in that order: flip angle of mtoff_PD,
-                        flip angle of mtoff_T1, repetition time of mtoff_PD,
-                        repetition time of mtoff_T1.
-
-B1 correction:
-  --in_B1_map IN_B1_MAP
-                        Path to B1 map coregistered to MT contrasts.
-  --B1_correction_method {empiric,model_based}
-                        Choice of B1 correction method. Choose between empiric and model-based.
-                        Note that the model-based method requires a B1 fitvalues file.
-                        Both methods will only correct the saturation measures. [empiric]
-  --B1_fitvalues B1_FITVALUES [B1_FITVALUES ...]
-                        Path to B1 fitvalues files obtained externally. Should be one .mat
-                        file per input MT-on image, given in this specific order:
-                        positive frequency saturation, negative frequency saturation.
-  --B1_nominal B1_NOMINAL
-                        Nominal value for the B1 map. For Philips, should be 100. [100]
-  --B1_smooth_dims B1_SMOOTH_DIMS
-                        Dimension of the squared window used for B1 smoothing, in number of voxels. [5]
diff --git a/scripts/.hidden/scil_plot_stats_per_point.py.help b/scripts/.hidden/scil_plot_stats_per_point.py.help
deleted file mode 100644
index c156e3da1..000000000
--- a/scripts/.hidden/scil_plot_stats_per_point.py.help
+++ /dev/null
@@ -1,33 +0,0 @@
-usage: scil_plot_stats_per_point.py [-h] [--stats_over_population]
-                                    [--nb_pts NB_PTS] [--display_means]
-                                    [--fill_color FILL_COLOR | --dict_colors DICT_COLORS]
-                                    [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                    in_json out_dir
-
-Plot all mean/std per point for a subject or population json file from
-tractometry-flow.
-WARNING: For a population, the displayed STDs only show the variation
-of the means. They do not account for intra-subject STDs.
-
-Formerly: scil_plot_mean_std_per_point.py
-
-positional arguments:
-  in_json               JSON file containing the mean/std per point. For example, can be created using scil_bundle_mean_std.py.
-  out_dir               Output directory.
-
-options:
-  -h, --help            show this help message and exit
-  --stats_over_population
-                        If set, consider the input stats to be over an entire population and not subject-based.
-  --nb_pts NB_PTS       Force the number of divisions for the bundles.
-                        Avoids unequal plots across datasets; replaces missing data with zeros.
-  --display_means       Display the subjects' means as semi-transparent lines.
-                        Poor results when the number of subjects is high.
-  --fill_color FILL_COLOR
-                        Hexadecimal RGB color filling the region between mean +/- std.
The hexadecimal RGB color should be formatted as 0xRRGGBB. - --dict_colors DICT_COLORS - Dictionary mapping basename to color. Same convention as --fill_color. - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_qball_metrics.py.help b/scripts/.hidden/scil_qball_metrics.py.help deleted file mode 100644 index 5c60bcc39..000000000 --- a/scripts/.hidden/scil_qball_metrics.py.help +++ /dev/null @@ -1,71 +0,0 @@ -usage: scil_qball_metrics.py [-h] [-f] [--sh_order SH_ORDER] [--mask MASK] - [--use_qball] [--not_all] [--gfa GFA] - [--peaks PEAKS] [--peak_indices PEAK_INDICES] - [--sh SH] [--nufo NUFO] [--a_power A_POWER] - [--b0_threshold thr] [--skip_b0_check] - [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] - [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] - in_dwi in_bval in_bvec - -Script to compute the Constant Solid Angle (CSA) or Analytical Q-ball model, -the generalized fractional anisotropy (GFA) and the peaks of the model. - -By default, will output all possible files, using default names. Specific names -can be specified using the file flags specified in the "File flags" section. - -If --not_all is set, only the files specified explicitly by the flags will be -output. - -See [Descoteaux et al MRM 2007, Aganj et al MRM 2009] for details and -[Cote et al MEDIA 2013] for quantitative comparisons. - -Formerly: scil_compute_qball_metrics.py - -positional arguments: - in_dwi Path of the input diffusion volume. - in_bval Path of the bvals file, in FSL format. - in_bvec Path of the bvecs file, in FSL format. - -options: - -h, --help show this help message and exit - -f Force overwriting of the output files. - --sh_order SH_ORDER Spherical harmonics order. Must be a positive even number [4]. - --mask MASK Path to a binary mask. Only data inside the mask will be used for computations and reconstruction [None]. - --use_qball If set, qball will be used as the odf reconstruction model instead of CSA. - --not_all If set, will only save the files specified using the following flags. - --b0_threshold thr Threshold under which b-values are considered to be b0s. - [Default: 20] - * Note. We would expect to find at least one b-value in the - range [0, b0_threshold]. To skip this check, use --skip_b0_check. - --skip_b0_check By default, we supervise that at least one b0 exists in your data - (i.e. b-values below the default --b0_threshold). Use this option to - allow continuing even if the minimum b-value is suspiciously high. - If no b-value is found below the threshold, the script will continue - with your minimal b-value as new --b0_threshold. - Use with care, and only if you understand your data. - --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} - Spherical harmonics basis used for the SH coefficients. - Must be either 'descoteaux07', 'tournier07', - 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: - 'descoteaux07' : SH basis from the Descoteaux et al. - MRM 2007 paper - 'tournier07' : SH basis from the new Tournier et al. - NeuroImage 2019 paper, as in MRtrix 3. - 'descoteaux07_legacy': SH basis from the legacy Dipy implementation - of the Descoteaux et al. MRM 2007 paper - 'tournier07_legacy' : SH basis from the legacy Tournier et al. - NeuroImage 2007 paper. - --processes NBR Number of sub-processes to start.
- Default: [1] - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -File flags: - --gfa GFA Output filename for the generalized fractional anisotropy [gfa.nii.gz]. - --peaks PEAKS Output filename for the extracted peaks [peaks.nii.gz]. - --peak_indices PEAK_INDICES - Output filename for the generated peaks indices on the sphere [peaks_indices.nii.gz]. - --sh SH Output filename for the spherical harmonics coefficients [sh.nii.gz]. - --nufo NUFO Output filename for the NUFO map [nufo.nii.gz]. - --a_power A_POWER Output filename for the anisotropic power map [anisotropic_power.nii.gz]. diff --git a/scripts/.hidden/scil_rgb_convert.py.help b/scripts/.hidden/scil_rgb_convert.py.help deleted file mode 100644 index 145cd097c..000000000 --- a/scripts/.hidden/scil_rgb_convert.py.help +++ /dev/null @@ -1,33 +0,0 @@ -usage: scil_rgb_convert.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f] - in_image out_image - -Converts an RGB image encoded as a 4D image to an RGB image encoded as -a 3D image, or vice versa. - -Typically, most software tools used in the SCIL (including MI-Brain) use -the former, while Trackvis uses the latter. - -Input --Case 1: 4D image where the 4th dimension contains 3 values. --Case 2: 3D image, in Trackvis format where each voxel contains a - tuple of 3 elements, one for each value. - -Output --Case 1: 3D image, in Trackvis format where each voxel contains a - tuple of 3 elements, one for each value (uint8). --Case 2: 4D image where the 4th dimension contains 3 values (uint8). - -Formerly: scil_convert_rgb.py - -positional arguments: - in_image Name of the input RGB image. - Either a 4D or 3D image. - out_image Name of the output RGB image. - Either a 3D or 4D image. - -options: - -h, --help show this help message and exit - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_sh_convert.py.help b/scripts/.hidden/scil_sh_convert.py.help deleted file mode 100644 index 9b4e814a6..000000000 --- a/scripts/.hidden/scil_sh_convert.py.help +++ /dev/null @@ -1,39 +0,0 @@ -usage: scil_sh_convert.py [-h] [--processes NBR] [-v [{DEBUG,INFO,WARNING}]] - [-f] - in_sh out_sh - {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} - {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} - -Convert a SH file between two of the following basis choices: -'descoteaux07', 'descoteaux07_legacy', 'tournier07' or 'tournier07_legacy'. -Using the sh_basis argument, both the input and the output SH bases must be -given, in that order. For more information about the bases, see -https://docs.dipy.org/stable/theory/sh_basis.html. - -Formerly: scil_convert_sh_basis.py - -positional arguments: - in_sh Input SH filename. (nii or nii.gz) - out_sh Output SH filename. (nii or nii.gz) - {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} - Spherical harmonics basis used for the SH coefficients. - Both the input and output bases are required, in that order. - Must be either 'descoteaux07', 'tournier07', - 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy', 'tournier07']]: - 'descoteaux07' : SH basis from the Descoteaux et al. - MRM 2007 paper - 'tournier07' : SH basis from the new Tournier et al. - NeuroImage 2019 paper, as in MRtrix 3.
- 'descoteaux07_legacy': SH basis from the legacy Dipy implementation - of the Descoteaux et al. MRM 2007 paper - 'tournier07_legacy' : SH basis from the legacy Tournier et al. - NeuroImage 2007 paper. - -options: - -h, --help show this help message and exit - --processes NBR Number of sub-processes to start. - Default: [1] - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_sh_fusion.py.help b/scripts/.hidden/scil_sh_fusion.py.help deleted file mode 100644 index 16453420f..000000000 --- a/scripts/.hidden/scil_sh_fusion.py.help +++ /dev/null @@ -1,36 +0,0 @@ -usage: scil_sh_fusion.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f] - in_shs [in_shs ...] out_sh - -Merge a list of Spherical Harmonics files. - -This merges the coefficients of multiple Spherical Harmonics files by taking, -for each coefficient, the one with the largest magnitude. - -Can be used to merge fODFs computed from different shells into one, while -conserving the most relevant information. - -Based on [1] and [2]. - -Formerly: scil_merge_sh.py - -positional arguments: - in_shs List of SH files. - out_sh Output SH file. - -options: - -h, --help show this help message and exit - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Reference: -[1] Garyfallidis, E., Zucchelli, M., Houde, J-C., Descoteaux, M. - How to perform best ODF reconstruction from the Human Connectome - Project sampling scheme? - ISMRM 2014. - -[2] Khachaturian, M. H., Wisco, J. J., & Tuch, D. S. (2007). Boosting the - sampling efficiency of q‐ball imaging using multiple wavevector fusion. - Magnetic Resonance in Medicine: An Official Journal of the International - Society for Magnetic Resonance in Medicine, 57(2), 289-296. diff --git a/scripts/.hidden/scil_sh_to_aodf.py.help b/scripts/.hidden/scil_sh_to_aodf.py.help deleted file mode 100644 index 8fbb6801a..000000000 --- a/scripts/.hidden/scil_sh_to_aodf.py.help +++ /dev/null @@ -1,96 +0,0 @@ -usage: scil_sh_to_aodf.py [-h] [--out_sym OUT_SYM] - [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] - [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}] - [--method {unified,cosine}] - [--sigma_spatial SIGMA_SPATIAL] - [--sigma_align SIGMA_ALIGN] - [--sigma_range SIGMA_RANGE] - [--sigma_angle SIGMA_ANGLE] [--disable_spatial] - [--disable_align] [--disable_range] - [--include_center] [--win_hwidth WIN_HWIDTH] - [--sharpness SHARPNESS] [--device {cpu,gpu}] - [--use_opencl] [--patch_size PATCH_SIZE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_sh out_sh - -Script to estimate asymmetric ODFs (aODFs) from a spherical harmonics image. - -Two methods are available: - * Unified filtering [1] combines four asymmetric filtering methods into - a single equation and relies on a combination of four Gaussian filters. - * Cosine filtering [2] is a simpler implementation using cosine distance - for assigning weights to neighbours. - -Unified filtering can be accelerated using OpenCL with the option --use_opencl. -Make sure you have pyopencl installed before using this option. By default, the -OpenCL program will run on the CPU. To use a GPU instead, also specify the -option --device gpu. - -positional arguments: - in_sh Path to the input file.
- out_sh File name for averaged signal. - -options: - -h, --help show this help message and exit - --out_sym OUT_SYM Name of optional symmetric output. [None] - --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} - Spherical harmonics basis used for the SH coefficients. - Must be either 'descoteaux07', 'tournier07', - 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: - 'descoteaux07' : SH basis from the Descoteaux et al. - MRM 2007 paper - 'tournier07' : SH basis from the new Tournier et al. - NeuroImage 2019 paper, as in MRtrix 3. - 'descoteaux07_legacy': SH basis from the legacy Dipy implementation - of the Descoteaux et al. MRM 2007 paper - 'tournier07_legacy' : SH basis from the legacy Tournier et al. - NeuroImage 2007 paper. - --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724} - Sphere used for the SH to SF projection. [repulsion200] - --method {unified,cosine} - Method for estimating asymmetric ODFs [unified]. - One of: - 'unified': Unified filtering [1]. - 'cosine' : Cosine-based filtering [2]. - --device {cpu,gpu} Device to use for execution. [cpu] - --use_opencl Accelerate code using OpenCL (requires pyopencl - and a working OpenCL implementation). - --patch_size PATCH_SIZE - OpenCL patch size. [40] - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Shared filter arguments: - --sigma_spatial SIGMA_SPATIAL - Standard deviation for spatial distance. [1.0] - -Unified filter arguments: - --sigma_align SIGMA_ALIGN - Standard deviation for alignment filter. [0.8] - --sigma_range SIGMA_RANGE - Standard deviation for range filter - *relative to SF range of image*. [0.2] - --sigma_angle SIGMA_ANGLE - Standard deviation for angular filter - (disabled by default). - --disable_spatial Disable spatial filtering. - --disable_align Disable alignment filtering. - --disable_range Disable range filtering. - --include_center Include center voxel in neighbourhood. - --win_hwidth WIN_HWIDTH - Filtering window half-width. Defaults to 3*sigma_spatial. - -Cosine filter arguments: - --sharpness SHARPNESS - Specify sharpness factor to use for - weighted average. [1.0] - -[1] Poirier and Descoteaux, 2024, "A Unified Filtering Method for Estimating - Asymmetric Orientation Distribution Functions", Neuroimage, vol. 287, - https://doi.org/10.1016/j.neuroimage.2024.120516 - -[2] Poirier et al, 2021, "Investigating the Occurrence of Asymmetric Patterns - in White Matter Fiber Orientation Distribution Functions", ISMRM 2021 - (abstract 0865) diff --git a/scripts/.hidden/scil_sh_to_rish.py.help b/scripts/.hidden/scil_sh_to_rish.py.help deleted file mode 100644 index 460b67ed4..000000000 --- a/scripts/.hidden/scil_sh_to_rish.py.help +++ /dev/null @@ -1,36 +0,0 @@ -usage: scil_sh_to_rish.py [-h] [--full_basis] [--mask MASK] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_sh out_prefix - -Compute the RISH (Rotationally Invariant Spherical Harmonics) features of an SH -signal [1]. - -Each RISH feature map is the total energy of its associated order. -Mathematically, it is the sum of the squared SH coefficients of the SH order. - -This script supports both symmetrical and asymmetrical SH images as input, of -any SH order. - -Each RISH feature will be saved as a separate file. - -[1] Mirzaalian, Hengameh, et al. "Harmonizing diffusion MRI data across -multiple sites and scanners." MICCAI 2015.
-https://scholar.harvard.edu/files/hengameh/files/miccai2015.pdf - -Formerly: scil_compute_rish_from_sh.py - -positional arguments: - in_sh Path of the SH image. It can be formatted in any SH basis, but we - expect it to be a symmetrical one. Otherwise, provide --full_basis. - out_prefix Prefix of the output RISH files to save. Suffixes will be - based on the SH orders. - -options: - -h, --help show this help message and exit - --full_basis Input SH image uses a full SH basis (asymmetrical). - --mask MASK Path to a binary mask. - Only data inside the mask will be used for computation. - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_sh_to_sf.py.help b/scripts/.hidden/scil_sh_to_sf.py.help deleted file mode 100644 index d943ec5e7..000000000 --- a/scripts/.hidden/scil_sh_to_sf.py.help +++ /dev/null @@ -1,67 +0,0 @@ -usage: scil_sh_to_sf.py [-h] - (--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724} | --in_bvec IN_BVEC) - [--dtype {float32,float64}] [--in_bval IN_BVAL] - [--in_b0 IN_B0] [--out_bval OUT_BVAL] - [--out_bvec OUT_BVEC] [--b0_scaling] - [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] - [--full_basis] [--b0_threshold thr] [--processes NBR] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_sh out_sf - -Script to sample SF values from a Spherical Harmonics signal. Outputs a Nifti -file with the SF values and an associated .bvec file with the chosen -directions. - -If converting from SH to a DWI-like SF volume, --in_bval and --in_b0 need -to be provided to concatenate the b0 image to the SF, and to generate the new -bvals file. Otherwise, no .bval file will be created. - -Formerly: scil_compute_sf_from_sh.py - -positional arguments: - in_sh Path of the SH volume. - out_sf Name of the output SF file to save (bvals/bvecs will be automatically named when necessary). - -options: - -h, --help show this help message and exit - --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724} - Sphere used for the SH to SF projection. - --in_bvec IN_BVEC Directions used for the SH to SF projection. - If given, --in_bval must also be provided. - --dtype {float32,float64} - Datatype to use for SF computation and output array. [float32] - --in_bval IN_BVAL b-value file, in FSL format, used to assign a b-value to the - output SF and generate a `.bval` file. - - If used, --out_bval is required. - - The output bval will contain one b-value per point in the SF - output (i.e. one per point on the --sphere or one per --in_bvec.) - - The values of the output bval will all be set to the same b-value: - the average of your in_bval. (Any b0 found in this file, i.e. - b-values under --b0_threshold, will be removed beforehand.) - - To add b0s to both the SF volume and the --out_bval file, use --in_b0. - --in_b0 IN_B0 b0 volume to concatenate to the final SF volume. - --out_bval OUT_BVAL Optional output bval file. - --out_bvec OUT_BVEC Optional output bvec file. - --b0_scaling Scale resulting SF by the b0 image (--in_b0 must be given). - --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} - Spherical harmonics basis used for the SH coefficients. - Must be either 'descoteaux07', 'tournier07', - 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: - 'descoteaux07' : SH basis from the Descoteaux et al.
- MRM 2007 paper - 'tournier07' : SH basis from the new Tournier et al. - NeuroImage 2019 paper, as in MRtrix 3. - 'descoteaux07_legacy': SH basis from the legacy Dipy implementation - of the Descoteaux et al. MRM 2007 paper - 'tournier07_legacy' : SH basis from the legacy Tournier et al. - NeuroImage 2007 paper. - --full_basis If true, use a full basis for the input SH coefficients. - --b0_threshold thr Threshold under which b-values are considered to be b0s. - Default if not set is 20. - This value is used with option --in_bval only: any b0 found in the in_bval will be removed. - --processes NBR Number of sub-processes to start. - Default: [1] - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_stats_group_comparison.py.help b/scripts/.hidden/scil_stats_group_comparison.py.help deleted file mode 100644 index af5020579..000000000 --- a/scripts/.hidden/scil_stats_group_comparison.py.help +++ /dev/null @@ -1,70 +0,0 @@ -usage: scil_stats_group_comparison.py [-h] [--out_dir OUT_DIR] - [--out_json OUT_JSON] - [--bundles BUNDLES [BUNDLES ...]] - [--metrics METRICS [METRICS ...]] - [--values VALUES [VALUES ...]] - [--alpha_error ALPHA_ERROR] - [--generate_graph] [--indent INDENT] - [--sort_keys] - [-v [{DEBUG,INFO,WARNING}]] [-f] - IN_JSON IN_PARTICIPANTS GROUP_BY - -Run group comparison statistics on metrics from tractometry: -1) Separates the sample into groups given a particular variable (group_by) - -2) Performs the Shapiro-Wilk test of normality for every sample -https://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test - -3) Performs Levene's or Bartlett's test (depending on normality) of variance -homogeneity Levene: -https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm -Bartlett: -https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm - -4) Tests the group difference for every measure with the correct test depending - on the sample (Student, Welch, Mann-Whitney U, ANOVA, Kruskal-Wallis) -Student : -https://en.wikipedia.org/wiki/Student%27s_t-test#Independent_two-sample_t-test -Welch : -https://en.wikipedia.org/wiki/Welch%27s_t-test -Mann-Whitney U : -https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test -ANOVA : -http://www.biostathandbook.com/onewayanova.html -Kruskal-Wallis : -https://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variance - -5) If the group difference test is positive and the number of groups is greater than - 2, tests the group difference two by two. - -6) Generates the result for all metrics and bundles - -Formerly: scil_group_comparison.py - -positional arguments: - IN_JSON Input JSON file from tractometry nextflow pipeline or equivalent. - IN_PARTICIPANTS Input tsv participants file. See doc in https://scilpy.readthedocs.io/en/latest/documentation/construct_participants_tsv_file.html. - GROUP_BY Variable that will be used to compare groups together. - -options: - -h, --help show this help message and exit - --out_dir OUT_DIR Name of the output folder path. [stats] - --out_json OUT_JSON The name of the result json output file; otherwise it will be printed. - --bundles BUNDLES [BUNDLES ...], -b BUNDLES [BUNDLES ...] - Bundle(s) in which you want to do stats. [all] - --metrics METRICS [METRICS ...], -m METRICS [METRICS ...] - Metric(s) on which you want to do stats. [all] - --values VALUES [VALUES ...], --va VALUES [VALUES ...] - Value(s) on which you want to do stats (mean, std).
[all] - --alpha_error ALPHA_ERROR, -a ALPHA_ERROR - Type 1 error for all the tests. [0.05] - --generate_graph, --gg - Generate a simple plot of every metric across groups. - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Json options: - --indent INDENT Indent for json pretty print. - --sort_keys Sort keys in output json. diff --git a/scripts/.hidden/scil_surface_apply_transform.py.help b/scripts/.hidden/scil_surface_apply_transform.py.help deleted file mode 100644 index 3ce730c8c..000000000 --- a/scripts/.hidden/scil_surface_apply_transform.py.help +++ /dev/null @@ -1,38 +0,0 @@ -usage: scil_surface_apply_transform.py [-h] [--ants_warp ANTS_WARP] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_surface ants_affine out_surface - -Script to apply a transform to a surface (FreeSurfer or VTK supported), -using output from ANTs registration tools (i.e. affine.txt, warp.nii.gz). - -Example usage from T1 to b0 using ANTs transforms: -> ConvertTransformFile 3 output0GenericAffine.mat vtk_transfo.txt --hm -> scil_surface_apply_transform.py lh_white_lps.vtk affine.txt lh_white_b0.vtk\ - --ants_warp warp.nii.gz - -Important: The input surface needs to be in *T1 world LPS* coordinates -(aligned over the T1 in MI-Brain). - -The script will use the linear affine first and then the warp image. -The resulting surface will be in *b0 world LPS* coordinates -(aligned over the b0 in MI-Brain). - -Formerly: scil_apply_transform_to_surface.py. - -positional arguments: - in_surface Input surface (.vtk). - ants_affine Affine transform from ANTs (.txt or .mat). - out_surface Output surface (.vtk). - -options: - -h, --help show this help message and exit - --ants_warp ANTS_WARP - Warp image from ANTs (Nifti image). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -References: -[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018. - Surface-enhanced tractography (SET). NeuroImage. diff --git a/scripts/.hidden/scil_surface_convert.py.help b/scripts/.hidden/scil_surface_convert.py.help deleted file mode 100644 index ed5db7ceb..000000000 --- a/scripts/.hidden/scil_surface_convert.py.help +++ /dev/null @@ -1,32 +0,0 @@ -usage: scil_surface_convert.py [-h] [--xform XFORM] [--to_lps] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_surface out_surface - -Script to convert surface formats. - -Supported formats: - ".vtk", ".vtp", ".ply", ".stl", ".xml", ".obj" - and FreeSurfer surfaces - -> scil_surface_convert.py surf.vtk converted_surf.ply - -Formerly: scil_convert_surface.py - -positional arguments: - in_surface Input surface (FreeSurfer or a format supported by VTK). - out_surface Output surface (formats supported by VTK). - -options: - -h, --help show this help message and exit - --xform XFORM Path of the copy-pasted output from mri_info. - Using: mri_info $input >> log.txt, - the file log.txt would be this parameter. - --to_lps Flip for Surface/MI-Brain LPS - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -References: -[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018. - Surface-enhanced tractography (SET). NeuroImage.
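To make the --xform workflow above concrete, here is a hedged sketch using placeholder FreeSurfer file names (T1.mgz and lh.white are assumptions, not files referenced elsewhere in this patch):
> mri_info T1.mgz >> log.txt
> scil_surface_convert.py lh.white lh_white.vtk --xform log.txt --to_lps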
diff --git a/scripts/.hidden/scil_surface_flip.py.help b/scripts/.hidden/scil_surface_flip.py.help deleted file mode 100644 index ffc1aba29..000000000 --- a/scripts/.hidden/scil_surface_flip.py.help +++ /dev/null @@ -1,25 +0,0 @@ -usage: scil_surface_flip.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f] - in_surface out_surface {x,y,z,n} [{x,y,z,n} ...] - -Script to flip a given surface (FreeSurfer or VTK supported). - -Can flip surface coordinates around a single axis or multiple axes. -Can also be used to reverse the orientation of the surface normals. - -Formerly: scil_flip_surface.py - -positional arguments: - in_surface Input surface (.vtk). - out_surface Output flipped surface (.vtk). - {x,y,z,n} The axes you want to flip. E.g., to flip the x and y axes, use: x y. To reverse the surface normals, use: n - -options: - -h, --help show this help message and exit - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -References: -[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018. - Surface-enhanced tractography (SET). NeuroImage. diff --git a/scripts/.hidden/scil_surface_smooth.py.help b/scripts/.hidden/scil_surface_smooth.py.help deleted file mode 100644 index e208926d0..000000000 --- a/scripts/.hidden/scil_surface_smooth.py.help +++ /dev/null @@ -1,36 +0,0 @@ -usage: scil_surface_smooth.py [-h] [-m VTS_MASK] [-n NB_STEPS] [-s STEP_SIZE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_surface out_surface - -Script to smooth a surface with a Laplacian blur. - -For a standard FreeSurfer white matter mesh, a step_size from 0.1 to 10 -is recommended. - -Smoothing time = step_size * nb_steps - small amount of smoothing [step_size 1, nb_steps 10] - moderate amount of smoothing [step_size 10, nb_steps 100] - large amount of smoothing [step_size 100, nb_steps 1000] - -Formerly: scil_smooth_surface.py - -positional arguments: - in_surface Input surface (.vtk). - out_surface Output smoothed surface (.vtk). - -options: - -h, --help show this help message and exit - -m VTS_MASK, --vts_mask VTS_MASK - Vertex mask: no smoothing where the mask equals 0 (.npy). - -n NB_STEPS, --nb_steps NB_STEPS - Number of steps for the Laplacian smoothing [2]. - -s STEP_SIZE, --step_size STEP_SIZE - Laplacian smoothing step size [5.0]. - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -References: -[1] St-Onge, E., Daducci, A., Girard, G. and Descoteaux, M. 2018. - Surface-enhanced tractography (SET). NeuroImage.
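As a hedged illustration of the "moderate amount of smoothing" rule of thumb above (step_size 10, nb_steps 100; the file names are placeholders):
> scil_surface_smooth.py lh_white.vtk lh_white_smooth.vtk -s 10 -n 100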
diff --git a/scripts/.hidden/scil_tracking_local.py.help b/scripts/.hidden/scil_tracking_local.py.help deleted file mode 100644 index 42177325b..000000000 --- a/scripts/.hidden/scil_tracking_local.py.help +++ /dev/null @@ -1,167 +0,0 @@ -usage: scil_tracking_local.py [-h] [--step STEP_SIZE] [--min_length m] - [--max_length M] [--theta THETA] - [--sfthres sf_th] - [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] - [--npv NPV | --nt NT] [--sh_to_pmf] - [--algo {det,prob,ptt,eudx}] - [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}] - [--sub_sphere SUB_SPHERE] - [--probe_length PROBE_LENGTH] - [--probe_radius PROBE_RADIUS] - [--probe_quality PROBE_QUALITY] - [--probe_count PROBE_COUNT] - [--support_exponent SUPPORT_EXPONENT] - [--use_gpu] [--sh_interp {trilinear,nearest}] - [--forward_only] [--batch_size BATCH_SIZE] - [--compress [COMPRESS_TH]] [-f] [--save_seeds] - [--seed SEED] [-v [{DEBUG,INFO,WARNING}]] - in_odf in_seed in_mask out_tractogram - -Local streamline HARDI tractography. -The tracking direction is chosen in the aperture cone defined by the -previous tracking direction and the angular constraint. - -WARNING: This script DOES NOT support asymmetric FODF input (aFODF). - -Algo 'eudx': select the peak from the spherical function (SF) most closely -aligned to the previous direction, and follow an average of it and the previous -direction [1]. - -Algo 'det': select the orientation corresponding to the maximum of the -spherical function. - -Algo 'prob': select a direction drawn from the empirical distribution function -defined from the SF. - -Algo 'ptt': select the propagation direction using the Parallel-Transport -Tractography (PTT) framework, see [2] for more details. - -NOTE: eudx can be used with pre-computed peaks from fodf as well as -evecs_v1.nii.gz from scil_dti_metrics.py (experimental). - -NOTE: If tracking with PTT, the step-size should be smaller than usual, -i.e. 0.1-0.2mm or lower. The maximum angle between segments (theta) should -be between 10 and 20 degrees. - -The local tracking algorithm can also run on the GPU using the --use_gpu -option (experimental). By default, GPU tracking behaves the same as -DIPY. Below is a list of known divergences between the CPU and GPU -implementations: - * Backend: The CPU implementation uses DIPY's LocalTracking and the - GPU implementation uses an in-house OpenCL implementation. - * Algo: For the GPU implementation, the only available algorithm is - Algo 'prob'. - * SH interpolation: For GPU tracking, SH interpolation can be set to either - nearest neighbour or trilinear (default). With DIPY, the only available - method is trilinear. - * Forward tracking: For GPU tracking, the `--forward_only` flag can be used - to disable backward tracking. This option isn't available for CPU - tracking. - -All the input nifti files must be in isotropic resolution. - -References ----------- -[1]: Garyfallidis, E. (2012). Towards an accurate brain tractography -[PhD thesis]. University of Cambridge. United Kingdom. - -[2]: Aydogan, D. B., & Shi, Y. (2020). Parallel transport tractography. -IEEE transactions on medical imaging, 40(2), 635-647. - -Formerly: scil_compute_local_tracking.py - -positional arguments: - in_odf File containing the orientation diffusion function - as spherical harmonics file (.nii.gz). Ex: ODF or fODF. - in_seed Seeding mask (.nii.gz). - in_mask Tracking mask (.nii.gz). - Tracking will stop outside this mask.
The last point of each - streamline (triggering the stopping criteria) IS added to the streamline. - out_tractogram Tractogram output file (must be .trk or .tck). - -options: - -h, --help show this help message and exit - -Tracking options: - --step STEP_SIZE Step size in mm. [0.5] - --min_length m Minimum length of a streamline in mm. [10.0] - --max_length M Maximum length of a streamline in mm. [300.0] - --theta THETA Maximum angle between 2 steps. If the angle is too big, the streamline is - stopped and the following point is NOT included. - ["eudx"=60, "det"=45, "prob"=20, "ptt"=20] - --sfthres sf_th Spherical function relative threshold. [0.1] - --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} - Spherical harmonics basis used for the SH coefficients. - Must be either 'descoteaux07', 'tournier07', - 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: - 'descoteaux07' : SH basis from the Descoteaux et al. - MRM 2007 paper - 'tournier07' : SH basis from the new Tournier et al. - NeuroImage 2019 paper, as in MRtrix 3. - 'descoteaux07_legacy': SH basis from the legacy Dipy implementation - of the Descoteaux et al. MRM 2007 paper - 'tournier07_legacy' : SH basis from the legacy Tournier et al. - NeuroImage 2007 paper. - --sh_to_pmf If set, map spherical harmonics to the spherical function (pmf) before - tracking (faster, requires more memory). - --algo {det,prob,ptt,eudx} - Algorithm to use. [prob] - --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724} - Dipy sphere; set of possible directions. - Default: [repulsion724] - --sub_sphere SUB_SPHERE - Subdivides each face of the sphere into 4^s new faces. [0] - -Seeding options: - When no option is provided, uses --npv 1. - - --npv NPV Number of seeds per voxel. - --nt NT Total number of seeds to use. - -PTT options: - --probe_length PROBE_LENGTH - The length of the probes. A smaller value - yields more dispersed fibers. [1.0] - --probe_radius PROBE_RADIUS - The radius of the probe. A large probe_radius - helps mitigate noise in the pmf but it might - make it harder to sample thin and intricate - connections; also, the boundary of fiber - bundles might be eroded. [0] - --probe_quality PROBE_QUALITY - The quality of the probe. This parameter sets - the number of segments to split the cylinder - along the length of the probe (minimum=2). [3] - --probe_count PROBE_COUNT - The number of probes. This parameter sets the - number of parallel lines used to model the - cylinder (minimum=1). [1] - --support_exponent SUPPORT_EXPONENT - Data support exponent, used for rejection - sampling. [3] - -GPU options: - --use_gpu Enable GPU tracking (experimental). - --sh_interp {trilinear,nearest} - SH image interpolation method. [trilinear] - --forward_only Perform forward tracking only. - --batch_size BATCH_SIZE - Approximate size of GPU batches (number - of streamlines to track in parallel). [10000] - -Output options: - --compress [COMPRESS_TH] - If set, compress the resulting streamlines. Value is the maximum - compression distance in mm. - A rule of thumb is to set it to 0.1mm for deterministic - streamlines and to 0.2mm for probabilistic streamlines. [0.1] - -f Force overwriting of the output files. - --save_seeds If set, save the seeds used for the tracking - in the data_per_streamline property. - Hint: you can then use scil_tractogram_seed_density_map. - --seed SEED Random number generator seed.
- -Logging options: - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. diff --git a/scripts/.hidden/scil_tracking_local_dev.py.help b/scripts/.hidden/scil_tracking_local_dev.py.help deleted file mode 100644 index 53d69d330..000000000 --- a/scripts/.hidden/scil_tracking_local_dev.py.help +++ /dev/null @@ -1,158 +0,0 @@ -usage: scil_tracking_local_dev.py [-h] [--step STEP_SIZE] [--min_length m] - [--max_length M] [--theta THETA] - [--sfthres sf_th] - [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] - [--npv NPV | --nt NT] [--algo {det,prob}] - [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}] - [--sub_sphere SUB_SPHERE] - [--sfthres_init sf_th] [--rk_order K] - [--max_invalid_nb_points MAX] - [--forward_only] - [--sh_interp {nearest,trilinear}] - [--mask_interp {nearest,trilinear}] - [--keep_last_out_point] - [--n_repeats_per_seed N_REPEATS_PER_SEED] - [--rng_seed RNG_SEED] [--skip SKIP] - [--processes NBR] [--compress [COMPRESS_TH]] - [-f] [--save_seeds] - [-v [{DEBUG,INFO,WARNING}]] - in_odf in_seed in_mask out_tractogram - -Local streamline HARDI tractography using scilpy-only methods -- no dipy (i.e. -no cython). The goal of this is to have a python-only version that can be -modified more easily by our team when testing new algorithms and parameters, -and that can be used as parent classes in sub-projects of our lab such as in -dwi_ml. - -WARNING. MUCH SLOWER THAN scil_tracking_local.py. We recommend using multi- -processing with the option --processes. - -Similar to scil_tracking_local: - The tracking direction is chosen in the aperture cone defined by the - previous tracking direction and the angular constraint. - - Algo 'det': the maximum of the spherical function (SF) most closely - aligned to the previous direction. - - Algo 'prob': a direction drawn from the empirical distribution function - defined from the SF. - -Contrary to scil_tracking_local: - - Algo 'eudx' is not yet available! - - Input nifti files do not necessarily need to be in isotropic resolution. - - The script works with asymmetric input ODF. - - The interpolation for the tracking mask and spherical function can be - one of 'nearest' or 'trilinear'. - - Runge-Kutta integration is supported for the step function. - -A few notes on Runge-Kutta integration. - 1. Runge-Kutta integration is used to approximate the next tracking - direction by estimating directions from future tracking steps. This - works well for deterministic tracking. However, in the context of - probabilistic tracking, the next tracking directions cannot be estimated - in advance, because they are picked randomly from a distribution. It is - therefore recommended to keep the rk_order to 1 for probabilistic - tracking. - 2. As a rule of thumb, doubling the rk_order will double the computation - time in the worst case. - -References: [1] Girard, G., Whittingstall K., Deriche, R., and - Descoteaux, M. (2014). Towards quantitative connectivity analysis: - reducing tractography biases. Neuroimage, 98, 266-278. - -Formerly: scil_compute_local_tracking_dev.py - -positional arguments: - in_odf File containing the orientation diffusion function - as spherical harmonics file (.nii.gz). Ex: ODF or fODF. - in_seed Seeding mask (.nii.gz). - in_mask Tracking mask (.nii.gz). - Tracking will stop outside this mask.
The last point of each - streamline (triggering the stopping criteria) IS added to the streamline. - out_tractogram Tractogram output file (must be .trk or .tck). - -options: - -h, --help show this help message and exit - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -Tracking options: - --step STEP_SIZE Step size in mm. [0.5] - --min_length m Minimum length of a streamline in mm. [10.0] - --max_length M Maximum length of a streamline in mm. [300.0] - --theta THETA Maximum angle between 2 steps. If the angle is too big, the streamline is - stopped and the following point is NOT included. - ["eudx"=60, "det"=45, "prob"=20, "ptt"=20] - --sfthres sf_th Spherical function relative threshold. [0.1] - --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} - Spherical harmonics basis used for the SH coefficients. - Must be either 'descoteaux07', 'tournier07', - 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: - 'descoteaux07' : SH basis from the Descoteaux et al. - MRM 2007 paper - 'tournier07' : SH basis from the new Tournier et al. - NeuroImage 2019 paper, as in MRtrix 3. - 'descoteaux07_legacy': SH basis from the legacy Dipy implementation - of the Descoteaux et al. MRM 2007 paper - 'tournier07_legacy' : SH basis from the legacy Tournier et al. - NeuroImage 2007 paper. - --algo {det,prob} Algorithm to use. [prob] - --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724} - Dipy sphere; set of possible directions. - Default: [repulsion724] - --sub_sphere SUB_SPHERE - Subdivides each face of the sphere into 4^s new faces. [0] - --sfthres_init sf_th Spherical function relative threshold value for the - initial direction. [0.5] - --rk_order K The order of the Runge-Kutta integration used for the step function. - For more information, refer to the note in the script description. [1] - --max_invalid_nb_points MAX - Maximum number of steps without a valid direction, - e.g. if the threshold on the ODF or the max angle is reached. - Default: 0, i.e. do not add points following an invalid direction. - --forward_only If set, tracks in one direction only (forward) given the - initial seed. The direction is randomly drawn from the ODF. - --sh_interp {nearest,trilinear} - Spherical harmonic interpolation: nearest-neighbor - or trilinear. [trilinear] - --mask_interp {nearest,trilinear} - Mask interpolation: nearest-neighbor or trilinear. [nearest] - --keep_last_out_point - If set, keep the last point (once out of the tracking mask) of - the streamline. Default: discard them. This is the default in - Dipy too. Note that points obtained after an invalid direction - (e.g. when the angle is too sharp or sh_threshold is not reached) are - never added. - --n_repeats_per_seed N_REPEATS_PER_SEED - By default, each seed position is used only once. This option - allows for tracking from the exact same seed n_repeats_per_seed - times. [1] - -Seeding options: - When no option is provided, uses --npv 1. - - --npv NPV Number of seeds per voxel. - --nt NT Total number of seeds to use. - -Random seeding options: - --rng_seed RNG_SEED Initial value for the random number generator. [0] - --skip SKIP Skip the first N random numbers. - Useful if you want to create new streamlines to add to - a previously created tractogram with a fixed --rng_seed. - Ex: If tractogram_1 was created with -nt 1,000,000, - you can create tractogram_2 with - --skip 1,000,000.
- -Memory options: - --processes NBR Number of sub-processes to start. - Default: [1] - -Output options: - --compress [COMPRESS_TH] - If set, compress the resulting streamlines. Value is the maximum - compression distance in mm. - A rule of thumb is to set it to 0.1mm for deterministic - streamlines and to 0.2mm for probabilistic streamlines. [0.1] - -f Force overwriting of the output files. - --save_seeds If set, save the seeds used for the tracking - in the data_per_streamline property. - Hint: you can then use scil_tractogram_seed_density_map. diff --git a/scripts/.hidden/scil_tracking_pft.py.help b/scripts/.hidden/scil_tracking_pft.py.help deleted file mode 100644 index 54db07bd3..000000000 --- a/scripts/.hidden/scil_tracking_pft.py.help +++ /dev/null @@ -1,107 +0,0 @@ -usage: scil_tracking_pft.py [-h] [--algo {det,prob}] [--step STEP_SIZE] - [--min_length MIN_LENGTH] - [--max_length MAX_LENGTH] [--theta THETA] [--act] - [--sfthres SF_THRESHOLD] - [--sfthres_init SF_THRESHOLD_INIT] - [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] - [--npv NPV | --nt NT] [--particles PARTICLES] - [--back BACK_TRACKING] - [--forward FORWARD_TRACKING] [--all] [--seed SEED] - [-f] [--save_seeds] [--compress [COMPRESS_TH]] - [-v [{DEBUG,INFO,WARNING}]] - in_sh in_seed in_map_include map_exclude_file - out_tractogram - -Local streamline HARDI tractography including Particle Filtering tracking. - -WARNING: This script DOES NOT support asymmetric FODF input (aFODF). - -The tracking is done inside partial volume estimation maps and uses the -particle filtering tractography (PFT) algorithm. See -scil_tracking_pft_maps.py to generate the required PFT maps. - -Streamlines longer than min_length and shorter than max_length are kept. -The tracking direction is chosen in the aperture cone defined by the -previous tracking direction and the angular constraint. -Default parameters as suggested in [1]. - -Algo 'det': the maximum of the spherical function (SF) most closely aligned -to the previous direction. -Algo 'prob': a direction drawn from the empirical distribution function defined -from the SF. - -For streamline compression, a rule of thumb is to set it to 0.1mm for the -deterministic algorithm and 0.2mm for the probabilistic algorithm. - -All the input nifti files must be in isotropic resolution. - -Formerly: scil_compute_pft.py - -positional arguments: - in_sh Spherical harmonic file (.nii.gz). - in_seed Seeding mask (.nii.gz). - in_map_include The probability map (.nii.gz) of ending the - streamline and including it in the output (CMC, PFT [1]). - map_exclude_file The probability map (.nii.gz) of ending the - streamline and excluding it from the output (CMC, PFT [1]). - out_tractogram Tractogram output file (must be .trk or .tck). - -Generic options: - -h, --help show this help message and exit - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -Tracking options: - --algo {det,prob} Algorithm to use (must be "det" or "prob"). [prob] - --step STEP_SIZE Step size in mm. [0.2] - --min_length MIN_LENGTH - Minimum length of a streamline in mm. [10.0] - --max_length MAX_LENGTH - Maximum length of a streamline in mm. [300.0] - --theta THETA Maximum angle between 2 steps. ["det"=45, "prob"=20] - --act If set, uses anatomically-constrained tractography (ACT) - instead of continuous map criterion (CMC). - --sfthres SF_THRESHOLD - Spherical function relative threshold.
[0.1] - --sfthres_init SF_THRESHOLD_INIT - Spherical function relative threshold value for the - initial direction. [0.5] - --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} - Spherical harmonics basis used for the SH coefficients. - Must be either 'descoteaux07', 'tournier07', - 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: - 'descoteaux07' : SH basis from the Descoteaux et al. - MRM 2007 paper - 'tournier07' : SH basis from the new Tournier et al. - NeuroImage 2019 paper, as in MRtrix 3. - 'descoteaux07_legacy': SH basis from the legacy Dipy implementation - of the Descoteaux et al. MRM 2007 paper - 'tournier07_legacy' : SH basis from the legacy Tournier et al. - NeuroImage 2007 paper. - -Seeding options: - When no option is provided, uses --npv 1. - - --npv NPV Number of seeds per voxel. - --nt NT Total number of seeds to use. - -PFT options: - --particles PARTICLES - Number of particles to use for PFT. [15] - --back BACK_TRACKING Length of PFT back tracking (mm). [2.0] - --forward FORWARD_TRACKING - Length of PFT forward tracking (mm). [1.0] - -Output options: - --all If set, keeps "excluded" streamlines. - NOT RECOMMENDED, except for debugging. - --seed SEED Random number generator seed. - -f Force overwriting of the output files. - --save_seeds If set, save the seeds used for the tracking - in the data_per_streamline property. - --compress [COMPRESS_TH] - If set, compress the resulting streamlines. Value is the maximum - compression distance in mm. [0.1] - -References: [1] Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M. (2014). Towards quantitative connectivity analysis: reducing tractography biases. Neuroimage, 98, 266-278. diff --git a/scripts/.hidden/scil_tracking_pft_maps.py.help b/scripts/.hidden/scil_tracking_pft_maps.py.help deleted file mode 100644 index a29968244..000000000 --- a/scripts/.hidden/scil_tracking_pft_maps.py.help +++ /dev/null @@ -1,31 +0,0 @@ -usage: scil_tracking_pft_maps.py [-h] [--include filename] - [--exclude filename] [--interface filename] - [-t THRESHOLD] [-f] - [-v [{DEBUG,INFO,WARNING}]] - in_wm in_gm in_csf - -Compute include and exclude maps, and the seeding interface mask from partial -volume estimation (PVE) maps. Maps should have values in [0,1], gm+wm+csf=1 in -all voxels of the brain, gm+wm+csf=0 elsewhere. - -References: Girard, G., Whittingstall K., Deriche, R., and Descoteaux, M. -(2014). Towards quantitative connectivity analysis: reducing tractography -biases. Neuroimage. - -Formerly: scil_compute_maps_for_particle_filter_tracking.py - -positional arguments: - in_wm White matter PVE map (nifti). From normal FAST output, has a PVE_2 name suffix. - in_gm Grey matter PVE map (nifti). From normal FAST output, has a PVE_1 name suffix. - in_csf Cerebrospinal fluid PVE map (nifti). From normal FAST output, has a PVE_0 name suffix. - -options: - -h, --help show this help message and exit - --include filename Output include map (nifti). [map_include.nii.gz] - --exclude filename Output exclude map (nifti). [map_exclude.nii.gz] - --interface filename Output interface seeding mask (nifti). [interface.nii.gz] - -t THRESHOLD Minimum gm and wm PVE values in a voxel to be included in the interface. [0.1] - -f Force overwriting of the output files. - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info.
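A hedged end-to-end sketch of the PFT workflow covered by the two help texts above. The PVE file names assume the FAST suffix convention mentioned above, fodf.nii.gz is a placeholder, and the map/interface names are the documented defaults:
> scil_tracking_pft_maps.py t1_pve_2.nii.gz t1_pve_1.nii.gz t1_pve_0.nii.gz
> scil_tracking_pft.py fodf.nii.gz interface.nii.gz map_include.nii.gz map_exclude.nii.gz pft.trk --npv 10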
diff --git a/scripts/.hidden/scil_tracking_pft_maps_edit.py.help b/scripts/.hidden/scil_tracking_pft_maps_edit.py.help deleted file mode 100644 index 49889877d..000000000 --- a/scripts/.hidden/scil_tracking_pft_maps_edit.py.help +++ /dev/null @@ -1,21 +0,0 @@ -usage: scil_tracking_pft_maps_edit.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f] - map_include map_exclude additional_mask - map_include_corr map_exclude_corr - -Modify PFT maps to allow PFT tracking in a given mask (e.g. edema). - -Formerly: scil_add_tracking_mask_to_pft_maps.py. - -positional arguments: - map_include PFT map include. - map_exclude PFT map exclude. - additional_mask Allow PFT tracking in this mask. - map_include_corr Corrected PFT map include output file name. - map_exclude_corr Corrected PFT map exclude output file name. - -options: - -h, --help show this help message and exit - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_tractogram_apply_transform.py.help b/scripts/.hidden/scil_tractogram_apply_transform.py.help deleted file mode 100644 index d74fb6374..000000000 --- a/scripts/.hidden/scil_tractogram_apply_transform.py.help +++ /dev/null @@ -1,78 +0,0 @@ -usage: scil_tractogram_apply_transform.py [-h] [--inverse] - [--in_deformation file] - [--reverse_operation] - [--cut_invalid | --remove_invalid | --keep_invalid] - [--no_empty] [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_moving_tractogram in_target_file - in_transfo out_tractogram - -Transform a tractogram using an affine/rigid transformation and nonlinear -deformation (optional). - -For more information on how to use the registration script, follow this link: -https://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html - -Applying a transformation to a tractogram can lead to invalid streamlines (out of -the bounding box), and thus four strategies are available: -1) Do nothing, may crash at saving if invalid streamlines are present. - [This is the default] -2) --keep_invalid, save invalid streamlines. Leave it to the user to run - scil_tractogram_remove_invalid.py if needed. -3) --remove_invalid, automatically remove invalid streamlines before saving. - Should not remove more than a few streamlines. Typically, the streamlines - that are rejected are the ones reaching the limits of the brain, e.g. near - the pons. -4) --cut_invalid, automatically cut invalid streamlines before saving, i.e. the - streamlines are kept but the points out of the bounding box are cut. - -Example: -To apply a transformation from ANTs to a tractogram, if the ANTs command was -MOVING->REFERENCE... -1) To apply the original transformation: -scil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE} - 0GenericAffine.mat ${OUTPUT_NAME} - --inverse - --in_deformation 1InverseWarp.nii.gz - -2) To apply the inverse transformation, i.e. REFERENCE->MOVING: -scil_tractogram_apply_transform.py ${MOVING_FILE} ${REFERENCE_FILE} - 0GenericAffine.mat ${OUTPUT_NAME} - --in_deformation 1Warp.nii.gz - --reverse_operation - -Formerly: scil_apply_transform_to_tractogram.py - -positional arguments: - in_moving_tractogram Path of the tractogram to be transformed. - Bounding box validity will not be checked (could - contain invalid streamlines). - in_target_file Path of the reference target file (trk or nii).
- in_transfo Path of the file containing the 4x4 - transformation matrix (.txt, .npy or .mat). - out_tractogram Output tractogram filename (transformed data). - -options: - -h, --help show this help message and exit - --no_empty Do not write file if there is no streamline. - You may save an empty file if you use remove_invalid. - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Transformation options: - --inverse Apply the inverse linear transformation. - --in_deformation file - Path to the file containing a deformation field. - --reverse_operation Apply the transformation in reverse (see doc), warp - first, then linear. - -Management of invalid streamlines: - --cut_invalid Cut invalid streamlines rather than removing them. - Keep the longest segment only. - --remove_invalid Remove the streamlines landing out of the bounding box. - --keep_invalid Keep the streamlines landing out of the bounding box. diff --git a/scripts/.hidden/scil_tractogram_apply_transform_to_hdf5.py.help b/scripts/.hidden/scil_tractogram_apply_transform_to_hdf5.py.help deleted file mode 100644 index b60da727a..000000000 --- a/scripts/.hidden/scil_tractogram_apply_transform_to_hdf5.py.help +++ /dev/null @@ -1,52 +0,0 @@ -usage: scil_tractogram_apply_transform_to_hdf5.py [-h] [--inverse] - [--in_deformation file] - [--reverse_operation] - [--cut_invalid | --remove_invalid | --keep_invalid] - [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] - [-f] - in_hdf5 in_target_file - in_transfo out_hdf5 - -Transform tractogram(s) contained in the hdf5 output from a connectivity -script, using an affine/rigid transformation and nonlinear deformation -(optional). - -See scil_tractogram_apply_transform.py to apply directly to a tractogram. - -For more information on how to use the registration script, follow this link: -https://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html - -Or use >> scil_tractogram_apply_transform.py --help - -Formerly: scil_apply_transform_to_hdf5.py - -positional arguments: - in_hdf5 Path of the hdf5 containing the moving tractogram, to be transformed. (.h5 extension). - in_target_file Path of the reference target file (.trk or .nii). - in_transfo Path of the file containing the 4x4 - transformation matrix (.txt, .npy or .mat). - out_hdf5 Output tractogram filename (transformed data). - -options: - -h, --help show this help message and exit - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Transformation options: - --inverse Apply the inverse linear transformation. - --in_deformation file - Path to the file containing a deformation field. - --reverse_operation Apply the transformation in reverse (see doc), warp - first, then linear. - -Management of invalid streamlines: - --cut_invalid Cut invalid streamlines rather than removing them. - Keep the longest segment only. - --remove_invalid Remove the streamlines landing out of the bounding box. - --keep_invalid Keep the streamlines landing out of the bounding box.
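By analogy with the MOVING->REFERENCE example shown for scil_tractogram_apply_transform.py above, a hedged sketch for the hdf5 variant (the ANTs file names mirror that example; decompose.h5 and decompose_ref.h5 are placeholders):
> scil_tractogram_apply_transform_to_hdf5.py decompose.h5 ${REFERENCE_FILE} 0GenericAffine.mat decompose_ref.h5 --inverse --in_deformation 1InverseWarp.nii.gz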
diff --git a/scripts/.hidden/scil_tractogram_assign_custom_color.py.help b/scripts/.hidden/scil_tractogram_assign_custom_color.py.help deleted file mode 100644 index e69de29bb..000000000 diff --git a/scripts/.hidden/scil_tractogram_assign_uniform_color.py.help b/scripts/.hidden/scil_tractogram_assign_uniform_color.py.help deleted file mode 100644 index ece21001f..000000000 --- a/scripts/.hidden/scil_tractogram_assign_uniform_color.py.help +++ /dev/null @@ -1,50 +0,0 @@ -usage: scil_tractogram_assign_uniform_color.py [-h] - (--fill_color str | --dict_colors file.json) - (--out_suffix [suffix] | --out_tractogram file.trk) - [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] - [-f] - in_tractograms - [in_tractograms ...] - -Assign a hexadecimal RGB color to one or more Trackvis (.trk) tractograms. -(If called with .tck, the output will always be .trk, because data_per_point -has no equivalent in tck files.) - -Saves the RGB values in the data_per_point 'color' with values -(color_x, color_y, color_z). - -The hexadecimal RGB color should be formatted as 0xRRGGBB or "#RRGGBB". - -See also: scil_tractogram_assign_custom_color.py - -Formerly: scil_assign_uniform_color_to_tractograms.py - -positional arguments: - in_tractograms Input tractograms (.trk or .tck). - -options: - -h, --help show this help message and exit - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Coloring Methods: - --fill_color str Can be hexadecimal (i.e. either "#RRGGBB" or 0xRRGGBB). - --dict_colors file.json - Json file: dictionary mapping each tractogram's basename to a color. - Do not put your file's extension in your dict. - Same convention as --fill_color. - -Output options: - --out_suffix [suffix] - Specify suffix to append to input basename. - Mandatory choice if you run this script on multiple tractograms. - Mandatory choice with --dict_colors. - [None] - --out_tractogram file.trk - Output filename of colored tractogram (.trk). diff --git a/scripts/.hidden/scil_tractogram_commit.py.help b/scripts/.hidden/scil_tractogram_commit.py.help deleted file mode 100644 index dae781282..000000000 --- a/scripts/.hidden/scil_tractogram_commit.py.help +++ /dev/null @@ -1,160 +0,0 @@ -usage: scil_tractogram_commit.py [-h] [--nbr_dir NBR_DIR] - [--nbr_iter NBR_ITER] [--in_peaks IN_PEAKS] - [--in_tracking_mask IN_TRACKING_MASK] - [--commit2] - [--lambda_commit_2 LAMBDA_COMMIT_2] - [--ball_stick] [--para_diff PARA_DIFF] - [--perp_diff PERP_DIFF [PERP_DIFF ...]] - [--iso_diff ISO_DIFF [ISO_DIFF ...]] - [--keep_whole_tractogram] - [--save_kernels DIRECTORY | --load_kernels DIRECTORY] - [--compute_only] [--tolerance tol] - [--skip_b0_check] [--processes NBR] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_tractogram in_dwi in_bval in_bvec out_dir - -Convex Optimization Modeling for Microstructure Informed Tractography (COMMIT) -estimates, globally, how a given tractogram explains the DWI in terms of signal -fit, assuming a certain forward microstructure model. It assigns a weight to -each streamline, which represents how well it explains the DWI signal globally. -The default forward microstructure model is stick-zeppelin-ball, which requires -multi-shell data and a peak file (principal fiber directions in each voxel, -typically from a field of fODFs).
-
-It is possible to use the ball-and-stick model for single-shell and multi-shell
-data. In this case, the peak file is not mandatory. Multi-shell should follow a
-"NODDI protocol" (low and high b-values); multiple shells with similar b-values
-should not be used with COMMIT.
-
-The output from COMMIT is:
-- fit_NRMSE.nii.gz
-    fitting error (Normalized Root Mean Square Error)
-- fit_RMSE.nii.gz
-    fitting error (Root Mean Square Error)
-- results.pickle
-    Dictionary containing the experiment parameters and final weights
-- compartment_EC.nii.gz
-    (est. Extra-Cellular signal fraction)
-- compartment_IC.nii.gz
-    (est. Intra-Cellular signal fraction)
-- compartment_ISO.nii.gz
-    (est. isotropic signal fraction (freewater compartment)):
-    Each of the COMMIT compartments
-- streamline_weights.txt
-    Text file containing the commit weights for each streamline of the
-    input tractogram.
-- streamlines_length.txt
-    Text file containing the length (mm) of each streamline.
-- streamline_weights_by_length.txt
-    Text file containing the commit weights for each streamline of the
-    input tractogram, ordered by their length.
-- tot_streamline_weights
-    Text file containing the total commit weights of each streamline.
-    Equal to commit_weights * streamlines_length (W_i * L_i)
-- essential.trk / non_essential.trk
-    Tractograms containing the streamlines below or equal (essential) and
-    above (non_essential) a threshold_weights of 0.
-- decompose_commit.h5
-    In the case where the input is a hdf5 file only, we will save an output
-    hdf5 with the following information separated into each bundle's dps:
-        - streamlines_weights
-        - streamline_weights_by_length
-    For each bundle, only the essential streamlines are kept.
-
-This script can divide the input tractogram in two, using a threshold applied
-to the streamlines' weight. The threshold used is 0.0, keeping only streamlines
-that have a non-zero weight and that contribute to explaining the DWI signal.
-Streamlines with 0 weight are essentially not necessary according to COMMIT.
-
-COMMIT2 is available only for HDF5 data from
-scil_tractogram_segment_bundles_for_connectivity.py and
-with the --ball_stick option. Use the --commit2 option to activate it, at a
-slightly longer computation time. This wrapper offers a simplified way to call
-COMMIT, but does not allow using (or fine-tuning) every parameter. If you want
-to use COMMIT with full access to all parameters,
-visit: https://github.com/daducci/COMMIT
-
-When tuning parameters such as --iso_diff, --para_diff, --perp_diff or
---lambda_commit_2, you should evaluate the quality of the results by:
-    - Looking at the 'density' (GTM) of the connectome (essential tractogram)
-    - Confirming the quality of WM bundles reconstruction (essential tractogram)
-    - Inspecting the (N)RMSE map and looking for peaks or anomalies
-    - Comparing the density map before and after (essential tractogram)
-
-Formerly: scil_run_commit.py
-
-positional arguments:
-  in_tractogram         Input tractogram (.trk or .tck or .h5).
-  in_dwi                Diffusion-weighted image used by COMMIT (.nii.gz).
-  in_bval               b-values in the FSL format (.bval).
-  in_bvec               b-vectors in the FSL format (.bvec).
-  out_dir               Output directory for the COMMIT maps.
-
-options:
-  -h, --help            show this help message and exit
-  --nbr_dir NBR_DIR     Number of directions, on the half of the sphere,
-                        representing the possible orientations of the response functions [500].
-  --nbr_iter NBR_ITER   Maximum number of iterations [1000].
-  --in_peaks IN_PEAKS   Peaks file representing principal direction(s) locally,
-                        typically coming from fODFs. This file is mandatory for the default
-                        stick-zeppelin-ball model.
-  --in_tracking_mask IN_TRACKING_MASK
-                        Binary mask where tractography was allowed.
-                        If not set, uses a binary mask computed from the streamlines.
-  --tolerance tol       The tolerated gap between the b-values to extract and the current b-value.
-                        [Default: 20]
-                        * Note. We would expect to find at least one b-value in the
-                        range [0, tolerance]. To skip this check, use --skip_b0_check.
-  --skip_b0_check       By default, we supervise that at least one b0 exists in your data
-                        (i.e. b-values below the default --b0_threshold). Use this option to
-                        allow continuing even if the minimum b-value is suspiciously high.
-                        If no b-value is found below the threshold, the script will continue
-                        with your minimal b-value as the new --b0_threshold.
-                        Use with care, and only if you understand your data.
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-COMMIT2 options:
-  --commit2             Run COMMIT2; requires .h5 as input and will force the
-                        ball & stick model.
-  --lambda_commit_2 LAMBDA_COMMIT_2
-                        Specify the clustering prior strength [0.001].
-
-Model options:
-  --ball_stick          Use the ball & stick model; disables the zeppelin compartment.
-                        Only model suitable for single-shell data.
-  --para_diff PARA_DIFF
-                        Parallel diffusivity in mm^2/s.
-                        Default for both ball_stick and stick_zeppelin_ball: 1.7E-3.
-  --perp_diff PERP_DIFF [PERP_DIFF ...]
-                        Perpendicular diffusivity in mm^2/s.
-                        Default for ball_stick: None
-                        Default for stick_zeppelin_ball: [0.51E-3]
-  --iso_diff ISO_DIFF [ISO_DIFF ...]
-                        Isotropic diffusivity in mm^2/s.
-                        Default for ball_stick: [2.0E-3]
-                        Default for stick_zeppelin_ball: [1.7E-3, 3.0E-3]
-
-Tractogram options:
-  --keep_whole_tractogram
-                        Save a tractogram copy with streamlines weights in the data_per_streamline
-                        [False].
-  --compute_only        Compute kernels only; --save_kernels must be used.
-
-Kernels options:
-  --save_kernels DIRECTORY
-                        Output directory for the COMMIT kernels.
-  --load_kernels DIRECTORY
-                        Input directory where the COMMIT kernels are located.
-
-References:
-[1] Daducci, Alessandro, et al. "COMMIT: convex optimization modeling for
-    microstructure informed tractography." IEEE transactions on medical
-    imaging 34.1 (2014): 246-257.
-[2] Schiavi, Simona, et al. "A new method for accurate in vivo mapping of
-    human brain connections using microstructural and anatomical information."
-    Science advances 6.31 (2020): eaba8245.
diff --git a/scripts/.hidden/scil_tractogram_compress.py.help b/scripts/.hidden/scil_tractogram_compress.py.help
deleted file mode 100644
index 03f751591..000000000
--- a/scripts/.hidden/scil_tractogram_compress.py.help
+++ /dev/null
@@ -1,22 +0,0 @@
-usage: scil_tractogram_compress.py [-h] [-e ERROR_RATE]
-                                   [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                   in_tractogram out_tractogram
-
-Compress a tractogram by removing collinear (or nearly collinear) points.
-
-The compression threshold represents the maximum distance (in mm) to the
-original position of the point.
-
-Formerly: scil_compress_streamlines.py
-
-positional arguments:
-  in_tractogram   Path of the input tractogram file (trk or tck).
-  out_tractogram  Path of the output tractogram file.
- -options: - -h, --help show this help message and exit - -e ERROR_RATE Maximum compression distance in mm [0.1]. - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_tractogram_compute_TODI.py.help b/scripts/.hidden/scil_tractogram_compute_TODI.py.help deleted file mode 100644 index 9f203a5f2..000000000 --- a/scripts/.hidden/scil_tractogram_compute_TODI.py.help +++ /dev/null @@ -1,74 +0,0 @@ -usage: scil_tractogram_compute_TODI.py [-h] [--sphere SPHERE] [--mask MASK] - [--sh_order SH_ORDER] - [--normalize_per_voxel] - [--smooth_todi | --asymmetric] - [--n_steps N_STEPS] - [--out_mask OUT_MASK] - [--out_tdi OUT_TDI] - [--out_todi_sf OUT_TODI_SF] - [--out_todi_sh OUT_TODI_SH] - [--reference REFERENCE] - [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_tractogram - -Compute a Track Orientation Density Image (TODI). - -Each segment of the streamlines is weighted by its length (to support -compressed streamlines). - -This script can afterwards output a Track Density Image (TDI) or a TODI with SF -or SH representation, based on streamlines' segments. - -Formerly: scil_compute_todi.py - -positional arguments: - in_tractogram Input streamlines file. - -options: - -h, --help show this help message and exit - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy} - Spherical harmonics basis used for the SH coefficients. - Must be either descoteaux07', 'tournier07', - 'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]: - 'descoteaux07' : SH basis from the Descoteaux et al. - MRM 2007 paper - 'tournier07' : SH basis from the new Tournier et al. - NeuroImage 2019 paper, as in MRtrix 3. - 'descoteaux07_legacy': SH basis from the legacy Dipy implementation - of the Descoteaux et al. MRM 2007 paper - 'tournier07_legacy' : SH basis from the legacy Tournier et al. - NeuroImage 2007 paper. - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Computing options: - --sphere SPHERE Sphere used for the angular discretization. [repulsion724] - --mask MASK If set, use the given mask. - --sh_order SH_ORDER Order of the original SH. [8] - --normalize_per_voxel - If set, normalize each SF/SH at each voxel. - --smooth_todi If set, smooth TODI (angular and spatial). - --asymmetric If set, compute asymmetric TODI. - Cannot be used with --smooth_todi. - --n_steps N_STEPS Number of steps for streamline segments subdivision prior to binning [1]. - -Output files. Saves only when filename is set: - --out_mask OUT_MASK Mask showing where TDI > 0. - --out_tdi OUT_TDI Output Track Density Image (TDI). - --out_todi_sf OUT_TODI_SF - Output TODI, with SF (each directions - on the sphere, requires a lot of memory) - --out_todi_sh OUT_TODI_SH - Output TODI, with SH coefficients. - -References: - [1] Dhollander T, Emsell L, Van Hecke W, Maes F, Sunaert S, Suetens P. - Track orientation density imaging (TODI) and - track orientation distribution (TOD) based tractography. - NeuroImage. 2014 Jul 1;94:312-36. 
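A minimal sketch of a typical TODI call, using only the options documented above (file names are hypothetical):

>> scil_tractogram_compute_TODI.py tracking.trk --out_todi_sh todi_sh.nii.gz
       --sh_order 8 --normalize_per_voxel --out_tdi tdi.nii.gz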
diff --git a/scripts/.hidden/scil_tractogram_compute_density_map.py.help b/scripts/.hidden/scil_tractogram_compute_density_map.py.help deleted file mode 100644 index f78ac000e..000000000 --- a/scripts/.hidden/scil_tractogram_compute_density_map.py.help +++ /dev/null @@ -1,28 +0,0 @@ -usage: scil_tractogram_compute_density_map.py [-h] [--binary [FIXED_VALUE]] - [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_bundle out_img - -Compute a density map from a streamlines file. Can be binary. - -This script correctly handles compressed streamlines. - -Formerly: scil_compute_streamlines_density_map.py - -positional arguments: - in_bundle Tractogram filename. - out_img path of the output image file. - -options: - -h, --help show this help message and exit - --binary [FIXED_VALUE] - If set, will store the same value for all intersected voxels, - creating a binary map.When set without a value, 1 is used (and dtype - uint8). If a value is given, will be used as the stored value. - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_tractogram_convert.py.help b/scripts/.hidden/scil_tractogram_convert.py.help deleted file mode 100644 index bbb6b6074..000000000 --- a/scripts/.hidden/scil_tractogram_convert.py.help +++ /dev/null @@ -1,28 +0,0 @@ -usage: scil_tractogram_convert.py [-h] [--no_bbox_check] - [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_tractogram output_name - -Conversion of '.tck', '.trk', '.fib', '.vtk' and 'dpy' files using updated file -format standard. TRK file always needs a reference file, a NIFTI, for -conversion. The FIB file format is in fact a VTK, MITK Diffusion supports it. - -Formerly: scil_convert_tractogram.py - -positional arguments: - in_tractogram Tractogram filename. Format must be one of - trk, tck, vtk, fib, dpy - output_name Output filename. Format must be one of - trk, tck, vtk, fib, dpy - -options: - -h, --help show this help message and exit - --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of - tractograms (ignores the presence of invalid streamlines). - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_tractogram_convert_hdf5_to_trk.py.help b/scripts/.hidden/scil_tractogram_convert_hdf5_to_trk.py.help deleted file mode 100644 index 8046c19e4..000000000 --- a/scripts/.hidden/scil_tractogram_convert_hdf5_to_trk.py.help +++ /dev/null @@ -1,50 +0,0 @@ -usage: scil_tractogram_convert_hdf5_to_trk.py [-h] [--include_dps] - [--edge_keys LABEL1_LABEL2 [LABEL1_LABEL2 ...] - | --node_keys NODE [NODE ...]] - [--save_empty labels_list] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_hdf5 out_dir - -Save connections of a hdf5 created with ->> scil_tractogram_segment_bundles_for_connectivity.py. - -Useful for quality control and visual inspections. - -It can either save all connections (default), individual connections specified -with --edge_keys or connections from specific nodes specified with --node_keys. 
-
-With the option --save_empty, a labels_list, as a txt file, must be provided.
-This option saves existing connections and empty connections.
-
-The output is a directory containing the thousands of connections:
-out_dir/
-    |-- LABEL1_LABEL1.trk
-    |-- LABEL1_LABEL2.trk
-    |-- [...]
-    |-- LABEL90_LABEL90.trk
-
-Formerly: scil_save_connections_from_hdf5.py
-
-positional arguments:
-  in_hdf5               HDF5 filename (.h5) containing decomposed connections.
-  out_dir               Path of the output directory.
-
-options:
-  -h, --help            show this help message and exit
-  --include_dps         Include the data_per_streamline in the metadata.
-  --edge_keys LABEL1_LABEL2 [LABEL1_LABEL2 ...]
-                        Keys to identify the edges (connections) of interest.
-  --node_keys NODE [NODE ...]
-                        Node keys to identify the sub-networks of interest.
-                        Equivalent to adding any --edge_keys node_LABEL2 or LABEL2_node.
-  --save_empty labels_list
-                        Save empty connections. Then, the list of possible connections is
-                        not found from the hdf5 but inferred from labels_list, a txt file
-                        containing a list of nodes saved by the decomposition script.
-                        *If used together with edge_keys or node_keys, the provided nodes must
-                        exist in labels_list.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-                        CAREFUL. The whole output directory will be deleted if it exists.
diff --git a/scripts/.hidden/scil_tractogram_count_streamlines.py.help b/scripts/.hidden/scil_tractogram_count_streamlines.py.help
deleted file mode 100644
index 72ab12609..000000000
--- a/scripts/.hidden/scil_tractogram_count_streamlines.py.help
+++ /dev/null
@@ -1,24 +0,0 @@
-usage: scil_tractogram_count_streamlines.py [-h] [--print_count_alone]
-                                            [--indent INDENT] [--sort_keys]
-                                            [-v [{DEBUG,INFO,WARNING}]]
-                                            in_tractogram
-
-Return the number of streamlines in a tractogram. Only supports trk and tck,
-in order to allow lazy loading through nibabel.
-
-Formerly: scil_count_streamlines.py
-
-positional arguments:
-  in_tractogram        Path of the input tractogram file.
-
-options:
-  -h, --help           show this help message and exit
-  --print_count_alone  If true, prints the result only.
-                       Else, prints the bundle name and count formatted as a json dict. (default)
-  -v [{DEBUG,INFO,WARNING}]
-                       Produces verbose output depending on the provided level.
-                       Default level is warning, default when using -v is info.
-
-Json options:
-  --indent INDENT      Indent for json pretty print.
-  --sort_keys          Sort keys in output json.
diff --git a/scripts/.hidden/scil_tractogram_cut_streamlines.py.help b/scripts/.hidden/scil_tractogram_cut_streamlines.py.help
deleted file mode 100644
index 24395fc68..000000000
--- a/scripts/.hidden/scil_tractogram_cut_streamlines.py.help
+++ /dev/null
@@ -1,60 +0,0 @@
-usage: scil_tractogram_cut_streamlines.py [-h] (--mask MASK | --label LABEL)
-                                          [--label_ids LABEL_IDS LABEL_IDS]
-                                          [--resample STEP_SIZE]
-                                          [--biggest_blob]
-                                          [--compress [COMPRESS_TH]]
-                                          [--reference REFERENCE]
-                                          [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                          in_tractogram out_tractogram
-
-Filters streamlines and only keeps the parts of streamlines within or
-between the ROIs. Two options are available.
-
-Input mask:
-
-The mask has either 1 entity/blob or
-2 entities/blobs (does not support disconnected voxels).
-The option --biggest_blob can help if you have such a scenario.
-
-The 1 entity scenario will 'trim' the streamlines so their longest segment is
-within the bounding box or a binary mask.
-
-The 2 entities scenario will cut streamlines so their segments are within the
-bounding box or going from binary mask #1 to binary mask #2.
-
-Input label:
-
-The label MUST contain 2 labels different from zero.
-Label values can be anything.
-The script will cut streamlines going from label 1 to label 2.
-
-Both inputs and scenarios will erase data_per_point and data_per_streamline.
-
-Formerly: scil_cut_streamlines.py
-
-positional arguments:
-  in_tractogram         Input tractogram file.
-  out_tractogram        Output tractogram file. Note: data_per_point will be discarded, if any!
-
-options:
-  -h, --help            show this help message and exit
-  --label_ids LABEL_IDS LABEL_IDS
-                        List of label indices to use to cut streamlines (2 values).
-  --resample STEP_SIZE  Resample streamlines to a specific step-size in mm [None].
-  --biggest_blob        Use the biggest entity and force the 1 ROI scenario.
-  --compress [COMPRESS_TH]
-                        If set, compress the resulting streamlines. Value is the maximum
-                        compression distance in mm. [0.1]
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Mandatory mask options:
-  Choose between mask or label input.
-
-  --mask MASK           Binary mask containing either 1 or 2 blobs.
-  --label LABEL         Label containing 2 blobs.
diff --git a/scripts/.hidden/scil_tractogram_detect_loops.py.help b/scripts/.hidden/scil_tractogram_detect_loops.py.help
deleted file mode 100644
index 6559c83b8..000000000
--- a/scripts/.hidden/scil_tractogram_detect_loops.py.help
+++ /dev/null
@@ -1,57 +0,0 @@
-usage: scil_tractogram_detect_loops.py [-h]
-                                       [--looping_tractogram out_filename]
-                                       [--qb [threshold]] [--angle ANGLE]
-                                       [--display_counts] [--no_empty]
-                                       [--indent INDENT] [--sort_keys]
-                                       [--processes NBR]
-                                       [--reference REFERENCE]
-                                       [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                       in_tractogram out_tractogram
-
-This script can be used to remove loops in two types of streamline datasets:
-
-  - Whole brain: For this type, the script removes streamlines if they
-    make a loop with an angle of more than 360 degrees. It's possible to change
-    this angle with the --angle option. Warning: Don't use the --qb option for a
-    whole-brain tractography.
-
-  - Bundle dataset: For this type, it is possible to remove loops and
-    streamlines outside the bundle. For sharp angle turns, use the --qb option.
-
-See also:
-    scil_tractogram_filter_by_anatomy.py
-
-Formerly: scil_detect_streamlines_loops.py
-
-positional arguments:
-  in_tractogram         Tractogram input file name.
-  out_tractogram        Output tractogram without loops.
-
-options:
-  -h, --help            show this help message and exit
-  --looping_tractogram out_filename
-                        If set, saves detected looping streamlines.
-  --qb [threshold]      If set, uses QuickBundles to detect outliers (loops, sharp angle
-                        turns). The given threshold is the maximal streamline-to-bundle
-                        distance for a streamline to be considered as a tracking error.
-                        Default if set: [8.0]
-  --angle ANGLE         Maximum looping (or turning) angle of
-                        a streamline in degrees. [360]
-  --display_counts      Print streamline count before and after filtering.
-  --no_empty            If set, will not save outputs if they are empty.
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
-
-References:
-  QuickBundles, based on [Garyfallidis12] Frontiers in Neuroscience, 2012.
diff --git a/scripts/.hidden/scil_tractogram_dpp_math.py.help b/scripts/.hidden/scil_tractogram_dpp_math.py.help
deleted file mode 100644
index 59c47e0c6..000000000
--- a/scripts/.hidden/scil_tractogram_dpp_math.py.help
+++ /dev/null
@@ -1,76 +0,0 @@
-usage: scil_tractogram_dpp_math.py [-h] --mode {dpp,dps} --in_dpp_name key
-                                   [key ...] --out_keys key [key ...]
-                                   [--endpoints_only] [--keep_all_dpp_dps]
-                                   [--overwrite_dpp_dps]
-                                   [--reference REFERENCE]
-                                   [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                   [--no_bbox_check]
-                                   OPERATION INPUT_FILE OUTPUT_FILE
-
-Performs an operation on data per point (dpp) from input streamlines.
-
-Although the input data always comes from the dpp, the output can be either
-a dpp or a data_per_streamline (dps), depending on the chosen options.
-Two modes of operation are supported: dpp and dps.
-   In dps mode, the operation is performed on dpp across the dimension of
-   the streamlines, resulting in a single value (or array in the 4D case)
-   per streamline, stored as dps.
-   In dpp mode, the operation is performed on each point separately,
-   resulting in a new dpp.
-
-If endpoints_only is set with dpp mode, the operation will only be computed at
-the streamline endpoints; the rest of the values along the streamline will be
-NaN.
-
-If endpoints_only is set with dps mode, the operation will be computed across
-the data at the endpoints and stored as a single value (or array in the 4D
-case) per streamline.
-
-Endpoint-only operation:
-correlation: correlation computed between arrays extracted from streamline
-endpoints (data must be multivalued per point); dps mode must be set.
-
-positional arguments:
-  OPERATION             The type of operation to be performed on the
-                        streamlines. Must be one of the following:
-                        [mean, sum, min, max, correlation].
-  INPUT_FILE            Input tractogram containing streamlines and metadata.
-  OUTPUT_FILE           The file where the remaining streamlines
-                        are saved.
-
-options:
-  -h, --help            show this help message and exit
-  --mode {dpp,dps}      Set to dps if the operation is to be performed
-                        across all dimensions, resulting in a single value per
-                        streamline. Set to dpp if the operation is to be
-                        performed on each point separately, resulting in a
-                        single value per point.
-  --in_dpp_name key [key ...]
-                        Name or list of names of the data_per_point for the
-                        operation to be performed on. If more than one dpp
-                        is selected, the same operation will be applied
-                        separately to each one.
-  --out_keys key [key ...]
-                        Name of the resulting data_per_point or
-                        data_per_streamline to be saved in the output
-                        tractogram. If more than one --in_dpp_name was used,
-                        enter the same number of --out_keys values.
-  --endpoints_only      If set, will only perform the operation on endpoints.
-                        If not set, will perform the operation on all streamline
-                        points.
-  --keep_all_dpp_dps    If set, previous data_per_point will be preserved
-                        in the output tractogram. Else, only --out_keys
-                        keys will be saved.
-  --overwrite_dpp_dps   If set, and if --keep_all_dpp_dps is set and some
-                        --out_keys already existed in your
-                        data_per_point or data_per_streamline, allow
-                        overwriting the old data_per_point.
- --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - --no_bbox_check Activate to ignore validity of the bounding box during loading / saving of - tractograms (ignores the presence of invalid streamlines). diff --git a/scripts/.hidden/scil_tractogram_extract_ushape.py.help b/scripts/.hidden/scil_tractogram_extract_ushape.py.help deleted file mode 100644 index 6050a0e92..000000000 --- a/scripts/.hidden/scil_tractogram_extract_ushape.py.help +++ /dev/null @@ -1,41 +0,0 @@ -usage: scil_tractogram_extract_ushape.py [-h] [--minU MINU] [--maxU MAXU] - [--remaining_tractogram filename] - [--no_empty] [--display_counts] - [--indent INDENT] [--sort_keys] - [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_tractogram out_tractogram - -This script extracts streamlines depending on their U-shapeness. -This script is a replica of Trackvis method. - -When ufactor is close to: -* 0 it defines straight streamlines -* 1 it defines U-fibers -* -1 it defines S-fibers - -Formerly: scil_extract_ushape.py - -positional arguments: - in_tractogram Tractogram input file name. - out_tractogram Output tractogram file name. - -options: - -h, --help show this help message and exit - --minU MINU Min ufactor value. [0.5] - --maxU MAXU Max ufactor value. [1.0] - --remaining_tractogram filename - If set, saves remaining streamlines. - --no_empty Do not write file if there is no streamline. - --display_counts Print streamline count before and after filtering. - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Json options: - --indent INDENT Indent for json pretty print. - --sort_keys Sort keys in output json. diff --git a/scripts/.hidden/scil_tractogram_filter_by_anatomy.py.help b/scripts/.hidden/scil_tractogram_filter_by_anatomy.py.help deleted file mode 100644 index b711f8591..000000000 --- a/scripts/.hidden/scil_tractogram_filter_by_anatomy.py.help +++ /dev/null @@ -1,111 +0,0 @@ -usage: scil_tractogram_filter_by_anatomy.py [-h] [--minL MINL] [--maxL MAXL] - [--angle ANGLE] - [--csf_bin CSF_BIN] - [--dilate_ctx value] - [--save_intermediate_tractograms] - [--save_volumes] [--save_counts] - [--save_rejected] [--no_empty] - [--indent INDENT] [--sort_keys] - [--processes NBR] - [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_tractogram in_wmparc out_path - -This script filters streamlines in a tractogram according to their geometrical -properties (i.e. limiting their length and looping angle) and their anatomical -ending properties (i.e. the anatomical tissue or region their endpoints lie -in). - -See also: - - scil_tractogram_detect_loops.py - - scil_tractogram_filter_by_length.py - - scil_tractogram_filter_by_orientation.py - - scil_tractogram_filter_by_roi.py - -The filtering is performed sequentially in four steps, each step processing the -data on the output of the previous step: - - Step 1 - Remove streamlines below the minimum length and above the - maximum length. These thresholds must be set with the ``--minL`` - and ``--maxL`` options. 
-    Step 2 - Ensure that no streamlines end in the cerebrospinal fluid
-             according to the provided parcellation. A binary mask can be used
-             alternatively through the ``--csf_bin`` option.
-    Step 3 - Ensure that no streamlines end in white matter by ensuring that
-             they reach the cortical regions according to the provided
-             parcellation. The cortical regions of the parcellation can be
-             dilated using the ``--dilate_ctx`` option.
-    Step 4 - Remove streamlines if they make a loop with an angle above a
-             certain threshold. It's possible to change this angle with the
-             ``--angle`` option.
-
-Length- and loop-based filtering (steps 1 and 4) will have no practical effect
-if no specific thresholds are provided (but will still be executed), since the
-default values are 0 for the minimum allowed length, and infinite for the
-maximum allowed length and angle.
-
-The anatomical region endings filtering requires a parcellation or label image
-file including the cerebrospinal fluid and gray matter (cortical) regions,
-according to the Desikan-Killiany atlas. Intermediate tractograms (results of
-each step and outliers) and volumes can be saved throughout the process.
-
-Example usages:
-
-# Filter length, looping angle and anatomical ending region
->>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz
-    path/to/output/directory --minL 20 --maxL 200 --angle 300
-# Filter only anatomical ending region, with WM dilation and provided CSF mask
->>> scil_tractogram_filter_by_anatomy.py tractogram.trk wmparc.nii.gz
-    path/to/output/directory --csf_bin csf_bin.nii.gz --dilate_ctx 2
-
-Formerly: scil_filter_streamlines_anatomically.py
-
-NOTE: As of version 2.0.0, the dilation of the cortical mask has changed: from
-an in-house process to scipy's dilation. Results may differ from previous
-versions.
-
-positional arguments:
-  in_tractogram         Path of the input tractogram file.
-  in_wmparc             Path of the white matter parcellation atlas (.nii or .nii.gz)
-  out_path              Path to the output files.
-
-options:
-  -h, --help            show this help message and exit
-  --minL MINL           Minimum length of streamlines, in mm. [0.0]
-  --maxL MAXL           Maximum length of streamlines, in mm. [inf]
-  --angle ANGLE         Maximum looping (or turning) angle of a streamline,
-                        in degrees. [inf]
-  --csf_bin CSF_BIN     Allow CSF endings filtering with this binary
-                        mask instead of using the atlas (.nii or .nii.gz)
-  --dilate_ctx value    If set, dilate the cortical labels. Value is the dilation
-                        radius, in voxels (an integer > 0)
-  --save_intermediate_tractograms
-                        Save accepted and discarded streamlines after each step.
-  --save_volumes        Save volumetric images (e.g. binarised label
-                        images, etc.) in the filtering process.
-  --save_counts         Save the streamline counts to a file (.json)
-  --save_rejected       Save rejected streamlines to output tractogram.
-  --no_empty            Do not write file if there is no streamline.
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
-
- References:
-    [1] Jörgens, D., Descoteaux, M., Moreno, R., 2021. Challenges for
-    tractogram filtering. In: Özarslan, E., Schultz, T., Zhang, E., Fuster,
-    A. (Eds.), Anisotropy Across Fields and Scales.
Springer. Mathematics - and Visualization. - [2] Legarreta, J., Petit, L., Rheault, F., Theaud, G., Lemaire, C., - Descoteaux, M., Jodoin, P.M. Filtering in tractography using - autoencoders (FINTA). Medical Image Analysis. 2021 - diff --git a/scripts/.hidden/scil_tractogram_filter_by_length.py.help b/scripts/.hidden/scil_tractogram_filter_by_length.py.help deleted file mode 100644 index e1f33549e..000000000 --- a/scripts/.hidden/scil_tractogram_filter_by_length.py.help +++ /dev/null @@ -1,41 +0,0 @@ -usage: scil_tractogram_filter_by_length.py [-h] [--minL MINL] [--maxL MAXL] - [--no_empty] [--display_counts] - [--save_rejected] [--indent INDENT] - [--sort_keys] - [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] [-f] - in_tractogram out_tractogram - -Script to filter streamlines based on their lengths. - -See also: - - scil_tractogram_detect_loops.py - - scil_tractogram_filter_by_anatomy.py - (Filtering by length is its step1) - - scil_tractogram_filter_by_orientation.py - - scil_tractogram_filter_by_roi.py - -Formerly: scil_filter_streamlines_by_length.py - -positional arguments: - in_tractogram Streamlines input file name. - out_tractogram Streamlines output file name. - -options: - -h, --help show this help message and exit - --minL MINL Minimum length of streamlines, in mm. [0.0] - --maxL MAXL Maximum length of streamlines, in mm. [inf] - --no_empty Do not write file if there is no streamline. - --display_counts Print streamline count before and after filtering - --save_rejected Save rejected streamlines to output tractogram. - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. - -Json options: - --indent INDENT Indent for json pretty print. - --sort_keys Sort keys in output json. diff --git a/scripts/.hidden/scil_tractogram_filter_by_orientation.py.help b/scripts/.hidden/scil_tractogram_filter_by_orientation.py.help deleted file mode 100644 index 85f41e4b9..000000000 --- a/scripts/.hidden/scil_tractogram_filter_by_orientation.py.help +++ /dev/null @@ -1,65 +0,0 @@ -usage: scil_tractogram_filter_by_orientation.py [-h] [--min_x MIN_X] - [--max_x MAX_X] - [--min_y MIN_Y] - [--max_y MAX_Y] - [--min_z MIN_Z] - [--max_z MAX_Z] [--use_abs] - [--no_empty] - [--display_counts] - [--save_rejected filename] - [--indent INDENT] - [--sort_keys] - [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] - [-f] - in_tractogram out_tractogram - -Script to filter streamlines based on their distance traveled in a specific -dimension (x, y, or z). - -Useful to help differentiate bundles. - -Examples: In a brain aligned with x coordinates in left - right axis and y -coordinates in anterior-posterior axis, a streamline from the ... - - corpus callosum will likely travel a very short distance in the y axis. - - cingulum will likely travel a very short distance in the x axis. - -Note: we consider that x, y, z are the coordinates of the streamlines; we -do not verify if they are aligned with the brain's orientation. - -See also: - - scil_tractogram_detect_loops.py - - scil_tractogram_filter_by_anatomy.py - - scil_tractogram_filter_by_length.py - - scil_tractogram_filter_by_roi.py - -Formerly: scil_filter_streamlines_by_orientation.py - -positional arguments: - in_tractogram Streamlines input file name. - out_tractogram Streamlines output file name. 
-
-options:
-  -h, --help            show this help message and exit
-  --min_x MIN_X         Minimum distance in the first dimension, in mm. [0.0]
-  --max_x MAX_X         Maximum distance in the first dimension, in mm. [inf]
-  --min_y MIN_Y         Minimum distance in the second dimension, in mm. [0.0]
-  --max_y MAX_Y         Maximum distance in the second dimension, in mm. [inf]
-  --min_z MIN_Z         Minimum distance in the third dimension, in mm. [0.0]
-  --max_z MAX_Z         Maximum distance in the third dimension, in mm. [inf]
-  --use_abs             If set, will use the total of distances in absolute value (e.g. coming back on yourself will contribute to the total distance instead of cancelling it).
-  --no_empty            Do not write file if there is no streamline.
-  --display_counts      Print streamline count before and after filtering.
-  --save_rejected filename
-                        Save the SFT of rejected streamlines.
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_tractogram_filter_by_roi.py.help b/scripts/.hidden/scil_tractogram_filter_by_roi.py.help
deleted file mode 100644
index f2c796254..000000000
--- a/scripts/.hidden/scil_tractogram_filter_by_roi.py.help
+++ /dev/null
@@ -1,127 +0,0 @@
-usage: scil_tractogram_filter_by_roi.py [-h]
-                                        [--drawn_roi DRAWN_ROI [DRAWN_ROI ...]]
-                                        [--atlas_roi ATLAS_ROI [ATLAS_ROI ...]]
-                                        [--bdo BDO [BDO ...]]
-                                        [--x_plane X_PLANE [X_PLANE ...]]
-                                        [--y_plane Y_PLANE [Y_PLANE ...]]
-                                        [--z_plane Z_PLANE [Z_PLANE ...]]
-                                        [--filtering_list FILTERING_LIST]
-                                        [--overwrite_distance OVERWRITE_DISTANCE [OVERWRITE_DISTANCE ...]]
-                                        [--save_masks_atlas_roi SAVE_MASKS_ATLAS_ROI]
-                                        [--no_empty] [--display_counts]
-                                        [--save_rejected FILENAME]
-                                        [--indent INDENT] [--sort_keys]
-                                        [--reference REFERENCE]
-                                        [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                        in_tractogram out_tractogram
-
-Filtering of a tractogram based on any combination of conditions involving a
-ROI (e.g. keep streamlines whose endpoints are inside the ROI, exclude
-streamlines not entirely included in a ROI, etc.)
-
-See also:
-    - scil_tractogram_detect_loops.py
-    - scil_tractogram_filter_by_anatomy.py
-      (Can reject streamlines with endpoints in the WM or the CSF based on
-      labels)
-    - scil_tractogram_filter_by_length.py
-    - scil_tractogram_filter_by_orientation.py
-
-Condition
----------
-For every type of ROI, two or three values are expected: MODE CRITERIA DISTANCE
-(DISTANCE is always optional)
-- MODE must be one of these values: ['any', 'all', 'either_end', 'both_ends']
-    - any: any part of the streamline must be in the mask.
-    - all: all parts of the streamline must be in the mask.
-    - either_end: at least one end of the streamline must be in the mask.
-    - both_ends: both ends of the streamline must be in the mask.
-- CRITERIA must be one of these values: ['include', 'exclude']
-    - Include: If the condition from MODE is respected, the streamline is included.
-    - Exclude: If the condition from MODE is respected, the streamline is excluded.
-- DISTANCE must be an int and is optional.
-
-Type of ROI
------------
-- Drawn ROI: Directly loaded from a binary file.
-- Atlas ROI: Selected label from an atlas.
-    - ID is one or multiple integer values in the atlas. If multiple values,
-      ID needs to be between quotes.
-      Example: "1:6 9 10:15" will use values between 1 and 6, value 9, and
-      values between 10 and 15 (inclusive).
-- BDO: The ROI is the interior of a bounding box.
-- Planes: The ROI is the equivalent of a one-voxel plane.
-    * Using mode 'all' with x/y/z planes works but makes very little sense.
-
-Note: `--drawn_roi MASK.nii.gz all include` is equivalent to
-      `--drawn_roi INVERSE_MASK.nii.gz any exclude`
-
-For example, this allows finding all streamlines entirely in the WM in one
-command (without manually inverting the mask first), or removing any
-streamline staying in the GM without ever getting out.
-
-Supports multiple filtering conditions
---------------------------------------
-Multiple filtering conditions can be used, with varied ROI types if necessary.
-Combining two conditions is equivalent to a logical AND between the conditions.
-The order of application does not matter for the final result, but may change
-the intermediate files, if any.
-
-Distance management
--------------------
-DISTANCE is optional, and it should be used carefully with large voxel sizes
-(e.g. > 2.5 mm). The value is in voxels for ROIs and in mm for bounding boxes.
-Anisotropic data will affect each direction differently.
-    When using --overwrite_distance, any filtering option with the given
-criteria will have its DISTANCE value replaced.
-
-Formerly: scil_filter_tractogram.py
-
-positional arguments:
-  in_tractogram         Path of the input tractogram file.
-  out_tractogram        Path of the output tractogram file.
-
-options:
-  -h, --help            show this help message and exit
-  --drawn_roi DRAWN_ROI [DRAWN_ROI ...]
-                        ROI_NAME MODE CRITERIA DISTANCE (distance in voxels is optional)
-                        Filename of a hand-drawn ROI (.nii or .nii.gz).
-  --atlas_roi ATLAS_ROI [ATLAS_ROI ...]
-                        ATLAS_NAME ID MODE CRITERIA DISTANCE (distance in voxels is optional)
-                        Filename of an atlas (.nii or .nii.gz).
-  --bdo BDO [BDO ...]   BDO_NAME MODE CRITERIA DISTANCE (distance in mm is optional)
-                        Filename of a bounding box (bdo) file from MI-Brain.
-  --x_plane X_PLANE [X_PLANE ...]
-                        PLANE MODE CRITERIA DISTANCE (distance in voxels is optional)
-                        Slice number in X, in voxel space.
-  --y_plane Y_PLANE [Y_PLANE ...]
-                        PLANE MODE CRITERIA DISTANCE (distance in voxels is optional)
-                        Slice number in Y, in voxel space.
-  --z_plane Z_PLANE [Z_PLANE ...]
-                        PLANE MODE CRITERIA DISTANCE (distance in voxels is optional)
-                        Slice number in Z, in voxel space.
-  --filtering_list FILTERING_LIST
-                        Text file containing one rule per line
-                        (i.e. drawn_roi mask.nii.gz both_ends include 1).
-  --overwrite_distance OVERWRITE_DISTANCE [OVERWRITE_DISTANCE ...]
-                        MODE CRITERIA DISTANCE (distance in voxels for ROIs and in mm for bounding boxes).
-                        If set, it will overwrite the distance associated to a specific mode/criteria.
-  --save_masks_atlas_roi SAVE_MASKS_ATLAS_ROI
-                        If set, will save the atlas roi masks. The value to provide is the
-                        prefix, e.g. my_path/atlas_roi_. The whole filename will be
-                        my_path/atlas_roi_{id}.nii.gz
-  --no_empty            Do not write file if there is no streamline.
-  --display_counts      Print streamline count before and after filtering.
-  --save_rejected FILENAME
-                        Save rejected streamlines to output tractogram.
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_tractogram_fix_trk.py.help b/scripts/.hidden/scil_tractogram_fix_trk.py.help
deleted file mode 100644
index 124b0f01c..000000000
--- a/scripts/.hidden/scil_tractogram_fix_trk.py.help
+++ /dev/null
@@ -1,80 +0,0 @@
-usage: scil_tractogram_fix_trk.py [-h] [--software string]
-                                  [--cut_invalid | --remove_invalid]
-                                  [--in_dsi_fa IN_DSI_FA]
-                                  [--in_native_fa IN_NATIVE_FA] [--auto_crop]
-                                  [--save_transfo FILE | --load_transfo FILE]
-                                  [--reference REFERENCE] [--no_bbox_check]
-                                  [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                  in_tractogram out_tractogram
-
-This script is made to fix DSI-Studio or StarTrack TRK files
-(unknown space/convention) to make them compatible with TrackVis,
-MI-Brain, and Dipy Horizon (Stateful Tractogram).
-
-DSI-Studio
-==========
-
-The script either makes the tractogram match an anatomy from DSI-Studio
-(AC-PC aligned, sometimes flipped) or, if --in_native_fa is provided, moves it
-back to native DWI space (this involves registration).
-
-Since DSI-Studio sometimes leaves some skull around the brain, the --auto_crop
-option aims to stabilize registration. If this option fails, manually BET both
-FA maps. Registration is more robust at resolutions above 2 mm (iso); be
-careful.
-
-If you are fixing bundles, use this script once with --save_transfo and verify
-the results. Once satisfied, call the script on bundles using a bash for loop
-with --load_transfo to save computation.
-
-We recommend --cut_invalid to remove invalid points of streamlines rather than
-removing entire streamlines.
-
-This script was tested on various datasets and worked on all of them. However,
-always verify the results; if a specific case does not work, open an issue
-on the Scilpy GitHub repository.
-
-StarTrack
-=========
-
-The script will create a new stateful tractogram using the reference in
-order to fix the missing information in the header of the trk.
-
-WARNING: This script is still experimental; DSI-Studio and StarTrack
-evolve quickly and results may vary depending on the data itself
-as well as the DSI-Studio/StarTrack version.
-
-Formerly: scil_fix_dsi_studio_trk.py
-
-positional arguments:
-  in_tractogram         Path of the input tractogram file from DSI Studio (.trk).
-  out_tractogram        Path of the output tractogram file.
-
-options:
-  -h, --help            show this help message and exit
-  --software string     Software used to create in_tractogram.
-                        Choices: ['dsi_studio', 'startrack']
-  --cut_invalid         Cut invalid streamlines rather than removing them.
-                        Keep the longest segment only.
-  --remove_invalid      Remove the streamlines landing out of the bounding box.
-  --no_bbox_check       Activate to ignore validity of the bounding box during loading / saving of
-                        tractograms (ignores the presence of invalid streamlines).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-DSI options:
-  --in_dsi_fa IN_DSI_FA
-                        Path of the input FA from DSI Studio (.nii.gz).
-  --in_native_fa IN_NATIVE_FA
-                        Path of the input FA from Dipy/MRtrix (.nii.gz).
-                        Moves the tractogram back to a "proper" space, including registration.
-  --auto_crop           If both FA maps are not already BET, perform registration
-                        using a centered-cube crop to ignore the skull.
-                        A good BET for both is more robust.
-  --save_transfo FILE   Save estimated transformation to avoid recomputing (.txt).
-  --load_transfo FILE   Load estimated transformation to apply to other files (.txt).
-
-StarTrack options:
-  --reference REFERENCE
-                        Reference anatomy (.nii or .nii.gz).
diff --git a/scripts/.hidden/scil_tractogram_flip.py.help b/scripts/.hidden/scil_tractogram_flip.py.help
deleted file mode 100644
index 20e260b2c..000000000
--- a/scripts/.hidden/scil_tractogram_flip.py.help
+++ /dev/null
@@ -1,27 +0,0 @@
-usage: scil_tractogram_flip.py [-h] [--reference REFERENCE]
-                               [-v [{DEBUG,INFO,WARNING}]] [-f]
-                               in_tractogram out_tractogram {x,y,z}
-                               [{x,y,z} ...]
-
-Flip streamlines locally around specific axes.
-
-IMPORTANT: this script should only be used in case of absolute necessity.
-It's better to fix the real tools than to force flipping streamlines to
-have them fit in the tools.
-
-Formerly: scil_flip_streamlines.py
-
-positional arguments:
-  in_tractogram         Path of the input tractogram file.
-  out_tractogram        Path of the output tractogram file.
-  {x,y,z}               The axes you want to flip. e.g.: to flip the x and y axes use: x y.
-
-options:
-  -h, --help            show this help message and exit
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_math.py.help b/scripts/.hidden/scil_tractogram_math.py.help
deleted file mode 100644
index d7f5f2226..000000000
--- a/scripts/.hidden/scil_tractogram_math.py.help
+++ /dev/null
@@ -1,75 +0,0 @@
-usage: scil_tractogram_math.py [-h] [--precision NBR_OF_DECIMALS] [--robust]
-                               [--no_metadata] [--fake_metadata]
-                               [--save_indices OUT_INDEX_FILE] [--save_empty]
-                               [--no_bbox_check] [--indent INDENT]
-                               [--sort_keys] [--reference REFERENCE]
-                               [-v [{DEBUG,INFO,WARNING}]] [-f]
-                               OPERATION INPUT_FILES [INPUT_FILES ...]
-                               OUTPUT_FILE
-
-Performs an operation on a list of streamline files. The supported
-operations are:
-
-difference:       Keep the streamlines from the first file that are not in
-                  any of the following files.
-
-intersection:     Keep the streamlines that are present in all files.
-
-union:            Keep all streamlines while removing duplicates.
-
-concatenate:      Keep all streamlines with duplicates.
-
-lazy_concatenate: Keep all streamlines with duplicates, without ever loading
-                  the whole tractograms in memory. Only works with trk/tck
-                  files; metadata will be lost and invalid streamlines are kept.
-
-If a file 'duplicate.trk' has identical streamlines, calling the script using
-the difference/intersection/union with a single input will remove these
-duplicated streamlines.
-
-To allow a soft match, use the --precision option to increase the allowed
-threshold for similarity. A precision of 1 represents 10**(-1), so a
-maximum distance of 0.1 mm is allowed. If the streamlines are identical, the
-default value of 3 (or 0.001 mm distance) should work.
-
-If there is a 0.5 mm shift, use a precision of 0 (or 1 mm distance) and the
---robust option. This should make it work, but slightly slower. It will merge
-all streamlines that are similar when rounded to that precision level.
-
-The metadata (data per point, data per streamline) of the streamlines that
-are kept in the output will be preserved. This requires that all input files
-share the same type of metadata. If this is not the case, use the option
---no_metadata to strip the metadata from the output. Or --fake_metadata to
-initialize dummy metadata in the files missing them.
-
-Formerly: scil_streamlines_math.py
-
-positional arguments:
-  OPERATION             The type of operation to be performed on the streamlines. Must
-                        be one of the following: difference, intersection, union, concatenate, lazy_concatenate.
-  INPUT_FILES           The list of files that contain the streamlines to operate on.
-  OUTPUT_FILE           The file where the remaining streamlines are saved.
-
-options:
-  -h, --help            show this help message and exit
-  --precision NBR_OF_DECIMALS, -p NBR_OF_DECIMALS
-                        Precision used to compare streamlines [4].
-  --robust, -r          Use version robust to small translation/rotation.
-  --no_metadata, -n     Strip the streamline metadata from the output.
-  --fake_metadata       Skip the metadata verification; create fake metadata if missing. Can lead to unexpected behavior.
-  --save_indices OUT_INDEX_FILE, -s OUT_INDEX_FILE
-                        Save the streamline indices to the supplied json file.
-  --save_empty          If set, we will save all results, even if the tractogram is empty.
-  --no_bbox_check       Activate to ignore validity of the bounding box during loading / saving of
-                        tractograms (ignores the presence of invalid streamlines).
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
diff --git a/scripts/.hidden/scil_tractogram_pairwise_comparison.py.help b/scripts/.hidden/scil_tractogram_pairwise_comparison.py.help
deleted file mode 100644
index d22379961..000000000
--- a/scripts/.hidden/scil_tractogram_pairwise_comparison.py.help
+++ /dev/null
@@ -1,51 +0,0 @@
-usage: scil_tractogram_pairwise_comparison.py [-h] [--out_dir OUT_DIR]
-                                              [--out_prefix OUT_PREFIX]
-                                              [--in_mask IN_FILE]
-                                              [--skip_streamlines_distance]
-                                              [--processes NBR]
-                                              [--reference REFERENCE]
-                                              [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                              in_tractogram_1 in_tractogram_2
-
-This script is designed to compare and help visualize differences between two
-tractograms. This can be especially useful in studies where multiple
-tractograms from different algorithms or parameters need to be compared.
-
-A similar script (scil_bundle_pairwise_comparison.py) is available for bundles,
-with metrics more adapted to bundles (and spatial agreement).
-
-The difference is computed in terms of
-- A voxel-wise spatial distance between streamlines crossing each voxel.
-  This can help to see if both tractography reconstructions at each voxel
-  look similar (out_diff.nii.gz)
-- An angular correlation (ACC) between streamline orientations from TODI.
-  This compares the local orientation of streamlines at each voxel
-  (out_acc.nii.gz)
-- A patch-wise correlation between streamline density maps from both
-  tractograms. This compares where the high/low density regions agree or not
-  (out_corr.nii.gz)
-- A heatmap combining all the previous metrics using a harmonic mean of the
-  normalized metrics to summarize general agreement (out_heatmap.nii.gz)
-
-positional arguments:
-  in_tractogram_1       Input tractogram 1.
-  in_tractogram_2       Input tractogram 2.
-
-options:
-  -h, --help            show this help message and exit
-  --out_dir OUT_DIR     Directory where all output files will be saved.
-                        If not specified, outputs will be saved in the current directory.
-  --out_prefix OUT_PREFIX
-                        Prefix for output files. Useful for distinguishing between different runs [out].
- --in_mask IN_FILE Optional input mask. - --skip_streamlines_distance - Skip computation of the spatial distance between streamlines. Slowest part of the computation. - --processes NBR Number of sub-processes to start. - Default: [1] - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -f Force overwriting of the output files. diff --git a/scripts/.hidden/scil_tractogram_print_info.py.help b/scripts/.hidden/scil_tractogram_print_info.py.help deleted file mode 100644 index 22c270049..000000000 --- a/scripts/.hidden/scil_tractogram_print_info.py.help +++ /dev/null @@ -1,32 +0,0 @@ -usage: scil_tractogram_print_info.py [-h] [--reference REFERENCE] - [-v [{DEBUG,INFO,WARNING}]] - [--indent INDENT] [--sort_keys] - in_tractogram - -Prints information on a loaded tractogram: number of streamlines, and -mean / min / max / std of - - length in number of points - - length in mm - - step size. - -For trk files: also prints the data_per_point and data_per_streamline keys. - -See also: - - scil_header_print_info.py to see the header, affine, volume dimension. - - scil_bundle_shape_measures.py to see bundle-specific information. - -positional arguments: - in_tractogram Tractogram file. - -options: - -h, --help show this help message and exit - --reference REFERENCE - Reference anatomy for tck/vtk/fib/dpy file - support (.nii or .nii.gz). - -v [{DEBUG,INFO,WARNING}] - Produces verbose output depending on the provided level. - Default level is warning, default when using -v is info. - -Json options: - --indent INDENT Indent for json pretty print. - --sort_keys Sort keys in output json. diff --git a/scripts/.hidden/scil_tractogram_project_map_to_streamlines.py.help b/scripts/.hidden/scil_tractogram_project_map_to_streamlines.py.help deleted file mode 100644 index 1dcdde3ff..000000000 --- a/scripts/.hidden/scil_tractogram_project_map_to_streamlines.py.help +++ /dev/null @@ -1,68 +0,0 @@ -usage: scil_tractogram_project_map_to_streamlines.py [-h] --in_maps IN_MAPS - [IN_MAPS ...] - --out_dpp_name - OUT_DPP_NAME - [OUT_DPP_NAME ...] - [--trilinear] - [--endpoints_only] - [--keep_all_dpp] - [--overwrite_dpp] - [--reference REFERENCE] - [-f] - [-v [{DEBUG,INFO,WARNING}]] - in_tractogram - out_tractogram - -Projects maps extracted from a map onto the points of streamlines. - -The default options will take data from a nifti image (3D or 4D) and project it -onto the points of streamlines. If the image is 4D, the data is stored as a -list of 1D arrays per streamline. If the image is 3D, the data is stored as a -list of values per streamline. - -See also scil_tractogram_project_streamlines_to_map.py for the reverse action. - -* Note that the data from your maps will be projected only on the coordinates -of the points of your streamlines. Data underlying the whole segments between -two consecutive points is not used. If your streamlines are strongly -compressed, or if they have a very big step size, the result will possibly -reflect poorly your map. You may use scil_tractogram_resample.py to upsample -your streamlines first. - -* Hint: The streamlines themselves are not modified here, only their dpp. 
To
-avoid multiplying data on disk, you could use the following arguments to save
-the new dpp in your current tractogram:
->> scil_tractogram_project_map_to_streamlines.py $in_bundle $in_bundle
-    --keep_all_dpp -f
-
-positional arguments:
-  in_tractogram         Fiber bundle file.
-  out_tractogram        Output file.
-
-options:
-  -h, --help            show this help message and exit
-  --in_maps IN_MAPS [IN_MAPS ...]
-                        Nifti map to project onto streamlines.
-  --out_dpp_name OUT_DPP_NAME [OUT_DPP_NAME ...]
-                        Name of the data_per_point to be saved in the
-                        output tractogram.
-  --trilinear           If set, will use trilinear interpolation;
-                        else will use nearest neighbor interpolation
-                        by default.
-  --endpoints_only      If set, will only project the map onto the
-                        endpoints of the streamlines (all other values along
-                        streamlines will be NaN). If not set, will project
-                        the map onto all points of the streamlines.
-  --keep_all_dpp        If set, previous data_per_point will be preserved
-                        in the output tractogram. Else, only --out_dpp_name
-                        keys will be saved.
-  --overwrite_dpp       If set, and if --keep_all_dpp is set and some
-                        --out_dpp_name keys already existed in your
-                        data_per_point, allow overwriting the old data_per_point.
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -f                    Force overwriting of the output files.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
diff --git a/scripts/.hidden/scil_tractogram_project_streamlines_to_map.py.help b/scripts/.hidden/scil_tractogram_project_streamlines_to_map.py.help
deleted file mode 100644
index c99ec3045..000000000
--- a/scripts/.hidden/scil_tractogram_project_streamlines_to_map.py.help
+++ /dev/null
@@ -1,77 +0,0 @@
-usage: scil_tractogram_project_streamlines_to_map.py [-h]
-                                                     (--use_dps key [key ...] | --use_dpp key [key ...] | --load_dps file [file ...] | --load_dpp file [file ...])
-                                                     (--mean_endpoints | --mean_streamline | --point_by_point)
-                                                     (--to_endpoints | --to_wm)
-                                                     [--reference REFERENCE]
-                                                     [-v [{DEBUG,INFO,WARNING}]]
-                                                     [-f]
-                                                     in_bundle out_prefix
-
-Projects metrics onto the underlying voxels of the streamlines. This script can
-project data from data_per_point (dpp) or data_per_streamline (dps) to maps.
-
-You can choose to project data from all points of the streamlines, or from the
-endpoints only. The idea then is to visualize the cortical areas affected by
-metrics (assuming streamlines start/end in the cortex).
-
-See also scil_tractogram_project_map_to_streamlines.py for the reverse action.
-
-How the data is loaded:
-    - From dps: uses the same value for each point of the streamline.
-    - From dpp: one value per point.
-
-How the data is used:
-    1. Average all points of the streamline to get a mean value, set this value
-       to all points.
-    2. Average the two endpoints and get their mean value, set this value to
-       all points.
-    3. Keep each point individually.
-
-How the data is projected to a map:
-    A. Using each point.
-    B. Using the endpoints only.
-
-For more complex operations than the average per streamline, see
-scil_tractogram_dpp_math.py.
-
-positional arguments:
-  in_bundle             Fiber bundle file.
-  out_prefix            Folder + prefix to save endpoints metric(s). We will save
-                        one nifti file per dpp/dps key given.
-                        Ex: my_path/subjX_bundleY_ with --use_dpp key1 will output
-                        my_path/subjX_bundleY_key1.nii.gz
-
-options:
-  -h, --help            show this help message and exit
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Where to get the statistics from. (Choose one):
-  --use_dps key [key ...]
-                        Use the data_per_streamline from the tractogram.
-                        It must be a .trk.
-  --use_dpp key [key ...]
-                        Use the data_per_point from the tractogram.
-                        It must be a .trk.
-  --load_dps file [file ...]
-                        Load data per streamline (scalar) from .txt or .npy.
-                        Must load an array with the right shape.
-  --load_dpp file [file ...]
-                        Load data per point (scalar) from .txt or .npy.
-                        Must load an array with the right shape.
-
-Processing choices. (Choose one):
-  --mean_endpoints      Use one single value per streamline: the mean of the two
-                        endpoints.
-  --mean_streamline     Use one single value per streamline: the mean of all
-                        points of the streamline.
-  --point_by_point      Directly project the streamlines values onto the map.
-
-Where to send the statistics. (Choose one):
-  --to_endpoints        Project metrics onto a mask of the endpoints.
-  --to_wm               Project metrics into the streamlines' coverage.
diff --git a/scripts/.hidden/scil_tractogram_qbx.py.help b/scripts/.hidden/scil_tractogram_qbx.py.help
deleted file mode 100644
index 8ff05ebba..000000000
--- a/scripts/.hidden/scil_tractogram_qbx.py.help
+++ /dev/null
@@ -1,43 +0,0 @@
-usage: scil_tractogram_qbx.py [-h] [--nb_points NB_POINTS]
-                              [--out_centroids OUT_CENTROIDS]
-                              [--reference REFERENCE]
-                              [-v [{DEBUG,INFO,WARNING}]] [-f]
-                              in_tractogram dist_thresh out_clusters_dir
-
-Compute clusters using QuickBundlesX and save them separately.
-We cannot know the number of clusters in advance.
-
-Quickbundles:
-Garyfallidis, E. et al. (2012). Quickbundles, a method for tractography
-simplification. Frontiers in neuroscience, 6, 175.
-
-QuickbundlesX:
-Garyfallidis, E. et al. (2016) QuickBundlesX: sequential clustering of millions
-of streamlines in multiple levels of detail at record execution time. 24th
-International Society of Magnetic Resonance in Medicine (ISMRM).
-
-"QuickBundlesX shows a remarkable 20+X speedup over its predecessor"
-
-Formerly: scil_compute_qbx.py
-
-positional arguments:
-  in_tractogram         Tractogram filename.
-                        Path of the input tractogram or bundle.
-  dist_thresh           Last QuickBundlesX threshold in mm. Typical
-                        values are between 10 and 20 mm.
-  out_clusters_dir      Path where to save the clusters directory.
-
-options:
-  -h, --help            show this help message and exit
-  --nb_points NB_POINTS
-                        Streamlines will be resampled to have this number of points [20].
-  --out_centroids OUT_CENTROIDS
-                        Output tractogram filename.
-                        Format must be readable by the Nibabel API.
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
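The help above documents the arguments but shows no example call. A minimal
sketch of an invocation (the file names and the 15 mm threshold are
illustrative assumptions, not taken from the original help):

>>> scil_tractogram_qbx.py tractogram.trk 15 out_clusters/
    --out_centroids centroids.trk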
diff --git a/scripts/.hidden/scil_tractogram_register.py.help b/scripts/.hidden/scil_tractogram_register.py.help
deleted file mode 100644
index c2bde4463..000000000
--- a/scripts/.hidden/scil_tractogram_register.py.help
+++ /dev/null
@@ -1,42 +0,0 @@
-usage: scil_tractogram_register.py [-h] [--out_name OUT_NAME] [--only_rigid]
-                                   [--moving_tractogram_ref MOVING_TRACTOGRAM_REF]
-                                   [--static_tractogram_ref STATIC_TRACTOGRAM_REF]
-                                   [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                   moving_tractogram static_tractogram
-
-Generate a linear transformation matrix from the registration of 2 tractograms.
-Typically, this script is run before scil_tractogram_apply_transform.py.
-
-For more information on how to use the various registration scripts, see the
-doc at:
-https://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html
-
-Formerly: scil_register_tractogram.py
-
-positional arguments:
-  moving_tractogram     Path of the moving tractogram.
-  static_tractogram     Path of the target tractogram.
-
-options:
-  -h, --help            show this help message and exit
-  --out_name OUT_NAME   Filename of the transformation matrix.
-                        The registration type will be appended as a suffix,
-                        [_.txt].
-                        Default: [transformation.txt]
-  --only_rigid          If set, will only use a rigid transformation (uses affine by default).
-  --moving_tractogram_ref MOVING_TRACTOGRAM_REF
-                        Reference anatomy for moving_tractogram (if tck/vtk/fib/dpy) file
-                        support (.nii or .nii.gz).
-  --static_tractogram_ref STATIC_TRACTOGRAM_REF
-                        Reference anatomy for static_tractogram (if tck/vtk/fib/dpy) file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-References:
-[1] E. Garyfallidis, O. Ocegueda, D. Wassermann, M. Descoteaux
-Robust and efficient linear registration of white-matter fascicles in the
-space of streamlines, NeuroImage, Volume 117, 15 August 2015, Pages 124-140
-(http://www.sciencedirect.com/science/article/pii/S1053811915003961)
diff --git a/scripts/.hidden/scil_tractogram_remove_invalid.py.help b/scripts/.hidden/scil_tractogram_remove_invalid.py.help
deleted file mode 100644
index a57dbbd40..000000000
--- a/scripts/.hidden/scil_tractogram_remove_invalid.py.help
+++ /dev/null
@@ -1,41 +0,0 @@
-usage: scil_tractogram_remove_invalid.py [-h] [--cut_invalid]
-                                         [--remove_single_point]
-                                         [--remove_overlapping_points]
-                                         [--threshold THRESHOLD] [--no_empty]
-                                         [--reference REFERENCE]
-                                         [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                         in_tractogram out_tractogram
-
-Removal of streamlines that are out of the volume bounding box. In voxel space,
-no negative coordinate and no coordinate above the volume dimensions are
-possible. Any streamline that does not respect these two conditions is removed.
-
-The --cut_invalid option will cut streamlines so that their longest segment is
-within the bounding box.
-
-Formerly: scil_remove_invalid_streamlines.py
-
-positional arguments:
-  in_tractogram         Tractogram filename. Format must be one of
-                        trk, tck, vtk, fib, dpy.
-  out_tractogram        Output filename. Format must be one of
-                        trk, tck, vtk, fib, dpy.
-
-options:
-  -h, --help            show this help message and exit
-  --cut_invalid         Cut invalid streamlines rather than removing them.
-                        Keep the longest segment only.
-  --remove_single_point
-                        Consider single point streamlines invalid.
-  --remove_overlapping_points
-                        Consider streamlines with overlapping points invalid.
-  --threshold THRESHOLD
-                        Maximum distance between two points to be considered overlapping [0.001 mm].
-  --no_empty            Do not save empty tractogram.
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_resample.py.help b/scripts/.hidden/scil_tractogram_resample.py.help
deleted file mode 100644
index a224b167f..000000000
--- a/scripts/.hidden/scil_tractogram_resample.py.help
+++ /dev/null
@@ -1,72 +0,0 @@
-usage: scil_tractogram_resample.py [-h] [--never_upsample]
-                                   [--point_wise_std POINT_WISE_STD]
-                                   [--tube_radius TUBE_RADIUS]
-                                   [--gaussian SIGMA] [-e ERROR_RATE]
-                                   [--keep_invalid_streamlines]
-                                   [--downsample_per_cluster]
-                                   [--qbx_thresholds QBX_THRESHOLDS [QBX_THRESHOLDS ...]]
-                                   [--seed SEED] [--reference REFERENCE]
-                                   [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                   in_tractogram nb_streamlines out_tractogram
-
-Script to resample a tractogram to a set number of streamlines.
-Default behavior:
-- IF the number of requested streamlines is lower than the streamline count: DOWNSAMPLE
-- IF the number of requested streamlines is higher than the streamline count: UPSAMPLE
-To prevent upsampling when not desired, use --never_upsample.
-
-Can be useful to build training sets for machine learning algorithms, to
-upsample under-represented bundles or downsample over-represented bundles.
-
-Works by either selecting a subset of streamlines or by generating new
-streamlines by adding gaussian noise to existing ones.
-
-Upsampling:
-    Includes smoothing to compensate for the noisiness of new streamlines
-    generated by the process.
-Downsampling:
-    Includes the possibility of choosing randomly *per Quickbundles cluster* to
-    ensure that all clusters are represented in the final tractogram.
-
-Example usage:
-$ scil_tractogram_resample.py input.trk 1000 output.trk --point_wise_std 0.5 --gaussian 5 --keep_invalid_streamlines
-$ scil_visualize_bundles.py output.trk --local_coloring --width=0.1
-
-positional arguments:
-  in_tractogram         Input tractography file.
-  nb_streamlines        Number of streamlines to resample the tractogram to.
-  out_tractogram        Output tractography file.
-
-options:
-  -h, --help            show this help message and exit
-  --never_upsample      Make sure to never upsample a tractogram.
-                        Useful when downsampling batches of files using bash.
-  --seed SEED           Use a specific random seed for the resampling.
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Upsampling params:
-  --point_wise_std POINT_WISE_STD
-                        Noise to add to existing streamlines points to generate new ones [1].
-  --tube_radius TUBE_RADIUS
-                        Maximum distance to generate streamlines around the original ones [1].
-  --gaussian SIGMA      Sigma for smoothing. Use the value of surrounding X,Y,Z points on the streamline to blur the streamlines.
-                        A good sigma choice would be around 5.
-  -e ERROR_RATE         Maximum compression distance in mm [0.1].
-  --keep_invalid_streamlines
-                        Keep invalid newly generated streamlines that may go out of the
-                        bounding box.
-
-Downsampling params:
-  --downsample_per_cluster
-                        If set, downsampling will be done per cluster (computed with
-                        Quickbundles) to ensure that at least some streamlines are
-                        kept per bundle. Else, random downsampling is performed (default).
-  --qbx_thresholds QBX_THRESHOLDS [QBX_THRESHOLDS ...]
-                        If you chose option '--downsample_per_cluster', you may set
-                        the QBx threshold value(s) here. Default: [40, 30, 20]
diff --git a/scripts/.hidden/scil_tractogram_resample_nb_points.py.help b/scripts/.hidden/scil_tractogram_resample_nb_points.py.help
deleted file mode 100644
index 3a7d23f3d..000000000
--- a/scripts/.hidden/scil_tractogram_resample_nb_points.py.help
+++ /dev/null
@@ -1,28 +0,0 @@
-usage: scil_tractogram_resample_nb_points.py [-h]
-                                             (--nb_pts_per_streamline NB_PTS_PER_STREAMLINE | --step_size STEP_SIZE)
-                                             [--reference REFERENCE]
-                                             [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                             in_tractogram out_tractogram
-
-Script to resample a set of streamlines to either a new number of points per
-streamline or to a fixed step size. WARNING: data_per_point is not carried over.
-
-Formerly: scil_resample_streamlines.py
-
-positional arguments:
-  in_tractogram         Streamlines input file name.
-  out_tractogram        Streamlines output file name.
-
-options:
-  -h, --help            show this help message and exit
-  --nb_pts_per_streamline NB_PTS_PER_STREAMLINE
-                        Number of points per streamline in the output.
-  --step_size STEP_SIZE
-                        Step size in the output (in mm).
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_seed_density_map.py.help b/scripts/.hidden/scil_tractogram_seed_density_map.py.help
deleted file mode 100644
index 8ecbf4470..000000000
--- a/scripts/.hidden/scil_tractogram_seed_density_map.py.help
+++ /dev/null
@@ -1,29 +0,0 @@
-usage: scil_tractogram_seed_density_map.py [-h] [--binary [FIXED_VALUE]]
-                                           [--no_bbox_check]
-                                           [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                           tractogram_filename
-                                           seed_density_filename
-
-Compute a density map of the seeds saved in a .trk file.
-
-Formerly: scil_compute_seed_density_map.py
-
-positional arguments:
-  tractogram_filename   Tracts filename. Format must be .trk.
-                        File should contain a "seeds" value in the data_per_streamline.
-                        These seeds must be in space: voxel, origin: corner.
-  seed_density_filename
-                        Output seed density filename. Format must be Nifti.
-
-options:
-  -h, --help            show this help message and exit
-  --binary [FIXED_VALUE]
-                        If set, will store the same value for all intersected voxels, creating a binary map.
-                        When set without a value, 1 is used (and dtype uint8).
-                        If a value is given, it will be used as the stored value.
-  --no_bbox_check       Activate to ignore validity of the bounding box during loading / saving of
-                        tractograms (ignores the presence of invalid streamlines).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
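The help above shows no example call. A minimal sketch (file names are
illustrative assumptions; the input must be a .trk containing "seeds" in its
data_per_streamline, e.g. produced by tracking with --save_seeds):

>>> scil_tractogram_seed_density_map.py tracking.trk seed_density.nii.gz
    --binary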
diff --git a/scripts/.hidden/scil_tractogram_segment_and_score.py.help b/scripts/.hidden/scil_tractogram_segment_and_score.py.help
deleted file mode 100644
index 63434cac7..000000000
--- a/scripts/.hidden/scil_tractogram_segment_and_score.py.help
+++ /dev/null
@@ -1,164 +0,0 @@
-usage: scil_tractogram_segment_and_score.py [-h] [--json_prefix p]
-                                            [--gt_dir DIR]
-                                            [--use_gt_masks_as_all_masks]
-                                            [--dilate_endpoints NB_PASS]
-                                            [--remove_invalid]
-                                            [--save_wpc_separately]
-                                            [--compute_ic] [--unique]
-                                            [--remove_wpc_belonging_to_another_bundle]
-                                            [--no_empty] [--indent INDENT]
-                                            [--sort_keys] [--no_bbox_check]
-                                            [--reference REFERENCE]
-                                            [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                            in_tractogram gt_config out_dir
-
-Scores the input tractogram overall and bundlewise.
-
-Outputs
--------
-
- - results.json: Contains a full tractometry report.
- - processing_stats.json: Contains information on the segmentation of
-   bundles (ex: the number of wpc per criteria).
- - Splits the input tractogram into
-     segmented_VB/*_VS.trk.
-     segmented_IB/*_*_IC.trk (if args.compute_ic)
-     segmented_WPC/*_wpc.trk (if args.save_wpc_separately)
-     IS.trk OR NC.trk (if args.compute_ic)
-
-By default, if a streamline fits in many bundles, it will be included in every
-one. This means a streamline may be a VS for a bundle and an IS for
-(potentially many) others. If you want to assign each streamline to at most one
-bundle, use the `--unique` flag.
-
-Config file
------------
-
-The config file needs to be a json containing a dict of the ground-truth
-bundles as keys. The value for each bundle is itself a dictionary with:
-
-Mandatory:
- - endpoints OR [head AND tail]: filename for the endpoints ROI.
-   If 'endpoints' is used, we will automatically separate the mask into two
-   ROIs, acting as head and tail. Quality check is strongly recommended.
-
-Optional:
-   Concerning metrics:
-   - gt_mask: expected result. OL and OR metrics will be computed from this.*
-
-   Concerning inclusion criteria (other streamlines will be WPC):
-   - all_mask: ROI serving as "all" criteria: to be included in the bundle,
-     ALL points of a streamline must be inside the mask.*
-   - any_mask: ROI serving as "any" criteria: streamlines
-     must touch that mask in at least one point ("any" point) to be included
-     in the bundle.
-   - angle: angle criteria. Streamlines containing loops and sharp turns above
-     the given angle will be rejected from the bundle.
-   - length: maximum and minimum lengths per bundle.
-   - length_x / length_x_abs: maximum and minimum total distance in the x
-     direction (i.e. first coordinate).**
-   - length_y / length_y_abs: maximum and minimum total distance in the y
-     direction (i.e. second coordinate).**
-   - length_z / length_z_abs: maximum and minimum total distance in the z
-     direction (i.e. third coordinate).**
-
-* Files must be .tck, .trk, .nii or .nii.gz. If it is a tractogram, a mask will
-be created. If it is a nifti file, it will be considered to be a mask.
-** With absolute values: coming back on yourself will contribute to the total
-distance instead of cancelling it.
-
-Example config file:
-{
-  "Ground_truth_bundle_0": {
-    "gt_mask": "PATH/bundle0.nii.gz",
-    "angle": 300,
-    "length": [140, 150],
-    "endpoints": "PATH/file1.nii.gz"
-  }
-}
-
-Tractometry
------------
-Global connectivity metrics:
-
-- Computed by default:
-    - VS: valid streamlines, belonging to a bundle (i.e. respecting all the
-      criteria for that bundle; endpoints, limit_mask, gt_mask.).
-    - IS: invalid streamlines. All other streamlines. IS = IC + NC.
-
-- Optional:
-    - WPC: wrong path connections, streamlines connecting correct ROIs but not
-      respecting the other criteria for that bundle. Such streamlines always
-      exist but they are only saved separately if specified in the options.
-      Else, they are merged back with the IS.
-      By definition, WPC are only computed if "limits masks" are provided.
-    - IC: invalid connections, streamlines joining an incorrect combination of
-      ROIs. Use carefully, quality depends on the quality of your ROIs and no
-      analysis is done on the shape of the streamlines.
-    - NC: no connections. Invalid streamlines minus invalid connections.
-
-- Fidelity metrics:
-    - OL: Overlap. Percentage of ground truth voxels containing streamline(s)
-      for a given bundle.
-    - OR: Overreach. Amount of voxels containing streamline(s) when they
-      shouldn't, for a given bundle. We compute two versions:
-      OR_pct_vs = divided by the total number of voxels covered by the bundle
-      (percentage of the voxels touched by VS).
-      Values range between 0 and 100%. Values are not defined when we
-      recovered no streamline for a bundle, but we set the OR_pct_vs to 0
-      in that case.
-      OR_pct_gt = divided by the total size of the ground truth bundle mask.
-      Values could be higher than 100%.
-    - f1 score: the same as the Dice score.
-
-positional arguments:
-  in_tractogram         Input tractogram to score.
-  gt_config             .json dict configured as specified above.
-  out_dir               Output directory for the resulting segmented bundles.
-
-options:
-  -h, --help            show this help message and exit
-  --json_prefix p       Prefix of the two output json files. Ex: 'study_x_'. Files will be saved inside out_dir.
-                        Suffixes will be 'processing_stats.json' and 'results.json'.
-  --no_empty            Do not write file if there is no streamline.
-  --no_bbox_check       Activate to ignore validity of the bounding box during loading / saving of
-                        tractograms (ignores the presence of invalid streamlines).
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Additions to gt_config:
-  --gt_dir DIR          Root path of the ground truth files listed in the gt_config.
-                        If not set, filenames in the config file are considered
-                        as absolute paths.
-  --use_gt_masks_as_all_masks
-                        If set, the gt_config's 'gt_mask' will also be used as
-                        'all_mask' for each bundle. Note that this means the
-                        OR will necessarily be 0.
-
-Preprocessing:
-  --dilate_endpoints NB_PASS
-                        Dilate endpoint masks n-times. Default: 0.
-  --remove_invalid      Remove invalid streamlines before scoring.
-
-Tractometry choices:
-  --save_wpc_separately
-                        If set, streamlines rejected from VC based on the config
-                        file criteria will be saved separately from IS (and IC)
-                        in one file *_wpc.tck per bundle.
-  --compute_ic          If set, IS are split into NC + IC, where IC are computed as one bundle per
-                        pair of ROIs not belonging to a true connection, named
-                        *_*_IC.tck.
-  --unique              If set, streamlines are assigned to the first bundle they fit in and not to all.
-  --remove_wpc_belonging_to_another_bundle
-                        If set, WPC actually belonging to any VB (in the
-                        case of overlapping ROIs) will be removed
-                        from the WPC classification.
-
-Json options:
-  --indent INDENT       Indent for json pretty print.
-  --sort_keys           Sort keys in output json.
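The help above documents the config file format but shows no example call. A
minimal sketch of an invocation (file and directory names are illustrative
assumptions):

>>> scil_tractogram_segment_and_score.py tractogram.trk gt_config.json
    out_scoring/ --gt_dir ground_truth/ --compute_ic --unique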
diff --git a/scripts/.hidden/scil_tractogram_segment_bundles.py.help b/scripts/.hidden/scil_tractogram_segment_bundles.py.help
deleted file mode 100644
index 567dd9376..000000000
--- a/scripts/.hidden/scil_tractogram_segment_bundles.py.help
+++ /dev/null
@@ -1,65 +0,0 @@
-usage: scil_tractogram_segment_bundles.py [-h] [--out_dir OUT_DIR]
-                                          [--minimal_vote_ratio MINIMAL_VOTE_RATIO]
-                                          [--seed SEED] [--inverse]
-                                          [--reference REFERENCE]
-                                          [--processes NBR]
-                                          [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                          in_tractograms [in_tractograms ...]
-                                          in_config_file in_directory
-                                          in_transfo
-
-Compute BundleSeg, with support for multi-atlas and multi-parameters (RBx-like).
-The model needs to be cleaned and lightweight.
-The transform should come from ANTs (using the --inverse flag):
-AntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF
-
-If you are not sure about the transformation 'direction', you can try
-scil_tractogram_segment_bundles.py (with the -v option); a warning will pop up
-if the provided transformation is not used correctly.
-
-The number of folders inside 'models_directories' will increase the number of
-runs. Each folder is considered like an atlas and bundles inside will initiate
-more BundleSeg executions. The more atlases you have, the more robust the
-recognition will be.
-
---minimal_vote_ratio is a value between 0 and 1. If you have 5 input model
-directories and a minimal_vote_ratio of 0.5, you will need at least 3 votes.
-
-Example data and usage available at: https://zenodo.org/record/7950602
-
-For RAM usage, it is recommended to use this heuristic:
-    (size of input tractograms (GB) * number of processes) < RAM (GB)
-(For example, a 2 GB input tractogram with 4 processes requires more than
-8 GB of RAM.)
-This is important because many instances of data structures are initialized
-in parallel and can lead to a RAM overflow.
-
-Formerly: scil_recognize_multi_bundles.py
-
-positional arguments:
-  in_tractograms        Input tractogram filename (.trk or .tck).
-  in_config_file        Path of the config file (.json)
-  in_directory          Path of the parent folder of the models directories.
-                        Each folder inside will be considered as a different atlas.
-  in_transfo            Path for the transformation to model space (.txt, .npy or .mat).
-
-options:
-  -h, --help            show this help message and exit
-  --out_dir OUT_DIR     Path for the output directory [voting_results].
-  --minimal_vote_ratio MINIMAL_VOTE_RATIO
-                        Streamlines will only be considered for saving if
-                        recognized often enough [0.5].
-  --seed SEED           Random number generator seed [0].
-  --inverse             Use the inverse transformation.
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-[1] St-Onge, Etienne, Kurt G. Schilling, and Francois Rheault.
-"BundleSeg: A versatile, reliable and reproducible approach to white
-matter bundle segmentation." International Workshop on Computational
-Diffusion MRI. Cham: Springer Nature Switzerland (2023)
diff --git a/scripts/.hidden/scil_tractogram_segment_bundles_for_connectivity.py.help b/scripts/.hidden/scil_tractogram_segment_bundles_for_connectivity.py.help
deleted file mode 100644
index a966fa827..000000000
--- a/scripts/.hidden/scil_tractogram_segment_bundles_for_connectivity.py.help
+++ /dev/null
@@ -1,105 +0,0 @@
-usage: scil_tractogram_segment_bundles_for_connectivity.py [-h] [--no_pruning]
-                                                           [--no_remove_loops]
-                                                           [--no_remove_outliers]
-                                                           [--no_remove_curv_dev]
-                                                           [--min_length MIN_LENGTH]
-                                                           [--max_length MAX_LENGTH]
-                                                           [--outlier_threshold OUTLIER_THRESHOLD]
-                                                           [--loop_max_angle LOOP_MAX_ANGLE]
-                                                           [--curv_qb_distance CURV_QB_DISTANCE]
-                                                           [--out_dir OUT_DIR]
-                                                           [--save_raw_connections]
-                                                           [--save_intermediate]
-                                                           [--save_discarded]
-                                                           [--out_labels_list OUT_FILE]
-                                                           [--reference REFERENCE]
-                                                           [--no_bbox_check]
-                                                           [--processes NBR]
-                                                           [-v [{DEBUG,INFO,WARNING}]]
-                                                           [-f]
-                                                           in_tractograms
-                                                           [in_tractograms ...]
-                                                           in_labels out_hdf5
-
-Compute a connectivity matrix from a tractogram and a parcellation.
-
-The current strategy is to keep the longest streamline segment connecting 2
-regions. If the streamline crosses other gray matter regions before reaching
-its final connected region, the kept connection is still the longest. This is
-robust to compressed streamlines.
-
-The output file is a hdf5 (.h5) where the keys are 'LABEL1_LABEL2' and each
-group is composed of 'data', 'offsets' and 'lengths' from the array_sequence.
-The 'data' is stored in VOX/CORNER for simplicity and efficiency. See the
-script scil_tractogram_convert_hdf5_to_trk.py to convert to a list of .trk
-bundles.
-
-For the --outlier_threshold option, the default is a recommended trade-off
-for a FreeSurfer parcellation. With smaller parcels (brainnetome, glasser) the
-threshold should most likely be reduced.
-
-Good candidate connections to QC are the brainstem to precentral gyrus
-connection and the precentral left to precentral right connection, or the
-equivalent in your parcellation.
-
-NOTE: this script can take a while to run. Please be patient.
-Example: on a tractogram with 1.8M streamlines, running on an SSD:
-- 15 minutes without post-processing, only saving final bundles.
-- 30 minutes with full post-processing, only saving final bundles.
-- 60 minutes with full post-processing, saving all possible files.
-
-Formerly: scil_decompose_connectivity.py
-
-positional arguments:
-  in_tractograms        Tractogram filenames. Format must be one of
-                        trk, tck, vtk, fib, dpy.
-  in_labels             Labels file name (nifti). Labels must have 0 as background.
-  out_hdf5              Output hdf5 file (.h5).
-
-options:
-  -h, --help            show this help message and exit
-  --out_labels_list OUT_FILE
-                        Save the labels list as a text file.
-                        Needed for scil_connectivity_compute_matrices.py and others.
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  --no_bbox_check       Activate to ignore validity of the bounding box during loading / saving of
-                        tractograms (ignores the presence of invalid streamlines).
-  --processes NBR       Number of sub-processes to start.
-                        Default: [1]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Post-processing options:
-  --no_pruning          If set, will NOT prune on length.
-                        Length criteria in --min_length, --max_length.
-  --no_remove_loops     If set, will NOT remove streamlines making loops.
-                        Angle criteria based on --loop_max_angle.
-  --no_remove_outliers  If set, will NOT remove outliers using QB.
-                        Criteria based on --outlier_threshold.
-  --no_remove_curv_dev  If set, will NOT remove streamlines that deviate from the mean curvature.
-                        Threshold based on --curv_qb_distance.
-
-Pruning options:
-  --min_length MIN_LENGTH
-                        Pruning minimal segment length. [20.0]
-  --max_length MAX_LENGTH
-                        Pruning maximal segment length. [200.0]
-
-Outliers and loops options:
-  --outlier_threshold OUTLIER_THRESHOLD
-                        Outlier removal threshold when using hierarchical QB. [0.6]
-  --loop_max_angle LOOP_MAX_ANGLE
-                        Maximal winding angle over which a streamline is considered as looping. [330.0]
-  --curv_qb_distance CURV_QB_DISTANCE
-                        Clustering threshold for centroids curvature filtering with QB. [10.0]
-
-Saving options:
-  --out_dir OUT_DIR     Output directory for each connection as a separate file (.trk).
-  --save_raw_connections
-                        If set, will save all raw cut connections in a subdirectory.
-  --save_intermediate   If set, will save the intermediate results of filtering.
-  --save_discarded      If set, will save discarded streamlines in subdirectories.
-                        Includes loops, outliers and qb_loops.
diff --git a/scripts/.hidden/scil_tractogram_segment_one_bundle.py.help b/scripts/.hidden/scil_tractogram_segment_one_bundle.py.help
deleted file mode 100644
index 604735985..000000000
--- a/scripts/.hidden/scil_tractogram_segment_one_bundle.py.help
+++ /dev/null
@@ -1,62 +0,0 @@
-usage: scil_tractogram_segment_one_bundle.py [-h]
-                                             [--tractogram_clustering_thr TRACTOGRAM_CLUSTERING_THR]
-                                             [--model_clustering_thr MODEL_CLUSTERING_THR]
-                                             [--pruning_thr PRUNING_THR]
-                                             [--slr_threads SLR_THREADS]
-                                             [--seed SEED] [--inverse]
-                                             [--no_empty]
-                                             [--in_pickle IN_PICKLE | --out_pickle OUT_PICKLE]
-                                             [--reference REFERENCE]
-                                             [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                             in_tractogram in_model in_transfo
-                                             out_tractogram
-
-Compute a simple Recobundles (single-atlas & single-parameters).
-The model needs to be cleaned and lightweight.
-The transform should come from ANTs (using the --inverse flag):
-AntsRegistrationSyNQuick.sh -d 3 -m MODEL_REF -f SUBJ_REF
-
-If you are unsure about the transformation 'direction', use the verbose
-option (-v) and try with and without the --inverse flag. If you are not using
-the right transformation 'direction', a warning will pop up. If there is no
-warning in either case, it means the transformation is very close to identity
-and both 'directions' will work.
-
-Formerly: scil_recognize_single_bundles.py
-
-positional arguments:
-  in_tractogram         Input tractogram filename.
-  in_model              Model to use for recognition.
-  in_transfo            Path for the transformation to model space (.txt, .npy or .mat).
-  out_tractogram        Output tractogram filename.
-
-options:
-  -h, --help            show this help message and exit
-  --tractogram_clustering_thr TRACTOGRAM_CLUSTERING_THR
-                        Clustering threshold used for the whole brain [8mm].
-  --model_clustering_thr MODEL_CLUSTERING_THR
-                        Clustering threshold used for the model [4mm].
-  --pruning_thr PRUNING_THR
-                        MDF threshold used for final streamlines selection [6mm].
-  --slr_threads SLR_THREADS
-                        Number of threads for SLR [1].
-  --seed SEED           Random number generator seed [None].
-  --inverse             Use the inverse transformation.
-  --no_empty            Do not write file if there is no streamline.
-  --in_pickle IN_PICKLE
-                        Input pickle clusters map file.
-                        Will override the tractogram_clustering_thr parameter.
-  --out_pickle OUT_PICKLE
-                        Output pickle clusters map file.
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Garyfallidis, E., Cote, M. A., Rheault, F., ... &
-Descoteaux, M. (2018). Recognition of white matter
-bundles using local and global streamline-based registration and
-clustering. NeuroImage, 170, 283-295.
diff --git a/scripts/.hidden/scil_tractogram_shuffle.py.help b/scripts/.hidden/scil_tractogram_shuffle.py.help
deleted file mode 100644
index 88d645f66..000000000
--- a/scripts/.hidden/scil_tractogram_shuffle.py.help
+++ /dev/null
@@ -1,22 +0,0 @@
-usage: scil_tractogram_shuffle.py [-h] [--seed SEED] [--reference REFERENCE]
-                                  [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                  in_tractogram out_tractogram
-
-Shuffle the ordering of streamlines.
-
-Formerly: scil_shuffle_streamlines.py
-
-positional arguments:
-  in_tractogram         Input tractography file.
-  out_tractogram        Output tractography file.
-
-options:
-  -h, --help            show this help message and exit
-  --seed SEED           Random number generator seed [None].
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_smooth.py.help b/scripts/.hidden/scil_tractogram_smooth.py.help
deleted file mode 100644
index 06983fed4..000000000
--- a/scripts/.hidden/scil_tractogram_smooth.py.help
+++ /dev/null
@@ -1,51 +0,0 @@
-usage: scil_tractogram_smooth.py [-h]
-                                 (--gaussian SIGMA | --spline SIGMA NB_CTRL_POINT)
-                                 [--compress [COMPRESS_TH]]
-                                 [--reference REFERENCE]
-                                 [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                 in_tractogram out_tractogram
-
-This script will smooth the streamlines, usually to remove the 'wiggles' in
-probabilistic tracking.
-
-Two choices of methods are available:
-- Gaussian will use the surrounding coordinates for smoothing. Streamlines are
-resampled to a 1mm step-size and the smoothing is performed on the coordinate
-array. The sigma is indicative of the number of points surrounding the
-center points to be used for blurring.
-- Spline will fit a spline curve to every streamline using a sigma and the
-number of control points. The sigma represents the allowed distance from the
-control points. The final streamlines are obtained by evaluating the spline at
-constant intervals so that they have the same number of points as initially.
-
-This script enforces endpoints to remain the same.
-
-WARNING:
-- too low a sigma (e.g. 1) with a lot of control points (e.g. 15)
-will create erratic streamlines that could end up out of the bounding box.
-- data_per_point will be lost.
-
-Formerly: scil_smooth_streamlines.py
-
-positional arguments:
-  in_tractogram         Input tractography file.
-  out_tractogram        Output tractography file.
-
-options:
-  -h, --help            show this help message and exit
-  --gaussian SIGMA      Sigma for smoothing. Use the value of surrounding
-                        X,Y,Z points on the streamline to blur the streamlines.
-                        A good sigma choice would be around 5.
-  --spline SIGMA NB_CTRL_POINT
-                        Sigma for smoothing. Model each streamline as a spline.
-                        A good sigma choice would be around 5 and control points around 10.
-  --compress [COMPRESS_TH]
-                        If set, compress the resulting streamlines. Value is the maximum
-                        compression distance in mm. [0.1]
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_tractogram_split.py.help b/scripts/.hidden/scil_tractogram_split.py.help
deleted file mode 100644
index 3b58901c5..000000000
--- a/scripts/.hidden/scil_tractogram_split.py.help
+++ /dev/null
@@ -1,48 +0,0 @@
-usage: scil_tractogram_split.py [-h] [--out_dir OUT_DIR]
-                                (--chunk_size CHUNK_SIZE | --nb_chunks NB_CHUNKS)
-                                [--split_per_cluster | --do_not_randomize]
-                                [--qbx_thresholds t [t ...]] [--seed SEED]
-                                [--reference REFERENCE]
-                                [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                in_tractogram out_prefix
-
-Split a tractogram into multiple files; two options are available:
-split into X files, or split into files of Y streamlines.
-
-By default, streamlines to add to each chunk will be chosen randomly.
-Optionally, you can split streamlines...
-    - sequentially (the first n/nb_chunks streamlines in the first chunk and so
-      on).
-    - randomly, but per Quickbundles clusters.
-
-Formerly: scil_split_tractogram.py
-
-positional arguments:
-  in_tractogram         Tractogram input file name.
-  out_prefix            Prefix for the output tractogram; an index will be appended
-                        automatically (e.g., _0.trk), based on input type.
-
-options:
-  -h, --help            show this help message and exit
-  --out_dir OUT_DIR     Put all output tractograms in a specific directory.
-  --chunk_size CHUNK_SIZE
-                        The maximum number of streamlines per file.
-  --nb_chunks NB_CHUNKS
-                        Divide the file in equal parts.
-  --split_per_cluster   If set, splitting will be done per cluster (computed with
-                        Quickbundles) to ensure that at least some streamlines are
-                        kept from each bundle in each chunk. Else, random splitting is
-                        performed (default).
-  --do_not_randomize    If set, splitting is done sequentially through the original
-                        sft instead of using random indices.
-  --qbx_thresholds t [t ...]
-                        If you chose option '--split_per_cluster', you may set the
-                        QBx threshold value(s) here. Default: [40, 30, 20]
-  --seed SEED           Use a specific random seed for the subsampling.
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_viz_bingham_fit.py.help b/scripts/.hidden/scil_viz_bingham_fit.py.help
deleted file mode 100644
index af1f892cf..000000000
--- a/scripts/.hidden/scil_viz_bingham_fit.py.help
+++ /dev/null
@@ -1,38 +0,0 @@
-usage: scil_viz_bingham_fit.py [-h] [--slice_index SLICE_INDEX]
-                               [--win_dims WIDTH HEIGHT]
-                               [--interactor {image,trackball}]
-                               [--axis_name {sagittal,coronal,axial}]
-                               [--silent] [--output OUTPUT]
-                               [-v [{DEBUG,INFO,WARNING}]] [-f]
-                               [--sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}]
-                               [--color_per_lobe]
-                               in_bingham
-
-Visualize a 2-dimensional Bingham volume slice loaded from disk. The volume is
-assumed to be saved by scil_fodf_to_bingham.py.
-
-Given an image of Bingham coefficients, this script displays a slice in a
-given orientation.
-
-positional arguments:
-  in_bingham            Input Bingham image file.
-
-options:
-  -h, --help            show this help message and exit
-  --slice_index SLICE_INDEX
-                        Index of the slice to visualize along a given axis. Defaults to middle of volume.
-  --win_dims WIDTH HEIGHT
-                        The dimensions for the vtk window. [(768, 768)]
-  --interactor {image,trackball}
-                        Specify interactor mode for vtk window. [trackball]
-  --axis_name {sagittal,coronal,axial}
-                        Name of the axis to visualize. [axial]
-  --silent              Disable interactive visualization.
-  --output OUTPUT       Path to output file.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-  --sphere {repulsion100,repulsion200,repulsion724,symmetric362,symmetric642,symmetric724}
-                        Name of the sphere used to reconstruct SF. [symmetric362]
-  --color_per_lobe      Color each Bingham distribution with a different color. [False]
diff --git a/scripts/.hidden/scil_viz_bundle.py.help b/scripts/.hidden/scil_viz_bundle.py.help
deleted file mode 100644
index 57d58effb..000000000
--- a/scripts/.hidden/scil_viz_bundle.py.help
+++ /dev/null
@@ -1,56 +0,0 @@
-usage: scil_viz_bundle.py [-h]
-                          [--random_coloring SEED | --uniform_coloring R G B | --local_coloring | --color_dict JSON | --color_from_streamlines KEY | --color_from_points KEY]
-                          [--shape {line,tube}] [--width WIDTH]
-                          [--subsample SUBSAMPLE] [--downsample DOWNSAMPLE]
-                          [--background R G B] [-v [{DEBUG,INFO,WARNING}]]
-                          in_bundles [in_bundles ...]
-
-Visualize bundles.
-
-Example usages:
-
-# Visualize streamlines as tubes, each bundle with a different color
->>> scil_viz_bundle.py path_to_bundles/ --shape tube --random_coloring 1337
-
-# Visualize a tractogram with each streamline drawn as a line, colored with
-# its local orientation, but only load 1 in 10 streamlines
->>> scil_viz_bundle.py tractogram.trk --shape line --subsample 10
-
-# Visualize CSTs as large tubes and color them from a list of colors in a file
->>> scil_viz_bundle.py path_to_bundles/CST_* --width 0.5
-    --color_dict colors.json
-
-positional arguments:
-  in_bundles            List of tractography files supported by nibabel.
-
-options:
-  -h, --help            show this help message and exit
-  --shape {line,tube}   Display streamlines either as lines or tubes.
-                        [Default: tube]
-  --width WIDTH         Width of tubes or lines representing streamlines.
-                        [Default: 0.25]
-  --subsample SUBSAMPLE
-                        Only load 1 in N streamlines.
-                        [Default: 1]
-  --downsample DOWNSAMPLE
-                        Downsample streamlines to N points.
-                        [Default: None]
-  --background R G B    RGB values [0, 255] of the color of the background.
-                        [Default: [0, 0, 0]]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-
-Colouring options:
-  --random_coloring SEED
-                        Assign a random color to bundles.
-  --uniform_coloring R G B
-                        Assign a uniform color to streamlines.
-  --local_coloring      Assign coloring to streamlines depending on their local orientations.
-  --color_dict JSON     JSON file containing colors for each bundle.
-                        Bundle filenames are indicated as keys and colors as values.
-                        A 'default' key and value can be included.
-  --color_from_streamlines KEY
-                        Extract a color per streamline from the data_per_streamline property of the tractogram at the specified key.
-  --color_from_points KEY
-                        Extract a color per point from the data_per_point property of the tractogram at the specified key.
diff --git a/scripts/.hidden/scil_viz_bundle_screenshot_mni.py.help b/scripts/.hidden/scil_viz_bundle_screenshot_mni.py.help
deleted file mode 100644
index 2eb32fe86..000000000
--- a/scripts/.hidden/scil_viz_bundle_screenshot_mni.py.help
+++ /dev/null
@@ -1,48 +0,0 @@
-usage: scil_viz_bundle_screenshot_mni.py [-h]
-                                         [--target_template TARGET_TEMPLATE]
-                                         [--local_coloring | --uniform_coloring R G B | --reference_coloring COLORBAR]
-                                         [--roi ROI [ROI ...]] [--right]
-                                         [--anat_opacity ANAT_OPACITY]
-                                         [--output_suffix OUTPUT_SUFFIX]
-                                         [--out_dir OUT_DIR]
-                                         [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                         in_bundle in_anat
-
-Register a bundle to a template for screenshots using a reference.
-The template can be any MNI152 (any resolution, cropped or not).
-If your in_anat has a skull, select an MNI152 template with a skull and
-vice-versa.
-
-If the bundle is already in MNI152 space, do not use --target_template.
-
-Axial, coronal and sagittal slices are captured.
-Sagittal can be captured from the left (default) or the right.
-
-For the --roi argument: if 1 value is provided, the ROI will be white;
-if 4 values are provided, the ROI will be colored with the RGB values
-provided; if 5 values are provided, it is RGBA (values from 0-255).
-
-positional arguments:
-  in_bundle             Path of the input bundle.
-  in_anat               Path of the reference file (.nii or nii.gz).
-
-options:
-  -h, --help            show this help message and exit
-  --target_template TARGET_TEMPLATE
-                        Path to the target MNI152 template for registration. If in_anat has a skull, select an MNI152 template with a skull and vice-versa.
-  --local_coloring      Color streamlines using local segments orientation.
-  --uniform_coloring R G B
-                        Color streamlines with uniform coloring.
-  --reference_coloring COLORBAR
-                        Color streamlines with reference coloring (0-255).
-  --roi ROI [ROI ...]   Path to a ROI file (.nii or nii.gz).
-  --right               Take the screenshot from the right instead of the left for the sagittal plane.
-  --anat_opacity ANAT_OPACITY
-                        Set the opacity for the anatomy, use 0 for complete transparency, 1 for opaque. [0.3]
-  --output_suffix OUTPUT_SUFFIX
-                        Add a suffix to the output, else the axis name is used.
-  --out_dir OUT_DIR     Put all images in a specific directory.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_viz_bundle_screenshot_mosaic.py.help b/scripts/.hidden/scil_viz_bundle_screenshot_mosaic.py.help
deleted file mode 100644
index 97c2c78ce..000000000
--- a/scripts/.hidden/scil_viz_bundle_screenshot_mosaic.py.help
+++ /dev/null
@@ -1,49 +0,0 @@
-usage: scil_viz_bundle_screenshot_mosaic.py [-h] [--uniform_coloring R G B]
-                                            [--random_coloring SEED]
-                                            [--zoom ZOOM] [--ttf TTF]
-                                            [--ttf_size TTF_SIZE]
-                                            [--opacity_background OPACITY_BACKGROUND]
-                                            [--resolution_of_thumbnails RESOLUTION_OF_THUMBNAILS]
-                                            [--light_screenshot]
-                                            [--no_information]
-                                            [--no_bundle_name]
-                                            [--no_streamline_number]
-                                            [--reference REFERENCE]
-                                            [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                            in_volume in_bundles
-                                            [in_bundles ...] out_image
-
-Visualize bundles from a list. The script will output a mosaic (image) with
-screenshots, 6 views per bundle in the list.
-
-positional arguments:
-  in_volume             Volume used as background (e.g. T1, FA, b0).
-  in_bundles            List of tractography files supported by nibabel or binary mask files.
-  out_image             Name of the output image mosaic (e.g. mosaic.jpg, mosaic.png).
-
-options:
-  -h, --help            show this help message and exit
-  --uniform_coloring R G B
-                        Assign a uniform color to streamlines (or ROIs).
-  --random_coloring SEED
-                        Assign a random color to streamlines (or ROIs).
-  --zoom ZOOM           Rendering zoom. A value greater than 1 is a zoom-in;
-                        a value less than 1 is a zoom-out [1.0].
-  --ttf TTF             Path of the TrueType font to use for legends.
-  --ttf_size TTF_SIZE   Font size (int) to use for the legends [35].
-  --opacity_background OPACITY_BACKGROUND
-                        Opacity of the background image, between 0 and 1.0 [0.4].
-  --resolution_of_thumbnails RESOLUTION_OF_THUMBNAILS
-                        Resolution of the thumbnails used in the mosaic [300].
-  --light_screenshot    Keep only 3 views instead of 6 [False].
-  --no_information      Don't display axis and bundle information [False].
-  --no_bundle_name      Don't display the bundle name [False].
-  --no_streamline_number
-                        Don't display the bundle's streamline count [False].
-  --reference REFERENCE
-                        Reference anatomy for tck/vtk/fib/dpy file
-                        support (.nii or .nii.gz).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_viz_connectivity.py.help b/scripts/.hidden/scil_viz_connectivity.py.help
deleted file mode 100644
index e69de29bb..000000000
diff --git a/scripts/.hidden/scil_viz_dti_screenshot.py.help b/scripts/.hidden/scil_viz_dti_screenshot.py.help
deleted file mode 100644
index c5f045dc1..000000000
--- a/scripts/.hidden/scil_viz_dti_screenshot.py.help
+++ /dev/null
@@ -1,30 +0,0 @@
-usage: scil_viz_dti_screenshot.py [-h] [--shells SHELLS [SHELLS ...]]
-                                  [--out_suffix OUT_SUFFIX]
-                                  [--out_dir OUT_DIR]
-                                  [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                  in_dwi in_bval in_bvec in_template
-
-Register DWI to a template for screenshots.
-The templates are on http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009
-
-For quick quality control, the MNI template can be downsampled to 2mm iso.
-Axial, coronal and sagittal slices are captured.
-
-positional arguments:
-  in_dwi                Path of the input diffusion volume.
-  in_bval               Path of the bval file, in FSL format.
-  in_bvec               Path of the bvec file, in FSL format.
-  in_template           Path to the target MNI152 template for
-                        registration, use the one provided online.
-
-options:
-  -h, --help            show this help message and exit
-  --shells SHELLS [SHELLS ...]
-                        Shells to use for the DTI fit (usually below 1200); b0 must be listed.
-  --out_suffix OUT_SUFFIX
-                        Add a suffix to the output, else the axis name is used.
-  --out_dir OUT_DIR     Put all images in a specific directory.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
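The help above shows no example call. A minimal sketch (file names and shell
values are illustrative assumptions; note that the b0 shell must be listed):

>>> scil_viz_dti_screenshot.py dwi.nii.gz dwi.bval dwi.bvec
    mni152_template.nii.gz --shells 0 1000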
diff --git a/scripts/.hidden/scil_viz_fodf.py.help b/scripts/.hidden/scil_viz_fodf.py.help
deleted file mode 100644
index d79004688..000000000
--- a/scripts/.hidden/scil_viz_fodf.py.help
+++ /dev/null
@@ -1,119 +0,0 @@
-usage: scil_viz_fodf.py [-h] [--slice_index SLICE_INDEX]
-                        [--win_dims WIDTH HEIGHT]
-                        [--interactor {image,trackball}]
-                        [--axis_name {sagittal,coronal,axial}] [--silent]
-                        [--in_transparency_mask IN_TRANSPARENCY_MASK]
-                        [--output OUTPUT] [-f]
-                        [--sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}]
-                        [--sphere {repulsion200,repulsion100,symmetric724,symmetric642,symmetric362,repulsion724}]
-                        [--sph_subdivide SPH_SUBDIVIDE] [--mask MASK]
-                        [--colormap COLORMAP | --color_rgb COLOR_RGB COLOR_RGB COLOR_RGB]
-                        [--scale SCALE] [--radial_scale_off] [--norm_off]
-                        [-v [{DEBUG,INFO,WARNING}]] [--background BACKGROUND]
-                        [--bg_range MIN MAX] [--bg_opacity BG_OPACITY]
-                        [--bg_offset BG_OFFSET]
-                        [--bg_interpolation {nearest,linear}]
-                        [--bg_color BG_COLOR BG_COLOR BG_COLOR]
-                        [--peaks PEAKS]
-                        [--peaks_color PEAKS_COLOR PEAKS_COLOR PEAKS_COLOR]
-                        [--peaks_width PEAKS_WIDTH]
-                        [--peaks_values PEAKS_VALUES | --peaks_length PEAKS_LENGTH]
-                        [--variance VARIANCE] [--variance_k VARIANCE_K]
-                        [--var_color VAR_COLOR VAR_COLOR VAR_COLOR]
-                        in_fodf
-
-Visualize a 2-dimensional fODF slice loaded from disk.
-
-Given an image of SH coefficients, this script displays a slice in a
-given orientation. The user can also add a background on top of which the
-fODF are to be displayed. Using a full SH basis, the script can be used to
-visualize asymmetric fODF. The user can supply a peaks image to visualize
-peaks on top of the fODF.
-
-If a transparency mask is given (e.g. a brain mask), all values outside the
-mask's non-zero values are set to full transparency in the saved scene.
-
-!!! CAUTION !!! The script is memory intensive (about 9 kB of allocated RAM
-per voxel, or 9 GB for a 1M-voxel volume) with a sphere interpolated to 362
-points.
-
-positional arguments:
-  in_fodf               Input SH image file.
-
-options:
-  -h, --help            show this help message and exit
-  --slice_index SLICE_INDEX
-                        Index of the slice to visualize along a given axis. Defaults to middle of volume.
-  --win_dims WIDTH HEIGHT
-                        The dimensions for the vtk window. [(768, 768)]
-  --interactor {image,trackball}
-                        Specify interactor mode for vtk window. [trackball]
-  --axis_name {sagittal,coronal,axial}
-                        Name of the axis to visualize. [axial]
-  --silent              Disable interactive visualization.
-  --in_transparency_mask IN_TRANSPARENCY_MASK
-                        Input mask image file.
-  --output OUTPUT       Path to output file.
-  -f                    Force overwriting of the output files.
-  --sh_basis {descoteaux07,tournier07,descoteaux07_legacy,tournier07_legacy}
-                        Spherical harmonics basis used for the SH coefficients.
-                        Must be either 'descoteaux07', 'tournier07',
-                        'descoteaux07_legacy' or 'tournier07_legacy' [['descoteaux07_legacy']]:
-                            'descoteaux07'       : SH basis from the Descoteaux et al.
-                                                   MRM 2007 paper
-                            'tournier07'         : SH basis from the new Tournier et al.
-                                                   NeuroImage 2019 paper, as in MRtrix 3.
-                            'descoteaux07_legacy': SH basis from the legacy Dipy implementation
-                                                   of the Descoteaux et al. MRM 2007 paper
-                            'tournier07_legacy'  : SH basis from the legacy Tournier et al.
-                                                   NeuroImage 2007 paper.
-  --sphere {repulsion200,repulsion100,symmetric724,symmetric642,symmetric362,repulsion724}
-                        Name of the sphere used to reconstruct SF. [symmetric362]
-  --sph_subdivide SPH_SUBDIVIDE
-                        Number of subdivisions for the given sphere. If not supplied, use the given sphere as is.
-  --mask MASK           Optional mask file. Only fODF inside the mask are displayed.
-  --colormap COLORMAP   Colormap for the ODF slicer. If None, then an RGB colormap will be used. [None]
-  --color_rgb COLOR_RGB COLOR_RGB COLOR_RGB
-                        Uniform color for the ODF slicer given as RGB, scaled between 0 and 1. [None]
-  --scale SCALE         Scaling factor for FODF. [0.5]
-  --radial_scale_off    Disable radial scale for ODF slicer.
-  --norm_off            Disable normalization of ODF slicer.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-
-Background arguments:
-  --background BACKGROUND
-                        Background image file. If RGB, values must be between 0 and 255.
-  --bg_range MIN MAX    The range of values mapped to range [0, 1] for the background image. [(bg.min(), bg.max())]
-  --bg_opacity BG_OPACITY
-                        The opacity of the background image. Opacity of 0.0 means transparent and 1.0 is completely visible. [1.0]
-  --bg_offset BG_OFFSET
-                        The offset of the background image. [0.5]
-  --bg_interpolation {nearest,linear}
-                        Interpolation mode for the background image. [nearest]
-  --bg_color BG_COLOR BG_COLOR BG_COLOR
-                        The color of the overall background, behind everything. Must be RGB values scaled between 0 and 1. [(0, 0, 0)]
-
-Peaks arguments:
-  --peaks PEAKS         Peaks image file.
-  --peaks_color PEAKS_COLOR PEAKS_COLOR PEAKS_COLOR
-                        Color used for peaks, as RGB values scaled between 0 and 1. If None, then an RGB colormap is used. [None]
-  --peaks_width PEAKS_WIDTH
-                        Width of peaks segments. [1.0]
-
-Peaks scaling arguments:
-  Choose between peaks values and arbitrary length.
-
-  --peaks_values PEAKS_VALUES
-                        Peaks values file.
-  --peaks_length PEAKS_LENGTH
-                        Length of the peaks segments. [0.65]
-
-Variance arguments:
-  For the visualization of fODF uncertainty, the variance is used as follows: mean + k * sqrt(variance), where mean is the input fODF (in_fodf) and k is the scaling factor (variance_k).
-
-  --variance VARIANCE   FODF variance file.
-  --variance_k VARIANCE_K
-                        Scaling factor (k) for the computation of the fODF uncertainty. [1]
-  --var_color VAR_COLOR VAR_COLOR VAR_COLOR
-                        Color of the variance outline. Must be RGB values scaled between 0 and 1. [(1, 1, 1)]
diff --git a/scripts/.hidden/scil_viz_gradients_screenshot.py.help b/scripts/.hidden/scil_viz_gradients_screenshot.py.help
deleted file mode 100644
index 074c16285..000000000
--- a/scripts/.hidden/scil_viz_gradients_screenshot.py.help
+++ /dev/null
@@ -1,38 +0,0 @@
-usage: scil_viz_gradients_screenshot.py [-h]
-                                        (--in_gradient_scheme IN_GRADIENT_SCHEME [IN_GRADIENT_SCHEME ...] | --dipy_sphere {symmetric362,symmetric642,symmetric724,repulsion724,repulsion100,repulsion200})
-                                        [--dis-sym]
-                                        [--out_basename OUT_BASENAME]
-                                        [--res RES] [--dis-sphere]
-                                        [--dis-proj] [--plot_shells]
-                                        [--same-color] [--opacity OPACITY]
-                                        [-v [{DEBUG,INFO,WARNING}]] [-f]
-
-Visualization of directions on a sphere, either from a gradient sampling (i.e.
-a list of b-vectors) or from a Dipy sphere.
-
-options:
-  -h, --help            show this help message and exit
-  --in_gradient_scheme IN_GRADIENT_SCHEME [IN_GRADIENT_SCHEME ...]
-                        Gradient sampling filename. (only accepts .bvec and
-                        .bval together or only .b).
-  --dipy_sphere {symmetric362,symmetric642,symmetric724,repulsion724,repulsion100,repulsion200}
-                        Dipy sphere choice.
-  --dis-sym             Disable antipodal symmetry.
-  --out_basename OUT_BASENAME
-                        Output file name picture without extension (will be
-                        png file(s)).
-  --res RES             Resolution of the output picture(s).
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided
-                        level. Default level is warning, default when using -v
-                        is info.
-  -f                    Force overwriting of the output files.
-
-Enable/Disable renderings:
-  --dis-sphere          Disable the rendering of the sphere.
-  --dis-proj            Disable rendering of the projection supershell.
-  --plot_shells         Enable rendering each shell individually.
-
-Rendering options:
-  --same-color          Use the same color for all shells.
-  --opacity OPACITY     Opacity for the shells.
diff --git a/scripts/.hidden/scil_viz_tractogram_seeds.py.help b/scripts/.hidden/scil_viz_tractogram_seeds.py.help
deleted file mode 100644
index 7b8e9c702..000000000
--- a/scripts/.hidden/scil_viz_tractogram_seeds.py.help
+++ /dev/null
@@ -1,21 +0,0 @@
-usage: scil_viz_tractogram_seeds.py [-h] [--save SAVE]
-                                    [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                    tractogram
-
-Visualize the seeds used to generate the tractogram or bundle.
-When tractography was run, each streamline produced by the tracking algorithm
-saved its seeding point (its origin).
-
-The tractogram must have been generated from scil_tracking_local.py or
-scil_tracking_pft.py with the --save_seeds option.
-
-positional arguments:
-  tractogram            Tractogram file (must be trk).
-
-options:
-  -h, --help            show this help message and exit
-  --save SAVE           If set, save a screenshot of the result in the specified filename.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_viz_tractogram_seeds_3d.py.help b/scripts/.hidden/scil_viz_tractogram_seeds_3d.py.help
deleted file mode 100644
index 352dbfcb7..000000000
--- a/scripts/.hidden/scil_viz_tractogram_seeds_3d.py.help
+++ /dev/null
@@ -1,46 +0,0 @@
-usage: scil_viz_tractogram_seeds_3d.py [-h] [--tractogram TRACTOGRAM]
-                                       [--colormap COLORMAP]
-                                       [--seed_opacity SEED_OPACITY]
-                                       [--tractogram_shape {line,tube}]
-                                       [--tractogram_opacity TRACTOGRAM_OPACITY]
-                                       [--tractogram_width TRACTOGRAM_WIDTH]
-                                       [--tractogram_color R G B [R G B ...]]
-                                       [--background R G B [R G B ...]]
-                                       [-v [{DEBUG,INFO,WARNING}]]
-                                       in_seed_map
-
-Visualize seeds as 3D points, with heatmaps corresponding to seed density.
-
-Example usages:
-
->>> scil_viz_tractogram_seeds_3d.py seeds.nii.gz --tractogram tractogram.trk
-
-positional arguments:
-  in_seed_map           Seed density map.
-
-options:
-  -h, --help            show this help message and exit
-  --tractogram TRACTOGRAM
-                        Tractogram corresponding to the seeds.
-  --colormap COLORMAP   Name of the map for the density coloring. Can be any colormap that matplotlib offers.
-                        [Default: bone]
-  --seed_opacity SEED_OPACITY
-                        Opacity of the contour generated.
-                        [Default: 0.5]
-  --tractogram_shape {line,tube}
-                        Display streamlines either as lines or tubes.
-                        [Default: tube]
-  --tractogram_opacity TRACTOGRAM_OPACITY
-                        Opacity of the streamlines.
-                        [Default: 0.5]
-  --tractogram_width TRACTOGRAM_WIDTH
-                        Width of tubes or lines representing streamlines.
-                        [Default: 0.05]
-  --tractogram_color R G B [R G B ...]
-                        Color for the tractogram.
-  --background R G B [R G B ...]
-                        RGB values [0, 255] of the color of the background.
-                        [Default: [0, 0, 0]]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
diff --git a/scripts/.hidden/scil_viz_volume_histogram.py.help b/scripts/.hidden/scil_viz_volume_histogram.py.help
deleted file mode 100644
index 7ca783940..000000000
--- a/scripts/.hidden/scil_viz_volume_histogram.py.help
+++ /dev/null
@@ -1,30 +0,0 @@
-usage: scil_viz_volume_histogram.py [-h] [--title TITLE] [--x_label X_LABEL]
-                                    [--colors COLORS] [--show_only]
-                                    [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                    in_metric in_mask n_bins out_png
-
-Script to display a histogram of a metric (FA, MD, etc.) from a binary mask
-(wm mask, vascular mask, etc.).
-These two images must be co-registered with each other.
-
->>> scil_viz_volume_histogram.py metric.nii.gz mask_bin.nii.gz 8
-    out_filename_image.png
-
-positional arguments:
-  in_metric   Metric map, e.g. FA, MD, ...
-  in_mask     Binary mask used to extract values.
-  n_bins      Number of bins to use for the histogram.
-  out_png     Output filename for the figure.
-
-options:
-  -h, --help   show this help message and exit
-  --show_only  Do not save the figure, only display.
-  -v [{DEBUG,INFO,WARNING}]
-               Produces verbose output depending on the provided level.
-               Default level is warning, default when using -v is info.
-  -f           Force overwriting of the output files.
-
-Histogram options:
-  --title TITLE      Use the provided info for the histogram title. [Histogram]
-  --x_label X_LABEL  Use the provided info for the x axis name.
-  --colors COLORS    Use the provided info for the bars color. [#0504aa]
diff --git a/scripts/.hidden/scil_viz_volume_scatterplot.py.help b/scripts/.hidden/scil_viz_volume_scatterplot.py.help
deleted file mode 100644
index 0cd1eb402..000000000
--- a/scripts/.hidden/scil_viz_volume_scatterplot.py.help
+++ /dev/null
@@ -1,94 +0,0 @@
-usage: scil_viz_volume_scatterplot.py [-h] [--out_dir OUT_DIR] [--thr THR]
-                                      [--not_exclude_zero]
-                                      [--in_bin_mask IN_BIN_MASK | --in_prob_maps IN_PROB_MAPS IN_PROB_MAPS | --in_atlas IN_ATLAS]
-                                      [--atlas_lut ATLAS_LUT]
-                                      [--specific_label SPECIFIC_LABEL [SPECIFIC_LABEL ...]]
-                                      [--in_folder] [--title TITLE]
-                                      [--x_label X_LABEL] [--y_label Y_LABEL]
-                                      [--label LABEL]
-                                      [--label_prob LABEL_PROB]
-                                      [--marker MARKER]
-                                      [--marker_size MARKER_SIZE]
-                                      [--transparency TRANSPARENCY]
-                                      [--dpi DPI] [--colors color1 color2]
-                                      [--show_only]
-                                      [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                      in_x_map in_y_map out_name
-
-Script to display a scatter plot between two maps (e.g. FA and MD, ihMT and MT).
-By default, no mask is applied to the data.
-Different options are available to mask or threshold data:
-    - a binary mask
-    - two probability maps, which can be used to threshold maps with
-      --in_prob_maps. The same threshold is applied on these two maps (--thr).
-    - parcellation, which can be used to plot values for each region of
-      an atlas (--in_atlas) or a subset of regions (--specific_label).
-      The atlas option requires a json file (--atlas_lut) with indices and
-      names of each label corresponding to the atlas, as follows:
-      "1": "lh_A8m",
-      "2": "rh_A8m",
-      The numbers must correspond to the label indices in the json file.
-
-Be careful, you cannot use all of them at the same time.
-
-For general scatter plot without mask:
->>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png
-
-For scatter plot with mask:
->>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png
-    --in_bin_mask mask_wm.nii.gz
-
-For tissue probability scatter plot:
->>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png
-    --in_prob_maps wm_map.nii.gz gm_map.nii.gz
-
-For scatter plot using atlas:
->>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png
-    --in_atlas atlas.nii.gz --atlas_lut atlas.json
-
->>> scil_viz_volume_scatterplot.py FA.nii.gz MD.nii.gz out_filename_image.png
-    --in_atlas atlas.nii.gz --atlas_lut atlas.json
-    --specific_label 34 67 87
-
-positional arguments:
-  in_x_map              Map in x axis, FA for example.
-  in_y_map              Map in y axis, MD for example.
-  out_name              Output filename for the figure, without extension.
-
-options:
-  -h, --help            show this help message and exit
-  --out_dir OUT_DIR     Output directory to save scatter plot.
-  --thr THR             Use to apply threshold only on probability maps (same for both maps) with the --in_prob_maps option. [0.9]
-  --not_exclude_zero    Keep zero values in the data.
-  --in_bin_mask IN_BIN_MASK
-                        Binary mask. Use this option to extract x and y map values from a specific mask or region: wm_mask or roi_mask for example.
-  --in_prob_maps IN_PROB_MAPS IN_PROB_MAPS
-                        Probability maps, WM and GM for example.
-  --in_atlas IN_ATLAS   Path to the input atlas image.
-  --show_only           Do not save the figure, only display. Not available with the --in_atlas option.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Atlas options:
-  --atlas_lut ATLAS_LUT
-                        Path of the LUT file corresponding to the atlas, used to name the regions of interest.
-  --specific_label SPECIFIC_LABEL [SPECIFIC_LABEL ...]
-                        List of labels to use for the scatter plot. Labels must correspond to the atlas LUT file. [None]
-  --in_folder           Save label plots in subfolder "Label_plots".
-
-Scatter plot options:
-  --title TITLE         Use the provided info for the title name. [Scatter Plot]
-  --x_label X_LABEL     Use the provided info for the x axis name. [x]
-  --y_label Y_LABEL     Use the provided info for the y axis name. [y]
-  --label LABEL         Use the provided info for the legend box corresponding to the mask or first probability map. [None]
-  --label_prob LABEL_PROB
-                        Use the provided info for the legend box corresponding to the second probability map. [Threshold prob_map 2]
-  --marker MARKER       Use the provided info for the marker shape. [.]
-  --marker_size MARKER_SIZE
-                        Use the provided info for the marker size. [15]
-  --transparency TRANSPARENCY
-                        Use the provided info for the point transparency. [0.4]
-  --dpi DPI             Use the provided info for the dpi resolution.
-                        [300]
-  --colors color1 color2
diff --git a/scripts/.hidden/scil_viz_volume_screenshot.py.help b/scripts/.hidden/scil_viz_volume_screenshot.py.help
deleted file mode 100644
index e59e04640..000000000
--- a/scripts/.hidden/scil_viz_volume_screenshot.py.help
+++ /dev/null
@@ -1,118 +0,0 @@
-usage: scil_viz_volume_screenshot.py [-h]
-                                     [--volume_cmap_name VOLUME_CMAP_NAME]
-                                     [--volume_opacity VOLUME_OPACITY]
-                                     [--transparency TRANSPARENCY]
-                                     [--slices SID [SID ...]]
-                                     [--axis {sagittal,coronal,axial}]
-                                     [--size WIDTH HEIGHT]
-                                     [--display_slice_number] [--display_lr]
-                                     [--labelmap LABELMAP]
-                                     [--labelmap_cmap_name LABELMAP_CMAP_NAME]
-                                     [--labelmap_opacity LABELMAP_OPACITY]
-                                     [--overlays OVERLAYS [OVERLAYS ...]]
-                                     [--overlays_as_contours]
-                                     [--overlays_colors R G B [R G B ...]]
-                                     [--overlays_opacity OVERLAYS_OPACITY]
-                                     [--peaks PEAKS [PEAKS ...]]
-                                     [--peaks_width PEAKS_WIDTH]
-                                     [--peaks_opacity PEAKS_OPACITY]
-                                     [-v [{DEBUG,INFO,WARNING}]]
-                                     volume out_fname
-
-Take screenshot(s) of one or more slices in a given image volume along the
-requested axis. If slice indices are not provided, all slices in the volume
-are used. The names of the output images are suffixed with _slice_{id}, with
-id being the slice number in the volume. If a labelmap image is provided (e.g.
-a tissue segmentation map), it is overlaid on the volume slices. Same goes if
-a mask is provided, with the difference that it can be rendered as a
-transparency overlay as well as a contour.
-
-A labelmap image can be provided as the image volume, without requiring it as
-the optional argument if only the former needs to be plotted.
-
-Example:
->>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_annotated.png
-    --display_slice_number --display_lr
-
->>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_masked.png
-    --transparency brainmask.nii.gz
-
->>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial.png
-    --slices 30 40 50 60 70 80 90 100
-
->>> scil_viz_volume_screenshot.py t1.nii.gz t1_sagittal.png --axis sagittal
-
->>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_plasma_cmap.png
-    --slices 30 40 50 60 70 80 90 100 --volume_cmap_name plasma
-
->>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_overlay.png
-    --slices 30 40 50 60 70 80 90 100 --overlays brain_mask.nii.gz
-
->>> scil_viz_volume_screenshot.py t1.nii.gz t1_mask_contour.png
-    --slices 30 40 50 60 70 80 90 100
-    --overlays brain_mask.nii.gz --overlays_as_contours
-
->>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_map.png
-    --slices 30 40 50 60 70 80 90 100 --labelmap tissue_map.nii.gz
-
->>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_tissue_viridis_cmap.png
-    --slices 30 40 50 60 70 80 90 100
-    --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis
-
->>> scil_viz_volume_screenshot.py t1.nii.gz t1_axial_peaks.png
-    --slices 30 40 50 60 70 80 90 100 --peaks peaks.nii.gz --volume_opacity 0.5
-
-positional arguments:
-  volume                Input 3D Nifti file (.nii/.nii.gz).
-  out_fname             Name of the output image(s). If multiple slices are provided (or none), their index will be appended to the name (e.g. volume.jpg, volume.png becomes volume_slice_0.jpg, volume_slice_0.png).
-
-options:
-  -h, --help            show this help message and exit
-  --transparency TRANSPARENCY
-                        Transparency Nifti image (.nii/.nii.gz). Can either be a binary mask or a scalar image in the range [0, 1].
-  --size WIDTH HEIGHT   Size of the output image. [(768, 768)]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-
-Slicing:
-  --slices SID [SID ...]
-                        Slice indices to screenshot. If none are supplied, all slices inside the transparency mask are selected.
-  --axis {sagittal,coronal,axial}
-                        Name of the axis to visualize. [axial]
-
-Input overlays:
-  --labelmap LABELMAP   Input labelmap file (.nii/.nii.gz).
-  --overlays OVERLAYS [OVERLAYS ...]
-                        3D Nifti image(s) to overlay (.nii/.nii.gz).
-  --peaks PEAKS [PEAKS ...]
-                        Peaks Nifti image (.nii/.nii.gz).
-
-Volume rendering:
-  --volume_cmap_name VOLUME_CMAP_NAME
-                        Colormap name for the 3D Nifti image data. [None]
-  --volume_opacity VOLUME_OPACITY
-                        Opacity value for the 3D Nifti image data. [1.0]
-  --labelmap_cmap_name LABELMAP_CMAP_NAME
-                        Colormap name for the labelmap image data. [viridis]
-  --labelmap_opacity LABELMAP_OPACITY
-                        Opacity value for the labelmap image data. [0.5]
-
-Peaks rendering:
-  --peaks_width PEAKS_WIDTH
-                        Width of the peaks lines. [3.0]
-  --peaks_opacity PEAKS_OPACITY
-                        Opacity value for the peaks overlay. [1.0]
-
-Overlay rendering:
-  --overlays_as_contours
-                        Display overlay contours and reduce the opacity of their inner region (see the `--overlays_opacity` argument).
-  --overlays_colors R G B [R G B ...]
-                        Colors for the overlays or contours. You may provide a single color, for all overlays/contours, or one color for each. Each color is given as three values: R G B
-  --overlays_opacity OVERLAYS_OPACITY
-                        Opacity value for the masks overlays. When combined with `--overlays_as_contours`, this will be the opacity of the region inside the computed contours. [0.5]
-
-Annotations:
-  --display_slice_number
-                        If true, displays the slice number in the upper left corner.
-  --display_lr          If true, add left and right annotations to the images.
diff --git a/scripts/.hidden/scil_viz_volume_screenshot_mosaic.py.help b/scripts/.hidden/scil_viz_volume_screenshot_mosaic.py.help
deleted file mode 100644
index a780553e0..000000000
--- a/scripts/.hidden/scil_viz_volume_screenshot_mosaic.py.help
+++ /dev/null
@@ -1,96 +0,0 @@
-usage: scil_viz_volume_screenshot_mosaic.py [-h]
-                                            [--volume_cmap_name VOLUME_CMAP_NAME]
-                                            [--volume_opacity VOLUME_OPACITY]
-                                            [--axis {sagittal,coronal,axial}]
-                                            [--size WIDTH HEIGHT]
-                                            [--labelmap LABELMAP]
-                                            [--labelmap_cmap_name LABELMAP_CMAP_NAME]
-                                            [--labelmap_opacity LABELMAP_OPACITY]
-                                            [--overlays OVERLAYS [OVERLAYS ...]]
-                                            [--overlays_as_contours]
-                                            [--overlays_colors R G B [R G B ...]]
-                                            [--overlays_opacity OVERLAYS_OPACITY]
-                                            [--overlap rWIDTH rHEIGHT]
-                                            [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                            rows cols volume transparency
-                                            out_fname SID [SID ...]
-
-Compose a mosaic of screenshots of the given image volume slices along the
-requested axis. The provided transparency mask (e.g. a brain mask volume) is
-used to set to full transparency the screenshot values outside the mask's
-non-zero values. Additionally, if a labelmap image is provided (e.g. a tissue
-segmentation map), it is overlaid on the volume slices. Also, a series of
-masks can be provided and will be used to generate contours overlaid on each
-volume slice.
-
-A labelmap image can be provided as the image volume, without requiring it as
-the optional argument if only the former needs to be plotted.
-
-The screenshots are overlapped according to the given factors.
-
-The mosaic supports either horizontal, vertical or matrix arrangements.
-
-Example:
->>> scil_viz_volume_screenshot_mosaic.py 1 8 t1.nii.gz brain_mask.nii.gz
-    mosaic_overlap_t1_axial.png 30 40 50 60 70 80 90 100
-
->>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz
-    mosaic_overlap_t1_axial_plasma_cmap.png 30 40 50 60 70 80 90 100
-    --overlap 0.6 0.5 --volume_cmap_name plasma
-
->>> scil_viz_volume_screenshot_mosaic.py 2 4 tissues.nii.gz brain_mask.nii.gz
-    mosaic_overlap_tissues_axial_plasma_cmap.png 30 40 50 60 70 80 90 100
-    --volume_cmap_name plasma
-
->>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz
-    mosaic_overlap_t1_sagittal_tissue_viridis_cmap.png
-    30 40 50 60 70 80 90 100 --axis sagittal
-    --labelmap tissue_map.nii.gz --labelmap_cmap_name viridis
-
->>> scil_viz_volume_screenshot_mosaic.py 2 4 t1.nii.gz brain_mask.nii.gz
-    mosaic_overlap_t1_sagittal_tissue_contours.png
-    30 40 50 60 70 80 90 100 --axis sagittal
-    --overlays wm_mask.nii.gz gm_mask.nii.gz csf_mask.nii.gz
-
-positional arguments:
-  rows                  The mosaic row count.
-  cols                  The mosaic column count.
-  volume                Input 3D Nifti file (.nii/.nii.gz).
-  transparency          Transparency Nifti image (.nii/.nii.gz). Can either be a binary mask or a scalar image in the range [0, 1].
-  out_fname             Name of the output image (e.g. img.jpg, img.png).
-  SID                   Slice indices to screenshot.
-
-options:
-  -h, --help            show this help message and exit
-  --axis {sagittal,coronal,axial}
-                        Name of the axis to visualize. [axial]
-  --size WIDTH HEIGHT   Size of the output image. [(768, 768)]
-  --overlap rWIDTH rHEIGHT
-                        The overlap factor as a ratio of each image dimension. [(0.6, 0.0)]
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
-
-Input overlays:
-  --labelmap LABELMAP   Input labelmap file (.nii/.nii.gz).
-  --overlays OVERLAYS [OVERLAYS ...]
-                        3D Nifti image(s) to overlay (.nii/.nii.gz).
-
-Volume rendering:
-  --volume_cmap_name VOLUME_CMAP_NAME
-                        Colormap name for the 3D Nifti image data. [None]
-  --volume_opacity VOLUME_OPACITY
-                        Opacity value for the 3D Nifti image data. [1.0]
-  --labelmap_cmap_name LABELMAP_CMAP_NAME
-                        Colormap name for the labelmap image data. [viridis]
-  --labelmap_opacity LABELMAP_OPACITY
-                        Opacity value for the labelmap image data. [0.5]
-
-Overlay rendering:
-  --overlays_as_contours
-                        Display overlay contours and reduce the opacity of their inner region (see the `--overlays_opacity` argument).
-  --overlays_colors R G B [R G B ...]
-                        Colors for the overlays or contours. You may provide a single color, for all overlays/contours, or one color for each. Each color is given as three values: R G B
-  --overlays_opacity OVERLAYS_OPACITY
-                        Opacity value for the masks overlays. When combined with `--overlays_as_contours`, this will be the opacity of the region inside the computed contours. [0.5]
diff --git a/scripts/.hidden/scil_volume_apply_transform.py.help b/scripts/.hidden/scil_volume_apply_transform.py.help
deleted file mode 100644
index 753ce1883..000000000
--- a/scripts/.hidden/scil_volume_apply_transform.py.help
+++ /dev/null
@@ -1,27 +0,0 @@
-usage: scil_volume_apply_transform.py [-h] [--inverse] [--keep_dtype]
-                                      [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                      in_file in_target_file in_transfo
-                                      out_name
-
-Transform a Nifti image (.nii.gz) using an affine/rigid transformation.
-
-For more information on how to use the registration script, follow this link:
-https://scilpy.readthedocs.io/en/latest/documentation/tractogram_registration.html
-
-Formerly: scil_apply_transform_to_image.py.
-
-positional arguments:
-  in_file         Path of the file to be transformed (nii or nii.gz).
-  in_target_file  Path of the reference target file (.nii.gz).
-  in_transfo      Path of the file containing the 4x4
-                  transformation matrix (.txt, .npy or .mat).
-  out_name        Output filename of the transformed data.
-
-options:
-  -h, --help      show this help message and exit
-  --inverse       Apply the inverse transformation.
-  --keep_dtype    If True, keeps the data_type of the input image (in_file) when saving the output image (out_name).
-  -v [{DEBUG,INFO,WARNING}]
-                  Produces verbose output depending on the provided level.
-                  Default level is warning, default when using -v is info.
-  -f              Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_volume_b0_synthesis.py.help b/scripts/.hidden/scil_volume_b0_synthesis.py.help
deleted file mode 100644
index 0510f7432..000000000
--- a/scripts/.hidden/scil_volume_b0_synthesis.py.help
+++ /dev/null
@@ -1,34 +0,0 @@
-usage: scil_volume_b0_synthesis.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                   in_b0 in_b0_mask in_t1 in_t1_mask out_b0
-
-Wrapper for SyNb0 available in Dipy, to run it on a single subject.
-Requires skull-stripped b0 and t1w images as input. The script will normalize
-the t1w's WM to 110, co-register both images, register them to the appropriate
-template, run SyNb0 and then transform the result back to the original space.
-
-SyNb0 is a deep learning model that predicts a synthetic, distortion-free
-b0 image from a distorted b0 and T1w.
-
-This script must be used carefully, as it is meant to be used in an
-environment with the following dependencies already installed (not installed by
-default in Scilpy):
-- tensorflow-addons
-- tensorrt
-- tensorflow
-
-positional arguments:
-  in_b0       Input b0 image.
-  in_b0_mask  Input b0 mask.
-  in_t1       Input t1w image.
-  in_t1_mask  Input t1w mask.
-  out_b0      Output b0 image without distortion.
-
-options:
-  -h, --help  show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-              Produces verbose output depending on the provided level.
-              Default level is warning, default when using -v is info.
-  -f          Force overwriting of the output files.
-
-[1] Schilling, Kurt G., et al. "Synthesized b0 for diffusion distortion
-    correction (Synb0-DisCo)." Magnetic resonance imaging 64 (2019): 62-70.
diff --git a/scripts/.hidden/scil_volume_count_non_zero_voxels.py.help b/scripts/.hidden/scil_volume_count_non_zero_voxels.py.help
deleted file mode 100644
index df2606d66..000000000
--- a/scripts/.hidden/scil_volume_count_non_zero_voxels.py.help
+++ /dev/null
@@ -1,31 +0,0 @@
-usage: scil_volume_count_non_zero_voxels.py [-h] [--out OUT_FILE] [--stats]
-                                            [--id VALUE_ID]
-                                            [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                            IN_FILE
-
-Count the number of non-zero voxels in an image file.
-
-If you give it an image with more than 3 dimensions, it will summarize the 4th
-(or more) dimension to one voxel, and then find non-zero voxels over this.
-This means that if there is at least one non-zero voxel in the 4th dimension,
-this voxel of the 3D volume will be considered as non-zero.
-
-Formerly: scil_count_non_zero_voxels.py
-
-positional arguments:
-  IN_FILE        Input file name, in nifti format.
-
-options:
-  -h, --help     show this help message and exit
-  --out OUT_FILE Name of the output file, which will be saved as a text file.
-  --stats        If set, output the value using a stats format. Using this syntax will append
-                 a line to the output file, instead of creating a file with only one line.
-                 This is useful to create a file to be used as the source of data for a graph.
-                 Can be combined with --id.
-  --id VALUE_ID  Id of the current count. If used, the value of this argument will be
-                 output (followed by a ":") before the count value.
-                 Mostly useful with --stats.
-  -v [{DEBUG,INFO,WARNING}]
-                 Produces verbose output depending on the provided level.
-                 Default level is warning, default when using -v is info.
-  -f             Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_volume_crop.py.help b/scripts/.hidden/scil_volume_crop.py.help
deleted file mode 100644
index 71c5789c5..000000000
--- a/scripts/.hidden/scil_volume_crop.py.help
+++ /dev/null
@@ -1,30 +0,0 @@
-usage: scil_volume_crop.py [-h] [--ignore_voxel_size]
-                           [-v [{DEBUG,INFO,WARNING}]] [-f]
-                           [--input_bbox INPUT_BBOX | --output_bbox OUTPUT_BBOX]
-                           in_image out_image
-
-Crop a volume using a given or an automatically computed bounding box. If a
-previously computed bounding box file is given, the cropping will be applied
-and the affine fixed accordingly.
-
-Warning: This works well on masked volumes (e.g. with FSL-BET) since it looks
-for non-zero data. Therefore, you should validate the results on other types
-of images that haven't been masked.
-
-Formerly: scil_crop_volume.py
-
-positional arguments:
-  in_image             Path of the nifti file to crop.
-  out_image            Path of the cropped nifti file to write.
-
-options:
-  -h, --help           show this help message and exit
-  --ignore_voxel_size  Ignore voxel size compatibility test between input bounding box and data. Warning, use only if you know what you are doing.
-  -v [{DEBUG,INFO,WARNING}]
-                       Produces verbose output depending on the provided level.
-                       Default level is warning, default when using -v is info.
-  -f                   Force overwriting of the output files.
-  --input_bbox INPUT_BBOX
-                       Path of the pickle file from which to take the bounding box to crop the input file.
-  --output_bbox OUTPUT_BBOX
-                       Path of the pickle file where to write the computed bounding box. (.pickle extension)
diff --git a/scripts/.hidden/scil_volume_flip.py.help b/scripts/.hidden/scil_volume_flip.py.help
deleted file mode 100644
index 54134b2bd..000000000
--- a/scripts/.hidden/scil_volume_flip.py.help
+++ /dev/null
@@ -1,18 +0,0 @@
-usage: scil_volume_flip.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
-                           in_image out_image dimension [dimension ...]
-
-Flip the volume according to the specified axes.
-
-Formerly: scil_flip_volume.py
-
-positional arguments:
-  in_image    Path of the input volume (nifti).
-  out_image   Path of the output volume (nifti).
-  dimension   The axes you want to flip. e.g.: to flip the x and y axes use: x y.
-
-options:
-  -h, --help  show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-              Produces verbose output depending on the provided level.
-              Default level is warning, default when using -v is info.
-  -f          Force overwriting of the output files.
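
Note: as a rough illustration of the flip described above, a minimal Python
sketch that flips only the data array (the real script may also handle the
affine or header differently; the filenames and the axis choice are
placeholders):

    import nibabel as nib
    import numpy as np

    img = nib.load('in.nii.gz')
    flipped = np.flip(img.get_fdata(), axis=0)  # axis 0 plays the role of 'x'

    # Reusing the original affine means the anatomical orientation changes;
    # a careful implementation may compensate in the affine instead.
    nib.save(nib.Nifti1Image(flipped, img.affine, img.header), 'out.nii.gz')
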
diff --git a/scripts/.hidden/scil_volume_math.py.help b/scripts/.hidden/scil_volume_math.py.help
deleted file mode 100644
index 1d8aa074e..000000000
--- a/scripts/.hidden/scil_volume_math.py.help
+++ /dev/null
@@ -1,176 +0,0 @@
-usage: scil_volume_math.py [-h] [--data_type DATA_TYPE] [--exclude_background]
-                           [-v [{DEBUG,INFO,WARNING}]] [-f]
-                           {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference,concatenate,dilation,erosion,closing,opening,blur}
-                           in_args [in_args ...] out_image
-
-Performs an operation on a list of images. The supported operations are
-listed below.
-
-This script loads all images into memory, so it will often crash after a few
-hundred images.
-
-Some operations such as multiplication or addition accept float values as
-parameters instead of images.
-> scil_volume_math.py multiplication img.nii.gz 10 mult_10.nii.gz
-
-Formerly: scil_image_math.py
-
-    lower_threshold: IMG THRESHOLD
-        All values below the threshold will be set to zero.
-        All values above the threshold will be set to one.
-
-    upper_threshold: IMG THRESHOLD
-        All values below the threshold will be set to one.
-        All values above the threshold will be set to zero.
-        Equivalent to lower_threshold followed by an inversion.
-
-    lower_threshold_eq: IMG THRESHOLD
-        All values below the threshold will be set to zero.
-        All values above or equal to the threshold will be set to one.
-
-    upper_threshold_eq: IMG THRESHOLD
-        All values below or equal to the threshold will be set to one.
-        All values above the threshold will be set to zero.
-        Equivalent to lower_threshold_eq followed by an inversion.
-
-    lower_threshold_otsu: IMG
-        All values below or equal to the Otsu threshold will be set to zero.
-        All values above the Otsu threshold will be set to one.
-        (Otsu's method is an algorithm to perform automatic image thresholding
-        of the background.)
-
-    upper_threshold_otsu: IMG
-        All values below the Otsu threshold will be set to one.
-        All values above or equal to the Otsu threshold will be set to zero.
-        Equivalent to lower_threshold_otsu followed by an inversion.
-
-    lower_clip: IMG THRESHOLD
-        All values below the threshold will be set to threshold.
-
-    upper_clip: IMG THRESHOLD
-        All values above the threshold will be set to threshold.
-
-    absolute_value: IMG
-        All negative values will become positive.
-
-    round: IMG
-        Round all decimal values to the closest integer.
-
-    ceil: IMG
-        Ceil all decimal values to the next integer.
-
-    floor: IMG
-        Floor all decimal values to the previous integer.
-
-    normalize_sum: IMG
-        Normalize the image so the sum of all values is one.
-
-    normalize_max: IMG
-        Normalize the image so the maximum value is one.
-
-    log_10: IMG
-        Apply a log (base 10) to all non-zero values of an image.
-
-    log_e: IMG
-        Apply a natural log to all non-zero values of an image.
-
-    convert: IMG
-        Perform no operation, but simply change the data type.
-
-    invert: IMG
-        Operation on binary image to interchange 0s and 1s in a binary mask.
-
-    addition: IMGs
-        Add multiple images together.
-
-    subtraction: IMG_1 IMG_2
-        Subtract the second image from the first (IMG_1 - IMG_2).
-
-    multiplication: IMGs
-        Multiply multiple images together (danger of underflow and overflow).
-
-    division: IMG_1 IMG_2
-        Divide the first image by the second (danger of underflow and overflow).
-        Zero values are ignored and excluded from the operation.
-
-    mean: IMGs
-        Compute the mean of images.
-        If a single 4D image is provided, average along the last dimension.
-
-    std: IMGs
-        Compute the standard deviation over multiple images.
-        If a single 4D image is provided, compute the STD along the last
-        dimension.
-
-    correlation: IMGs
-        Computes the correlation of the 3x3x3 neighborhood of each voxel, for
-        each pair of input images. The final image is the average correlation
-        (through all pairs).
-        For a given pair of images:
-        - Background is considered as 0. May lead to very high correlations
-        close to the border of the background regions, or very poor ones if the
-        backgrounds in the two images differ.
-        - Images are zero-padded. For the same reason as above, this may lead
-        to very high correlations if you have data close to the border of the
-        image.
-        - NaN values (if a voxel's neighborhood is entirely uniform; std 0) are
-        replaced by
-            - 0 if at least one neighborhood was entirely background.
-            - 1 if the voxel's neighborhoods are uniform in both images.
-            - 0 if the voxel's neighborhood is uniform in one image, but not
-            the other.
-
-        UPDATE AS OF VERSION 2.0: Random noise was previously added in the
-        process to help avoid NaN values. It is now replaced by either 0 or 1
-        as explained above.
-
-    union: IMGs
-        Operation on binary images to keep voxels that are non-zero in at
-        least one file.
-
-    intersection: IMGs
-        Operation on binary images to keep the voxels that are non-zero and
-        present in all files.
-
-    difference: IMG_1 IMG_2
-        Operation on binary images to keep the non-zero voxels from the first
-        file that are not in the second file.
-
-    concatenate: IMGs
-        Concatenate a list of 3D and 4D images into a single 4D image.
-
-    dilation: IMG, VALUE
-        Binary morphological operation to spatially extend the values of an
-        image to their neighbors. VALUE is in voxels: an integer > 0.
-
-    erosion: IMG, VALUE
-        Binary morphological operation to spatially shrink the volume contained
-        in a binary image. VALUE is in voxels: an integer > 0.
-
-    closing: IMG, VALUE
-        Binary morphological operation, dilation followed by an erosion.
-
-    opening: IMG, VALUE
-        Binary morphological operation, erosion followed by a dilation.
-
-    blur: IMG, VALUE
-        Apply a gaussian blur to a single image. VALUE is sigma, the standard
-        deviation of the Gaussian kernel.
-
-
-positional arguments:
-  {lower_threshold,upper_threshold,lower_threshold_eq,upper_threshold_eq,lower_threshold_otsu,upper_threshold_otsu,lower_clip,upper_clip,absolute_value,round,ceil,floor,normalize_sum,normalize_max,log_10,log_e,convert,invert,addition,subtraction,multiplication,division,mean,std,correlation,union,intersection,difference,concatenate,dilation,erosion,closing,opening,blur}
-                        The type of operation to be performed on the images.
-  in_args               The list of image files or parameters. Refer to each operation's documentation for the expected arguments.
-  out_image             Output image path.
-
-options:
-  -h, --help            show this help message and exit
-  --data_type DATA_TYPE
-                        Data type of the output image. Use the format:
-                        uint8, int16, int/float32, int/float64.
-  --exclude_background  Does not affect the background of the original images.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_volume_remove_outliers_ransac.py.help b/scripts/.hidden/scil_volume_remove_outliers_ransac.py.help
deleted file mode 100644
index 572a6734b..000000000
--- a/scripts/.hidden/scil_volume_remove_outliers_ransac.py.help
+++ /dev/null
@@ -1,26 +0,0 @@
-usage: scil_volume_remove_outliers_ransac.py [-h] [--min_fit MIN_FIT]
-                                             [--max_iter MAX_ITER]
-                                             [--fit_thr FIT_THR]
-                                             [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                             in_image out_image
-
-Remove outliers from an image using the RANSAC algorithm.
-The RANSAC algorithm parameters are sensitive to the input data.
-
-NOTE: Current default parameters are tuned for ad/md/rd images only.
-
-Formerly: scil_remove_outliers_ransac.py
-
-positional arguments:
-  in_image             Nifti image.
-  out_image            Corrected Nifti image.
-
-options:
-  -h, --help           show this help message and exit
-  --min_fit MIN_FIT    The minimum number of data values required to fit the model. [50]
-  --max_iter MAX_ITER  The maximum number of iterations allowed in the algorithm. [1000]
-  --fit_thr FIT_THR    Threshold value for determining when a data point fits a model. [0.01]
-  -v [{DEBUG,INFO,WARNING}]
-                       Produces verbose output depending on the provided level.
-                       Default level is warning, default when using -v is info.
-  -f                   Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_volume_resample.py.help b/scripts/.hidden/scil_volume_resample.py.help
deleted file mode 100644
index 984352569..000000000
--- a/scripts/.hidden/scil_volume_resample.py.help
+++ /dev/null
@@ -1,36 +0,0 @@
-usage: scil_volume_resample.py [-h]
-                               (--ref REF | --volume_size VOLUME_SIZE [VOLUME_SIZE ...] | --voxel_size VOXEL_SIZE [VOXEL_SIZE ...] | --iso_min)
-                               [--interp {nn,lin,quad,cubic}]
-                               [--enforce_dimensions]
-                               [-v [{DEBUG,INFO,WARNING}]] [-f]
-                               in_image out_image
-
-Script to resample a dataset to match the resolution of another
-reference dataset or to the resolution specified as argument.
-
-Formerly: scil_resample_volume.py
-
-positional arguments:
-  in_image              Path of the input volume.
-  out_image             Path of the resampled volume.
-
-options:
-  -h, --help            show this help message and exit
-  --ref REF             Reference volume to resample to.
-  --volume_size VOLUME_SIZE [VOLUME_SIZE ...]
-                        Sets the size for the volume. If a single value Y is given, it will resample to a shape of Y x Y x Y.
-  --voxel_size VOXEL_SIZE [VOXEL_SIZE ...]
-                        Sets the voxel size. If a single value Y is given, it will set a voxel size of Y x Y x Y.
-  --iso_min             Resample the volume to R x R x R with R being the smallest current voxel dimension.
-  --interp {nn,lin,quad,cubic}
-                        Interpolation mode.
-                        nn: nearest neighbour
-                        lin: linear
-                        quad: quadratic
-                        cubic: cubic
-                        Defaults to linear.
-  --enforce_dimensions  Enforce the reference volume dimension.
-  -v [{DEBUG,INFO,WARNING}]
-                        Produces verbose output depending on the provided level.
-                        Default level is warning, default when using -v is info.
-  -f                    Force overwriting of the output files.
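
Note: a minimal sketch of what a --voxel_size resampling can look like with
Dipy's public reslice helper; this is an illustration under stated
assumptions, not necessarily what scil_volume_resample.py does internally
(filenames are placeholders):

    import nibabel as nib
    from dipy.align.reslice import reslice

    img = nib.load('in.nii.gz')
    zooms = img.header.get_zooms()[:3]
    new_zooms = (1.0, 1.0, 1.0)  # e.g. the equivalent of --voxel_size 1

    # order=1 is linear interpolation, matching the 'lin' default above.
    data, affine = reslice(img.get_fdata(), img.affine, zooms, new_zooms,
                           order=1)
    nib.save(nib.Nifti1Image(data, affine), 'out.nii.gz')
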
diff --git a/scripts/.hidden/scil_volume_reshape_to_reference.py.help b/scripts/.hidden/scil_volume_reshape_to_reference.py.help
deleted file mode 100644
index 4cf6dfc08..000000000
--- a/scripts/.hidden/scil_volume_reshape_to_reference.py.help
+++ /dev/null
@@ -1,29 +0,0 @@
-usage: scil_volume_reshape_to_reference.py [-h]
-                                           [--interpolation {linear,nearest}]
-                                           [--keep_dtype]
-                                           [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                           in_file in_ref_file out_file
-
-Reshape / reslice / resample *.nii or *.nii.gz using a reference.
-This script can be used to align freesurfer/civet output, such as .mgz,
-to the original input image.
-
->>> scil_volume_reshape_to_reference.py wmparc.mgz t1.nii.gz wmparc_t1.nii.gz\
-    --interpolation nearest
-
-Formerly: scil_reshape_to_reference.py
-
-positional arguments:
-  in_file      Path of the image (.nii or .mgz) to be reshaped.
-  in_ref_file  Path of the reference image (.nii).
-  out_file     Output filename of the reshaped image (.nii).
-
-options:
-  -h, --help   show this help message and exit
-  --interpolation {linear,nearest}
-               Interpolation: "linear" or "nearest". [linear]
-  --keep_dtype If True, keeps the data_type of the input image (in_file) when saving the output image (out_file).
-  -v [{DEBUG,INFO,WARNING}]
-               Produces verbose output depending on the provided level.
-               Default level is warning, default when using -v is info.
-  -f           Force overwriting of the output files.
diff --git a/scripts/.hidden/scil_volume_stats_in_ROI.py.help b/scripts/.hidden/scil_volume_stats_in_ROI.py.help
deleted file mode 100644
index 0ea80791c..000000000
--- a/scripts/.hidden/scil_volume_stats_in_ROI.py.help
+++ /dev/null
@@ -1,39 +0,0 @@
-usage: scil_volume_stats_in_ROI.py [-h]
-                                   (--metrics_dir dir | --metrics file [file ...])
-                                   [--bin] [--normalize_weights]
-                                   [--indent INDENT] [--sort_keys]
-                                   [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                   in_mask
-
-Compute the statistics (mean, std) of scalar maps, which can represent
-diffusion metrics, in a ROI. Prints the results.
-
-The mask can either be a binary mask, or a weighting mask. If the mask is
-a weighting mask it should either contain floats between 0 and 1 or should be
-normalized with --normalize_weights. IMPORTANT: if the mask contains weights
-(and not 0 and 1 exclusively), the standard deviation will also be weighted.
-
-positional arguments:
-  in_mask              Mask volume filename.
-                       Can be a binary mask or a weighted mask.
-
-options:
-  -h, --help           show this help message and exit
-  --bin                If set, will consider every value of the mask higher than 0 to be
-                       part of the mask (equivalent weighting for every voxel).
-  --normalize_weights  If set, the weights will be normalized to the [0, 1] range.
-  -v [{DEBUG,INFO,WARNING}]
-                       Produces verbose output depending on the provided level.
-                       Default level is warning, default when using -v is info.
-  -f                   Force overwriting of the output files.
-
-Metrics input options:
-  --metrics_dir dir    Name of the directory containing the metrics files: all
-                       nifti files inside will be loaded.
-  --metrics file [file ...]
-                       Metrics nifti filenames. List of the names of the metrics
-                       files, in nifti format.
-
-Json options:
-  --indent INDENT      Indent for json pretty print.
-  --sort_keys          Sort keys in output json.
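
Note: the weighted-mask behaviour described in scil_volume_stats_in_ROI.py's
help reduces to a weighted mean and a weighted standard deviation; a minimal
numpy sketch (filenames are placeholders, and zero-valued voxels are kept here
for simplicity):

    import nibabel as nib
    import numpy as np

    metric = nib.load('fa.nii.gz').get_fdata().ravel()
    weights = nib.load('weighted_mask.nii.gz').get_fdata().ravel()  # in [0, 1]

    mean = np.average(metric, weights=weights)
    # With non-binary weights, the std is weighted too, as the help warns.
    std = np.sqrt(np.average((metric - mean) ** 2, weights=weights))
    print(f'mean={mean:.4f}, std={std:.4f}')
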
diff --git a/scripts/.hidden/scil_volume_stats_in_labels.py.help b/scripts/.hidden/scil_volume_stats_in_labels.py.help
deleted file mode 100644
index 2a70a383e..000000000
--- a/scripts/.hidden/scil_volume_stats_in_labels.py.help
+++ /dev/null
@@ -1,22 +0,0 @@
-usage: scil_volume_stats_in_labels.py [-h] [-v [{DEBUG,INFO,WARNING}]] [-f]
-                                      in_labels in_labels_lut in_map
-
-Computes the information from the input map for each cortical region
-(corresponding to an atlas).
-
-Hint: For instance, this script could be useful if you have a seed map from a
-specific bundle, to know from which regions it originated.
-
-Formerly: scil_compute_seed_by_labels.py
-
-positional arguments:
-  in_labels      Path of the input label file.
-  in_labels_lut  Path of the LUT file corresponding to labels, used to name the regions of interest.
-  in_map         Path of the input map file. Expecting a 3D file.
-
-options:
-  -h, --help     show this help message and exit
-  -v [{DEBUG,INFO,WARNING}]
-                 Produces verbose output depending on the provided level.
-                 Default level is warning, default when using -v is info.
-  -f             Force overwriting of the output files.

From e6fc235a21e15b2ea489d8d35a0bdb6ffbc30608 Mon Sep 17 00:00:00 2001
From: bouj1113
Date: Thu, 1 Aug 2024 14:00:58 -0400
Subject: [PATCH 35/69] added a try-except block for importing nltk with a
 message if the package is not installed

---
 scripts/scil_search_keywords.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py
index 4e770b5c8..f43c46087 100755
--- a/scripts/scil_search_keywords.py
+++ b/scripts/scil_search_keywords.py
@@ -25,7 +25,13 @@ import logging
 import pathlib
 
-import nltk
+try:
+    import nltk
+    nltk.download('punkt', quiet=True)
+except ImportError:
+    print("You must install the 'nltk' package to use this script. Please run 'pip install nltk'.")
+    exit(1)
+
 from colorama import Fore, Style
 import json

From c95762a4d45018efd13039a923911397d5620e90 Mon Sep 17 00:00:00 2001
From: bouj1113
Date: Thu, 1 Aug 2024 14:44:42 -0400
Subject: [PATCH 36/69] delete generate_help_files script and add it as a
 function to scilpy_bot.py

---
 scilpy/utils/generate_help_files.py | 44 --------
 scilpy/utils/scilpy_bot.py          | 62 ++++++++++++++++++++++++++++++++----
 2 files changed, 57 insertions(+), 49 deletions(-)
 delete mode 100644 scilpy/utils/generate_help_files.py

diff --git a/scilpy/utils/generate_help_files.py b/scilpy/utils/generate_help_files.py
deleted file mode 100644
index 36991e258..000000000
--- a/scilpy/utils/generate_help_files.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-This script iterates over all Python scripts in the 'scripts' directory,
-runs each script with the '--h' flag to generate help text,
-and saves the output to corresponding hidden files in the '.hidden' directory.
-
-By doing this, we can precompute the help outputs for each script,
-which can be useful for faster searches.
-
-If a help file already exists for a script, the script is skipped,
-and the existing help file is left unchanged.
-
-The help output is saved in a hidden directory to avoid clutter in the main scripts directory.
-""" - -import subprocess -import pathlib - -scripts_dir= pathlib.Path(__file__).parent.parent.parent /'scripts' - - -# Hidden directory to store help files -hidden_dir = scripts_dir / '.hidden' -hidden_dir.mkdir(exist_ok=True) - -# Iterate over all scripts and generate help files -for script in scripts_dir.glob('*.py'): - if script.name == '__init__.py' or script.name == 'scil_search_keywords.py': - continue - help_file = hidden_dir / f'{script.name}.help' - # Check if help file already exists - if help_file.exists(): - print(f'Help file for {script.name} already exists. Skipping.') - continue - - # Run the script with --h and capture the output - result = subprocess.run(['python', script, '--h'], capture_output=True, text=True) - - # Save the output to the hidden file - with open(help_file, 'w') as f: - f.write(result.stdout) - - print(f'Help output for {script.name} saved to {help_file}') \ No newline at end of file diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py index a56c6d2e7..be87e353c 100644 --- a/scilpy/utils/scilpy_bot.py +++ b/scilpy/utils/scilpy_bot.py @@ -156,12 +156,64 @@ def _stem_phrase(phrase): def _generate_help_files(): """ - Call the external script generate_help_files to generate help files + This function iterates over all Python scripts in the 'scripts' directory, + runs each script with the '--h' flag to generate help text, + and saves the output to corresponding hidden files in the '.hidden' directory. + + By doing this, we can precompute the help outputs for each script, + which can be useful for faster searches. + + If a help file already exists for a script, the script is skipped, + and the existing help file is left unchanged. + + The help output is saved in a hidden directory to avoid clutter in the main scripts directory. """ - script_path = pathlib.Path(__file__).parent /'generate_help_files.py' - #calling the extrernal script generate_help_files - subprocess.run(['python', script_path], check=True) - + + scripts_dir= pathlib.Path(__file__).parent.parent.parent /'scripts' + + + # Hidden directory to store help files + hidden_dir = scripts_dir / '.hidden' + hidden_dir.mkdir(exist_ok=True) + + # Iterate over all scripts and generate help files + for script in scripts_dir.glob('*.py'): + if script.name == '__init__.py' or script.name == 'scil_search_keywords.py': + continue + help_file = hidden_dir / f'{script.name}.help' + # Check if help file already exists + if help_file.exists(): + print(f'Help file for {script.name} already exists. Skipping.') + continue + + # Run the script with --h and capture the output + result = subprocess.run(['python', script, '--h'], capture_output=True, text=True) + + # Save the output to the hidden file + with open(help_file, 'w') as f: + f.write(result.stdout) + + print(f'Help file saved to {help_file}') + + + # Check if any help files are missing and regenerate them + for script in scripts_dir.glob('*.py'): + if script.name == '__init__.py' or script.name == 'scil_search_keywords.py': + continue + help_file = hidden_dir / f'{script.name}.help' + if not help_file.exists(): + # Run the script with --h and capture the output + result = subprocess.run(['python', script, '--h'], capture_output=True, text=True) + + # Save the output to the hidden file + with open(help_file, 'w') as f: + f.write(result.stdout) + + print(f'Regenerated help output for {script.name}') + + + + def _highlight_keywords(text, stemmed_keywords): """ Highlight the stemmed keywords in the given text using colorama. 
From 1e02c5e9d134d2084374e2f969f15be4027ee9a6 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Thu, 1 Aug 2024 15:32:26 -0400 Subject: [PATCH 37/69] Reverse output of the result --- scripts/scil_search_keywords.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index f43c46087..0a85e82dd 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -192,7 +192,7 @@ def update_matches_and_scores(filename, score_details): # Sort matches by score and display them else: - sorted_matches = sorted(matches, key=lambda x: scores[x]['total_score'], reverse=True) + sorted_matches = sorted(matches, key=lambda x: scores[x]['total_score'], reverse=False) logging.info(_make_title(' Results Ordered by Score ')) for match in sorted_matches: @@ -209,6 +209,7 @@ def update_matches_and_scores(filename, score_details): logging.info(f"Total Score: {scores[match]['total_score']}") logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") logging.info("\n") + logging.info(_make_title(' Results Ordered by Score (Best results at the bottom) ')) From 6990cda87d9f6b3055a0bb0b5c13573547fa864f Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Thu, 1 Aug 2024 19:26:49 -0400 Subject: [PATCH 38/69] merge synonyms acronyms and keywords in the same json file --- scilpy/utils/Vocabulary/Vocabulary.json | 2418 +++++++++++++++++++++++ 1 file changed, 2418 insertions(+) create mode 100644 scilpy/utils/Vocabulary/Vocabulary.json diff --git a/scilpy/utils/Vocabulary/Vocabulary.json b/scilpy/utils/Vocabulary/Vocabulary.json new file mode 100644 index 000000000..84babcad5 --- /dev/null +++ b/scilpy/utils/Vocabulary/Vocabulary.json @@ -0,0 +1,2418 @@ +{ + "scripts": [ + { + "name": "scil_bids_validate.py", + "keywords": [] + }, + { + "name": "scil_bingham_metrics.py", + "keywords": [ + "fiber density", + "fiber spread", + "fiber fraction", + "fixel" + ] + }, + { + "name": "scil_btensor_metrics.py", + "keywords": [ + "b-tensor", + "b-tensor encoding", + "tensor-valued diffusion MRI", + "micro-FA", + "uFA", + "order parameter", + "OP", + "DIVIDE", + "microstructure", + "linear tensor encoding (LTE)", + "planar tensor encoding (PTE)", + "spherical tensor encoding (STE)", + "multidimensional diffusion MRI" + ] + }, + { + "name": "scil_bundle_clean_qbx_clusters.py", + "keywords": [] + }, + { + "name": "scil_bundle_compute_centroid.py", + "keywords": [] + }, + { + "name": "scil_bundle_compute_endpoints_map.py", + "keywords": [] + }, + { + "name": "scil_bundle_diameter.py", + "keywords": [] + }, + { + "name": "scil_bundle_filter_by_occurence.py", + "keywords": [] + }, + { + "name": "scil_bundle_generate_priors.py", + "keywords": [] + }, + { + "name": "scil_bundle_label_map.py", + "keywords": [ + "parcellate", + "subdivide", + "split" + ] + }, + { + "name": "scil_bundle_mean_fixel_afd_from_hdf5.py", + "keywords": [] + }, + { + "name": "scil_bundle_mean_fixel_afd.py", + "keywords": [] + }, + { + "name": "scil_bundle_mean_fixel_bingham_metric.py", + "keywords": [ + "tractometry", + "lobe metrics", + "fiber density", + "fiber spread", + "fiber fraction", + "mean along bundle" + ] + }, + { + "name": "scil_bundle_mean_std.py", + "keywords": [] + }, + { + "name": "scil_bundle_pairwise_comparison.py", + "keywords": [] + }, + { + "name": "scil_bundle_reject_outliers.py", + "keywords": [] + }, + { + "name": "scil_bundle_score_many_bundles_one_tractogram.py", + "keywords": [] + }, + { + "name": "scil_bundle_score_same_bundle_many_segmentations.py", + 
"keywords": [] + }, + { + "name": "scil_bundle_shape_measures.py", + "keywords": [ + "geometry" + ] + }, + { + "name": "scil_bundle_uniformize_endpoints.py", + "keywords": [] + }, + { + "name": "scil_bundle_volume_per_label.py", + "keywords": [] + }, + { + "name": "scil_connectivity_compare_populations.py", + "keywords": [] + }, + { + "name": "scil_connectivity_compute_matrices.py", + "keywords": [] + }, + { + "name": "scil_connectivity_compute_pca.py", + "keywords": [] + }, + { + "name": "scil_connectivity_filter.py", + "keywords": [] + }, + { + "name": "scil_connectivity_graph_measures.py", + "keywords": [] + }, + { + "name": "scil_connectivity_hdf5_average_density_map.py", + "keywords": [] + }, + { + "name": "scil_connectivity_math.py", + "keywords": [] + }, + { + "name": "scil_connectivity_normalize.py", + "keywords": [] + }, + { + "name": "scil_connectivity_pairwise_agreement.py", + "keywords": [] + }, + { + "name": "scil_connectivity_print_filenames.py", + "keywords": [] + }, + { + "name": "scil_connectivity_reorder_rois.py", + "keywords": [] + }, + { + "name": "scil_denoising_nlmeans.py", + "keywords": [] + }, + { + "name": "scil_dki_metrics.py", + "keywords": [] + }, + { + "name": "scil_dti_convert_tensors.py", + "keywords": [ + "tensor", + "tensors", + "triangular matrix", + "fsl format", + "nifti format", + "mrtrix format", + "dipy format" + ] + }, + { + "name": "scil_dti_metrics.py", + "keywords": [ + "dti", + "metrics", + "diffusion tensor", + "FA", + "MD", + "AD", + "RD", + "RGB", + "eigenvector", + "eigenvalue", + "diffusivity" + ] + }, + { + "name": "scil_dwi_apply_bias_field.py", + "keywords": [] + }, + { + "name": "scil_dwi_compute_snr.py", + "keywords": [] + }, + { + "name": "scil_dwi_concatenate.py", + "keywords": [ + "merge", + "fuse", + "concatenate", + "diffusion data", + "DWI" + ] + }, + { + "name": "scil_dwi_convert_FDF.py", + "keywords": [] + }, + { + "name": "scil_dwi_detect_volume_outliers.py", + "keywords": [] + }, + { + "name": "scil_dwi_extract_b0.py", + "keywords": [ + "b0 extraction", + "b0", + "b-value 0", + "extract" + ] + }, + { + "name": "scil_dwi_extract_shell.py", + "keywords": [ + "shell extraction", + "b-value extraction", + "DWI", + "shell/b-value selection", + "extract", + "DWI split", + "DWI splitting", + "multiple shells" + ] + }, + { + "name": "scil_dwi_powder_average.py", + "keywords": [ + "powder average", + "DWI" + ] + }, + { + "name": "scil_dwi_prepare_eddy_command.py", + "keywords": [] + }, + { + "name": "scil_dwi_prepare_topup_command.py", + "keywords": [] + }, + { + "name": "scil_dwi_reorder_philips.py", + "keywords": [ + "Philips", + "DWI reorder", + "original gradient table" + ] + }, + { + "name": "scil_dwi_split_by_indices.py", + "keywords": [ + "DWI splitting", + "DWI split", + "indices" + ] + }, + { + "name": "scil_dwi_to_sh.py", + "keywords": [ + "signal", + "spherical harmonics" + ] + }, + { + "name": "scil_fodf_max_in_ventricles.py", + "keywords": [ + "ventricles", + "maximum fODF", + "absolute threshold" + ] + }, + { + "name": "scil_fodf_memsmt.py", + "keywords": [ + "b-tensor", + "b-tensor encoding", + "multi-encoding", + "multi-shell", + "multi-tissue", + "memsmt", + "linear tensor encoding (LTE)", + "planar tensor encoding (PTE)", + "spherical tensor encoding (STE)", + "multidimensional diffusion MRI", + "volume fraction", + "CSD", + "constrained spherical deconvolution", + "fODF" + ] + }, + { + "name": "scil_fodf_metrics.py", + "keywords": [ + "fODF metrics", + "NuFO", + "peaks", + "directions", + "peak values", + "peak 
indices", + "rgb", + "afd" + ] + }, + { + "name": "scil_fodf_msmt.py", + "keywords": [ + "CSD", + "constrained spherical deconvolution", + "multi-shell", + "multi-tissue", + "msmt", + "volume fraction", + "fODF" + ] + }, + { + "name": "scil_fodf_ssst.py", + "keywords": [ + "CSD", + "constrained spherical deconvolution", + "single-shell", + "single-tissue", + "ssst", + "fODF" + ] + }, + { + "name": "scil_fodf_to_bingham.py", + "keywords": [ + "lobe", + "lobe-specific", + "bingham-odf" + ] + }, + { + "name": "scil_freewater_maps.py", + "keywords": [] + }, + { + "name": "scil_freewater_priors.py", + "keywords": [] + }, + { + "name": "scil_frf_mean.py", + "keywords": [ + "fiber response function", + "response function", + "RF", + "FRF", + "mean", + "mean FRF" + ] + }, + { + "name": "scil_frf_memsmt.py", + "keywords": [ + "fiber response function", + "response function", + "RF", + "FRF", + "b-tensor", + "b-tensor encoding", + "multi-encoding", + "multi-shell", + "multi-tissue", + "memsmt", + "linear tensor encoding (LTE)", + "planar tensor encoding (PTE)", + "spherical tensor encoding (STE)", + "multidimensional diffusion MRI" + ] + }, + { + "name": "scil_frf_msmt.py", + "keywords": [ + "fiber response function", + "response function", + "RF", + "FRF", + "multi-shell", + "multi-tissue", + "msmt" + ] + }, + { + "name": "scil_frf_set_diffusivities.py", + "keywords": [ + "fiber response function", + "response function", + "RF", + "FRF", + "diffusivity", + "diffusivities", + "fixed FRF" + ] + }, + { + "name": "scil_frf_ssst.py", + "keywords": [ + "fiber response function", + "response function", + "RF", + "FRF", + "single-shell", + "single-tissue", + "ssst" + ] + }, + { + "name": "scil_get_version.py", + "keywords": [] + }, + { + "name": "scil_gradients_apply_transform.py", + "keywords": [ + "gradients", + "bvecs", + "b-vectors", + "transformation", + "transform" + ] + }, + { + "name": "scil_gradients_convert.py", + "keywords": [ + "gradients", + "gradient table", + "fsl format", + "mrtrix format", + "bval", + "bvec" + ] + }, + { + "name": "scil_gradients_generate_sampling.py", + "keywords": [ + "gradients", + "gradient table", + "sampling scheme", + "sampling", + "hardi", + "multi-shell", + "caruyer", + "optimized gradients" + ] + }, + { + "name": "scil_gradients_modify_axes.py", + "keywords": [] + }, + { + "name": "scil_gradients_round_bvals.py", + "keywords": [ + "bvals", + "b-value", + "round bvals", + "shell" + ] + }, + { + "name": "scil_gradients_validate_correct_eddy.py", + "keywords": [] + }, + { + "name": "scil_gradients_validate_correct.py", + "keywords": [ + "fiber coherence index", + "coherence" + ] + }, + { + "name": "scil_header_print_info.py", + "keywords": [] + }, + { + "name": "scil_header_validate_compatibility.py", + "keywords": [] + }, + { + "name": "scil_json_convert_entries_to_xlsx.py", + "keywords": [] + }, + { + "name": "scil_json_harmonize_entries.py", + "keywords": [] + }, + { + "name": "scil_json_merge_entries.py", + "keywords": [] + }, + { + "name": "scil_labels_combine.py", + "keywords": [] + }, + { + "name": "scil_labels_dilate.py", + "keywords": [] + }, + { + "name": "scil_labels_remove.py", + "keywords": [] + }, + { + "name": "scil_labels_split_volume_by_ids.py", + "keywords": [] + }, + { + "name": "scil_labels_split_volume_from_lut.py", + "keywords": [] + }, + { + "name": "scil_lesions_info.py", + "keywords": [] + }, + { + "name": "scil_mti_adjust_B1_header.py", + "keywords": [ + "MTI", + "magnetization transfer", + "MT", + "B1 map", + "header", + "B1" + ] + }, + { 
+ "name": "scil_mti_maps_ihMT.py", + "keywords": [ + "MTI", + "magnetization transfer", + "MT", + "ihMT", + "ihMTR", + "ihMTsat", + "myelin", + "MTR", + "MTsat" + ] + }, + { + "name": "scil_mti_maps_MT.py", + "keywords": [ + "MTI", + "magnetization transfer", + "MT", + "MTR", + "MTsat", + "myelin" + ] + }, + { + "name": "scil_NODDI_maps.py", + "keywords": [] + }, + { + "name": "scil_NODDI_priors.py", + "keywords": [] + }, + { + "name": "scil_plot_stats_per_point.py", + "keywords": [] + }, + { + "name": "scil_qball_metrics.py", + "keywords": [ + "CSA", + "QBI", + "q-ball imaging", + "diffusion odf" + ] + }, + { + "name": "scil_rgb_convert.py", + "keywords": [] + }, + { + "name": "scil_sh_convert.py", + "keywords": [ + "spherical harmonics", + "tournier", + "mrtrix", + "descoteaux", + "dipy", + "modern", + "legacy" + ] + }, + { + "name": "scil_sh_fusion.py", + "keywords": [ + "spherical harmonics", + "SH", + "fusion", + "largest magnitude", + "merge", + "coefficients" + ] + }, + { + "name": "scil_sh_to_aodf.py", + "keywords": [ + "asymmetric", + "asymmetries", + "filtering", + "full basis" + ] + }, + { + "name": "scil_sh_to_rish.py", + "keywords": [ + "rotation invariant spherical harmonics", + "features" + ] + }, + { + "name": "scil_sh_to_sf.py", + "keywords": [ + "spherical harmonics", + "spherical functions", + "SH", + "SF", + "convertion", + "conversion" + ] + }, + { + "name": "scil_stats_group_comparison.py", + "keywords": [] + }, + { + "name": "scil_surface_apply_transform.py", + "keywords": [ + "registration", + "warp", + "transformation", + "surface", + "mesh", + "vtk FreeSurfer" + ] + }, + { + "name": "scil_surface_convert.py", + "keywords": [ + "surface", + "mesh", + "vtk FreeSurfer" + ] + }, + { + "name": "scil_surface_flip.py", + "keywords": [ + "surface", + "mesh", + "vtk FreeSurfer" + ] + }, + { + "name": "scil_surface_smooth.py", + "keywords": [ + "surface", + "mesh", + "vtk FreeSurfer" + ] + }, + { + "name": "scil_tracking_local_dev.py", + "keywords": [ + "development", + "runge-kutta", + "pure-python", + "onboarding", + "tractography", + "dipy" + ] + }, + { + "name": "scil_tracking_local.py", + "keywords": [ + "eudx", + "tractography", + "tracking", + "peak tracking", + "local tracking", + "probabilistic", + "deterministic", + "prob", + "det" + ] + }, + { + "name": "scil_tracking_pft_maps_edit.py", + "keywords": [ + "particule filtering tractography", + "cmc" + ] + }, + { + "name": "scil_tracking_pft_maps.py", + "keywords": [ + "particle filter tractography", + "continuous map criterion", + "tracking", + "fodf", + "cmc", + "particle filtering tractography" + ] + }, + { + "name": "scil_tracking_pft.py", + "keywords": [ + "particle filter tractography", + "continuous map criterion", + "tracking", + "fodf" + ] + }, + { + "name": "scil_tractogram_alter.py", + "keywords": [] + }, + { + "name": "scil_tractogram_apply_transform.py", + "keywords": [ + "ants", + "registration", + "affine", + "linear", + "nonlinear" + ] + }, + { + "name": "scil_tractogram_apply_transform_to_hdf5.py", + "keywords": [] + }, + { + "name": "scil_tractogram_assign_custom_color.py", + "keywords": [] + }, + { + "name": "scil_tractogram_assign_uniform_color.py", + "keywords": [] + }, + { + "name": "scil_tractogram_commit.py", + "keywords": [ + "microstructure informed", + "filtering", + "mit" + ] + }, + { + "name": "scil_tractogram_compress.py", + "keywords": [] + }, + { + "name": "scil_tractogram_compute_density_map.py", + "keywords": [ + "TDI", + "track density imaging", + "streamline count" + ] + }, + { + 
"name": "scil_tractogram_compute_TODI.py", + "keywords": [ + "track orientation density imaging", + "track density imaging", + "TDI" + ] + }, + { + "name": "scil_tractogram_convert_hdf5_to_trk.py", + "keywords": [] + }, + { + "name": "scil_tractogram_convert.py", + "keywords": [] + }, + { + "name": "scil_tractogram_count_streamlines.py", + "keywords": [] + }, + { + "name": "scil_tractogram_cut_streamlines.py", + "keywords": [] + }, + { + "name": "scil_tractogram_detect_loops.py", + "keywords": [] + }, + { + "name": "scil_tractogram_dpp_math.py", + "keywords": [ + "tractogram", + "data per point" + ] + }, + { + "name": "scil_tractogram_extract_ushape.py", + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_anatomy.py", + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_length.py", + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_orientation.py", + "keywords": [] + }, + { + "name": "scil_tractogram_filter_by_roi.py", + "keywords": [ + "segment", + "atlas" + ] + }, + { + "name": "scil_tractogram_fix_trk.py", + "keywords": [] + }, + { + "name": "scil_tractogram_flip.py", + "keywords": [] + }, + { + "name": "scil_tractogram_math.py", + "keywords": [] + }, + { + "name": "scil_tractogram_pairwise_comparison.py", + "keywords": [] + }, + { + "name": "scil_tractogram_print_info.py", + "keywords": [] + }, + { + "name": "scil_tractogram_project_map_to_streamlines.py", + "keywords": [] + }, + { + "name": "scil_tractogram_project_streamlines_to_map.py", + "keywords": [] + }, + { + "name": "scil_tractogram_qbx.py", + "keywords": [ + "clustering" + ] + }, + { + "name": "scil_tractogram_register.py", + "keywords": [] + }, + { + "name": "scil_tractogram_remove_invalid.py", + "keywords": [] + }, + { + "name": "scil_tractogram_resample_nb_points.py", + "keywords": [] + }, + { + "name": "scil_tractogram_resample.py", + "keywords": [] + }, + { + "name": "scil_tractogram_seed_density_map.py", + "keywords": [] + }, + { + "name": "scil_tractogram_segment_and_score.py", + "keywords": [] + }, + { + "name": "scil_tractogram_segment_bundles_for_connectivity.py", + "keywords": [] + }, + { + "name": "scil_tractogram_segment_bundles.py", + "keywords": [] + }, + { + "name": "scil_tractogram_segment_one_bundle.py", + "keywords": [] + }, + { + "name": "scil_tractogram_shuffle.py", + "keywords": [] + }, + { + "name": "scil_tractogram_smooth.py", + "keywords": [] + }, + { + "name": "scil_tractogram_split.py", + "keywords": [] + }, + { + "name": "scil_viz_bingham_fit.py", + "keywords": [ + "visualisation", + "bingham distributions", + "bingham odf" + ] + }, + { + "name": "scil_viz_bundle.py", + "keywords": [ + "visualisation", + "bundle", + "tractogram", + "streamlines" + ] + }, + { + "name": "scil_viz_bundle_screenshot_mni.py", + "keywords": [] + }, + { + "name": "scil_viz_bundle_screenshot_mosaic.py", + "keywords": [] + }, + { + "name": "scil_viz_connectivity.py", + "keywords": [] + }, + { + "name": "scil_viz_dti_screenshot.py", + "keywords": [] + }, + { + "name": "scil_viz_fodf.py", + "keywords": [ + "visualize", + "fiber odf", + "odf", + "sh", + "peaks", + "background" + ] + }, + { + "name": "scil_viz_gradients_screenshot.py", + "keywords": [] + }, + { + "name": "scil_viz_tractogram_seeds_3d.py", + "keywords": [ + "visualize", + "seed", + "density", + "3D", + "seed density" + ] + }, + { + "name": "scil_viz_tractogram_seeds.py", + "keywords": [ + "visualize", + "seed", + "streamline", + "streamline origin" + ] + }, + { + "name": "scil_viz_volume_histogram.py", + "keywords": [ + 
"visualize", + "histogram", + "metric" + ] + }, + { + "name": "scil_viz_volume_scatterplot.py", + "keywords": [ + "visualize", + "scatterplot", + "distribution", + "metric" + ] + }, + { + "name": "scil_viz_volume_screenshot_mosaic.py", + "keywords": [] + }, + { + "name": "scil_viz_volume_screenshot.py", + "keywords": [] + }, + { + "name": "scil_volume_apply_transform.py", + "keywords": [] + }, + { + "name": "scil_volume_b0_synthesis.py", + "keywords": [] + }, + { + "name": "scil_volume_count_non_zero_voxels.py", + "keywords": [] + }, + { + "name": "scil_volume_crop.py", + "keywords": [] + }, + { + "name": "scil_volume_flip.py", + "keywords": [] + }, + { + "name": "scil_volume_math.py", + "keywords": [] + }, + { + "name": "scil_volume_remove_outliers_ransac.py", + "keywords": [] + }, + { + "name": "scil_volume_resample.py", + "keywords": [] + }, + { + "name": "scil_volume_reshape_to_reference.py", + "keywords": [] + }, + { + "name": "scil_volume_stats_in_labels.py", + "keywords": [] + }, + { + "name": "scil_volume_stats_in_ROI.py", + "keywords": [] + } + ], + "synonyms": [ + [ + "Bundle", + "tract", + "pathway", + "fasciculus" + ], + [ + "multi-shells", + "multishell", + "multi shell", + "msmt" + ], + [ + "SH", + "Spherical Harmonics" + ], + + [ + "single-shell", + "single shell", + "ssst" + ], + [ + "ODF", + "Orientation Distribution Function" + ], + [ + "DWI", + "Diffusion Weighted Imaging" + ], + [ + "shell", + "bval", + "b-value", + "bvals" + ], + [ + "b-tensor encoding", + "tensor-valued" + ], + [ + "surface", + "mesh" + ], + [ + "merge", + "fuse", + "concatenate", + "add" + ], + [ + "parcellate", + "subdivide", + "split", + "divide" + ] + ], + "acronyms": + [ + { + "abbreviation": "k-nn", + "Description": "k-nearest neighbors" + }, + { + "abbreviation": "1d", + "Description": "one-dimensional" + }, + { + "abbreviation": "2d", + "Description": "two-dimensional" + }, + { + "abbreviation": "3d", + "Description": "three-dimensional" + }, + { + "abbreviation": "ac", + "Description": "anterior commissure" + }, + { + "abbreviation": "ae", + "Description": "autoencoder" + }, + { + "abbreviation": "af", + "Description": "arcuate fascicle" + }, + { + "abbreviation": "ai", + "Description": "artificial intelligence" + }, + { + "abbreviation": "ann", + "Description": "artificial neural network" + }, + { + "abbreviation": "ar", + "Description": "acoustic radiation" + }, + { + "abbreviation": "atr", + "Description": "anterior thalamic radiation" + }, + { + "abbreviation": "cc", + "Description": "corpus callosum" + }, + { + "abbreviation": "cing", + "Description": "cingulum" + }, + { + "abbreviation": "cinta", + "Description": "clustering in tractography using autoencoders" + }, + { + "abbreviation": "cnn", + "Description": "convolutional neural network" + }, + { + "abbreviation": "csd", + "Description": "constrained spherical deconvolution" + }, + { + "abbreviation": "csf", + "Description": "cerebrospinal fluid" + }, + { + "abbreviation": "cst", + "Description": "corticospinal tract" + }, + { + "abbreviation": "dl", + "Description": "deep learning" + }, + { + "abbreviation": "dmri", + "Description": "diffusion magnetic resonance imaging" + }, + { + "abbreviation": "dodf", + "Description": "diffusion orientation distribution function" + }, + { + "abbreviation": "dt", + "Description": "diffusion tensor" + }, + { + "abbreviation": "dti", + "Description": "diffusion tensor imaging" + }, + { + "abbreviation": "dw-mri", + "Description": "diffusion-weighted magnetic resonance imaging" + }, + { + 
"abbreviation": "dwi", + "Description": "diffusion-weighted imaging" + }, + { + "abbreviation": "dwm", + "Description": "deep white matter" + }, + { + "abbreviation": "eap", + "Description": "ensemble average (diffusion) propagator" + }, + { + "abbreviation": "fa", + "Description": "fractional anisotropy" + }, + { + "abbreviation": "fat", + "Description": "frontal aslant tract" + }, + { + "abbreviation": "fc", + "Description": "fully connected" + }, + { + "abbreviation": "finta", + "Description": "filtering in tractography using autoencoders" + }, + { + "abbreviation": "fmri", + "Description": "functional magnetic resonance imaging" + }, + { + "abbreviation": "fod", + "Description": "fiber orientation distribution" + }, + { + "abbreviation": "fodf", + "Description": "fiber orientation distribution function" + }, + { + "abbreviation": "ft", + "Description": "fourier transform" + }, + { + "abbreviation": "fx", + "Description": "fornix" + }, + { + "abbreviation": "ge", + "Description": "gradient echo" + }, + { + "abbreviation": "gesta", + "Description": "generative sampling in bundle tractography using autoencoders" + }, + { + "abbreviation": "gm", + "Description": "gray matter" + }, + { + "abbreviation": "hardi", + "Description": "high angular resolution diffusion imaging" + }, + { + "abbreviation": "ic", + "Description": "internal capsule" + }, + { + "abbreviation": "icp", + "Description": "inferior cerebellar peduncle" + }, + { + "abbreviation": "ifof", + "Description": "inferior fronto-occipital fascicle" + }, + { + "abbreviation": "ils", + "Description": "inferior longitudinal system" + }, + { + "abbreviation": "jif", + "Description": "journal impact factor" + }, + { + "abbreviation": "mcp", + "Description": "middle cerebellar peduncle" + }, + { + "abbreviation": "ml", + "Description": "machine learning" + }, + { + "abbreviation": "mlp", + "Description": "multilayer perceptron" + }, + { + "abbreviation": "mls", + "Description": "middle longitudinal system" + }, + { + "abbreviation": "mr", + "Description": "magnetic resonance" + }, + { + "abbreviation": "mri", + "Description": "magnetic resonance imaging" + }, + { + "abbreviation": "nn", + "Description": "neural network" + }, + { + "abbreviation": "nos", + "Description": "number of streamlines" + }, + { + "abbreviation": "odf", + "Description": "orientation distribution function (also referred to as orientation density function)" + }, + { + "abbreviation": "or", + "Description": "optic radiation" + }, + { + "abbreviation": "pc", + "Description": "posterior commissure" + }, + { + "abbreviation": "pca", + "Description": "principal component analysis" + }, + { + "abbreviation": "pdf", + "Description": "probability density function" + }, + { + "abbreviation": "pgse", + "Description": "pulsed-gradient spin echo" + }, + { + "abbreviation": "pli", + "Description": "polarized light imaging" + }, + { + "abbreviation": "popt", + "Description": "parieto-occipital pontine tract" + }, + { + "abbreviation": "ps-oct", + "Description": "polarization-sensitive optical coherence tomography" + }, + { + "abbreviation": "rf", + "Description": "radio frequency" + }, + { + "abbreviation": "rnn", + "Description": "recurrent neural network" + }, + { + "abbreviation": "roc", + "Description": "receiver operating characteristic" + }, + { + "abbreviation": "scp", + "Description": "superior cerebellar peduncle" + }, + { + "abbreviation": "sd", + "Description": "spherical deconvolution" + }, + { + "abbreviation": "se", + "Description": "spin echo" + }, + { + 
"abbreviation": "set", + "Description": "surface-enhanced tractography" + }, + { + "abbreviation": "sls", + "Description": "superior longitudinal system" + }, + { + "abbreviation": "smri", + "Description": "structural magnetic resonance imaging" + }, + { + "abbreviation": "swm", + "Description": "superficial white matter" + }, + { + "abbreviation": "t1-w", + "Description": "t1-weighted image" + }, + { + "abbreviation": "te", + "Description": "echo time" + }, + { + "abbreviation": "tr", + "Description": "repetition time" + }, + { + "abbreviation": "uf", + "Description": "uncinate fascicle" + }, + { + "abbreviation": "vae", + "Description": "variational autoencoder" + }, + { + "abbreviation": "wm", + "Description": "white matter" + }, + { + "abbreviation": "3d", + "Description": "three dimensions" + }, + { + "abbreviation": "4d", + "Description": "four dimensions" + }, + { + "abbreviation": "act", + "Description": "anatomically-constrained tractography" + }, + { + "abbreviation": "amico", + "Description": "accelerated microstructure imaging via convex optimization" + }, + { + "abbreviation": "apm", + "Description": "average pathlength map" + }, + { + "abbreviation": "bet", + "Description": "brain extraction tool" + }, + { + "abbreviation": "cdmri", + "Description": "computational diffusion mri" + }, + { + "abbreviation": "cg", + "Description": "cingulum" + }, + { + "abbreviation": "cmc", + "Description": "continuous maps criterion" + }, + { + "abbreviation": "commit", + "Description": "convex optimization modeling for microstructure informed tractography" + }, + { + "abbreviation": "csa", + "Description": "constant solid-angle" + }, + { + "abbreviation": "csf/lcs/lcr", + "Description": "cerebrospinal fluid" + }, + { + "abbreviation": "c-dec", + "Description": "connectivity directionally-encoded color" + }, + { + "abbreviation": "dec", + "Description": "directionally-encoded color" + }, + { + "abbreviation": "dwi", + "Description": "diffusion-weighted imaging" + }, + { + "abbreviation": "dmri", + "Description": "diffusion-weighted imaging" + }, + { + "abbreviation": "irmd", + "Description": "diffusion-weighted imaging" + }, + { + "abbreviation": "eap", + "Description": "ensemble average propagator" + }, + { + "abbreviation": "epi", + "Description": "echo-planar imaging" + }, + { + "abbreviation": "fast", + "Description": "fmrib\u2019s automated segmentation tool" + }, + { + "abbreviation": "flirt", + "Description": "fmrib\u2019s linear image registration tool" + }, + { + "abbreviation": "fmt", + "Description": "fast marching tractography" + }, + { + "abbreviation": "fsl", + "Description": "fmrib software library" + }, + { + "abbreviation": "grappa", + "Description": "generalized autocalibrating partially parallel acquisition" + }, + { + "abbreviation": "ifof", + "Description": "inferior fronto-occipital fasciculus" + }, + { + "abbreviation": "ipmi", + "Description": "information processing in medical imaging" + }, + { + "abbreviation": "ismrm", + "Description": "international society for magnetic resonance in medicine" + }, + { + "abbreviation": "miccai", + "Description": "medical image computing and computer assisted intervention" + }, + { + "abbreviation": "mprage", + "Description": "magnetization-prepared rapid acquisition with gradient-echo" + }, + { + "abbreviation": "irm", + "Description": "magnetic resonance imaging" + }, + { + "abbreviation": "mri", + "Description": "magnetic resonance imaging" + }, + { + "abbreviation": "odf", + "Description": "orientation distribution function" + }, 
+ { + "abbreviation": "ohbm", + "Description": "organization for human brain mapping" + }, + { + "abbreviation": "pve", + "Description": "partial volume estimation" + }, + { + "abbreviation": "roi", + "Description": "region of interest" + }, + { + "abbreviation": "rtt", + "Description": "real-time tractography" + }, + { + "abbreviation": "sh", + "Description": "spherical harmonics" + }, + { + "abbreviation": "slf", + "Description": "superior longitudinal fasciculus" + }, + { + "abbreviation": "snr", + "Description": "signal-to-noise ratio" + }, + { + "abbreviation": "twi", + "Description": "track-weighted imaging" + }, + { + "abbreviation": "voi", + "Description": "volume of interest" + }, + { + "abbreviation": "ats", + "Description": "anterior traverse system" + }, + { + "abbreviation": "a123", + "Description": "area 1/2/3 (upper limb, head, and face region)" + }, + { + "abbreviation": "a4hf", + "Description": "area 4 (head and face region)" + }, + { + "abbreviation": "a4ul", + "Description": "area 4 (upper limb region)" + }, + { + "abbreviation": "a46", + "Description": "area 46" + }, + { + "abbreviation": "af", + "Description": "arcuate fasciculus" + }, + { + "abbreviation": "bls", + "Description": "basal longitudinal system" + }, + { + "abbreviation": "ca39", + "Description": "caudal area 39" + }, + { + "abbreviation": "cdl", + "Description": "caudal dorsolateral area" + }, + { + "abbreviation": "cvl", + "Description": "caudal ventrolateral area" + }, + { + "abbreviation": "cdl", + "Description": "caudolateral of area " + }, + { + "abbreviation": "csf", + "Description": "cerebral spinal fluid" + }, + { + "abbreviation": "ctoi", + "Description": "conservative tract of interest" + }, + { + "abbreviation": "da9/36", + "Description": "dorsal area 9/46" + }, + { + "abbreviation": "ddi", + "Description": "dorsal dysgranular insula" + }, + { + "abbreviation": "dl6", + "Description": "dorsolateral area 6" + }, + { + "abbreviation": "dl37", + "Description": "dorsolateral area 37 region" + }, + { + "abbreviation": "efc", + "Description": "extreme/external capsule fibre system" + }, + { + "abbreviation": "fodfs", + "Description": "fibre orientation distribution functions" + }, + { + "abbreviation": "fus", + "Description": "fusiform gyrus" + }, + { + "abbreviation": "hcp", + "Description": "human connectome project" + }, + { + "abbreviation": "ifg", + "Description": "inferior frontal gyrus" + }, + { + "abbreviation": "ils ", + "Description": "inferior longitudinal system " + }, + { + "abbreviation": "ipl", + "Description": "inferior parietal lobe" + }, + { + "abbreviation": "itg", + "Description": "inferior temporal gyrus" + }, + { + "abbreviation": "ins", + "Description": "insula" + }, + { + "abbreviation": "ipa ", + "Description": "intraparietal area " + }, + { + "abbreviation": "la", + "Description": "lateral area" + }, + { + "abbreviation": "locc", + "Description": "lateral occipital cortex" + }, + { + "abbreviation": "cin", + "Description": "limbic lobe/cingulum" + }, + { + "abbreviation": "mme", + "Description": "mean millimetre error" + }, + { + "abbreviation": "mvocc", + "Description": "medioventral occipital cortex" + }, + { + "abbreviation": "mlf", + "Description": "medial longitudinal fasciculus" + }, + { + "abbreviation": "mesls", + "Description": "mesial longitudinal system" + }, + { + "abbreviation": "mfg", + "Description": "middle frontal gyrus" + }, + { + "abbreviation": "midls", + "Description": "middle longitudinal systems" + }, + { + "abbreviation": "mlf", + "Description": 
"middle longitudinal fasciculus" + }, + { + "abbreviation": "mtg", + "Description": "middle temporal gyrus" + }, + { + "abbreviation": "mni", + "Description": "montreal neurological institute" + }, + { + "abbreviation": "opa", + "Description": "opercular area" + }, + { + "abbreviation": "ofg", + "Description": "orbital frontal gyrus" + }, + { + "abbreviation": "pag", + "Description": "paracentral gyrus" + }, + { + "abbreviation": "pft", + "Description": "particle-filter tractography" + }, + { + "abbreviation": "pocg", + "Description": "postcentral gyrus" + }, + { + "abbreviation": "pts", + "Description": "posterior traverse system" + }, + { + "abbreviation": "pcg", + "Description": "precentral gyrus" + }, + { + "abbreviation": "pcun", + "Description": "precuneus" + }, + { + "abbreviation": "rois", + "Description": "regions of interest" + }, + { + "abbreviation": "rda", + "Description": "rostrodorsal area" + }, + { + "abbreviation": "rva", + "Description": "rostroventral area" + }, + { + "abbreviation": "stoi", + "Description": "sublobe tract of interest" + }, + { + "abbreviation": "sfg", + "Description": "superior frontal gyrus" + }, + { + "abbreviation": "slf ", + "Description": "superior longitudinal fasciculus " + }, + { + "abbreviation": "sls ", + "Description": "superior longitudinal system" + }, + { + "abbreviation": "spl", + "Description": "superior parietal lobule" + }, + { + "abbreviation": "stl", + "Description": "superior temporal lobe" + }, + { + "abbreviation": "sma", + "Description": "supplementary motor area" + }, + { + "abbreviation": "tois", + "Description": "tracts of interest" + }, + { + "abbreviation": "t", + "Description": "tesla" + }, + { + "abbreviation": "uf", + "Description": "uncinate fasciculus" + }, + { + "abbreviation": "vtois", + "Description": "variable tract of interest" + }, + { + "abbreviation": "abs", + "Description": "atlas based segmentation" + }, + { + "abbreviation": "afd", + "Description": "apparent fibre density" + }, + { + "abbreviation": "ad", + "Description": "axial diffusivity" + }, + { + "abbreviation": "bids", + "Description": "brain imaging data structure" + }, + { + "abbreviation": "lcs", + "Description": "cerebrospinal fluid" + }, + { + "abbreviation": "dodf", + "Description": "diffusion orientation distribution function" + }, + { + "abbreviation": "flair", + "Description": "fluid-attenuated inversion recovery" + }, + { + "abbreviation": "frf", + "Description": "fibre response function" + }, + { + "abbreviation": "rd", + "Description": "radial diffusivity" + }, + { + "abbreviation": "rf", + "Description": "radio frequency" + }, + { + "abbreviation": "scil", + "Description": "sherbrooke connectivity imaging laboratory" + }, + { + "abbreviation": "sp", + "Description": "multiple sclerosis" + }, + { + "abbreviation": "cpu", + "Description": "central processing unit" + }, + { + "abbreviation": "frt", + "Description": "funk-radon transform" + }, + { + "abbreviation": "go", + "Description": "gigabyte" + }, + { + "abbreviation": "gpu", + "Description": "graphical processing unit" + }, + { + "abbreviation": "gru", + "Description": "gated recurrent unit" + }, + { + "abbreviation": "irm", + "Description": "magnetic resonance imaging" + }, + { + "abbreviation": "irmd", + "Description": "diffusion-weighted magnetic resonance imaging" + }, + { + "abbreviation": "lstm", + "Description": "long short-term memory network" + }, + { + "abbreviation": "md", + "Description": "mean diffusivity" + }, + { + "abbreviation": "ram", + "Description": "random access 
memory" + }, + { + "abbreviation": "rnn", + "Description": "recurrent neural network" + }, + { + "abbreviation": "3d-shore", + "Description": "three dimensional simple harmonic oscillator based reconstruction and estimation model" + }, + { + "abbreviation": "ae", + "Description": "angular error metric" + }, + { + "abbreviation": "cdf", + "Description": "cohen-daubechies-feauveau" + }, + { + "abbreviation": "cdsi", + "Description": "classical diffusion spectrum imaging model" + }, + { + "abbreviation": "cs", + "Description": "compressive sensing" + }, + { + "abbreviation": "csa", + "Description": "constant solid angle q-ball model" + }, + { + "abbreviation": "csd", + "Description": "constrained spherical deconvolution model" + }, + { + "abbreviation": "cv", + "Description": "cross validation" + }, + { + "abbreviation": "ddsi", + "Description": "diffusion spectrum imaging deconvolution model" + }, + { + "abbreviation": "dipy", + "Description": "diffusion in python software" + }, + { + "abbreviation": "dnc", + "Description": "difference in the number of fiber compartments metric" + }, + { + "abbreviation": "dsi", + "Description": "diffusion spectrum imaging model" + }, + { + "abbreviation": "dsi515", + "Description": "classical diffusion spectrum imaging acquisition scheme with 515 samples" + }, + { + "abbreviation": "dsistudio", + "Description": "dsi studio software" + }, + { + "abbreviation": "dti", + "Description": "diffusion tensor imaging model" + }, + { + "abbreviation": "dtk", + "Description": "diffusion toolkit software" + }, + { + "abbreviation": "dtwt", + "Description": "dual tree wavelet transform" + }, + { + "abbreviation": "dw", + "Description": "diffusion weighted" + }, + { + "abbreviation": "dwi", + "Description": "diffusion weighted imaging" + }, + { + "abbreviation": "dwt", + "Description": "discrete wavelet transform" + }, + { + "abbreviation": "fft", + "Description": "fast fourier transform" + }, + { + "abbreviation": "fodf", + "Description": "fiber orientation distribution function" + }, + { + "abbreviation": "ib", + "Description": "invalib bundles metric" + }, + { + "abbreviation": "idft", + "Description": "inverse discrete fourier transform" + }, + { + "abbreviation": "isbi", + "Description": "ieee international symposium on biomedical imaging" + }, + { + "abbreviation": "isbi2013", + "Description": "subset of the dataset from the hardi challenge at the conference isbi2013" + }, + { + "abbreviation": "isbi2013-full", + "Description": "dataset from the hardi challenge at the conference isbi2013" + }, + { + "abbreviation": "mgh-ucla hcp", + "Description": "(massachusetts general hospital - university of california, los angeles) human connectome project" + }, + { + "abbreviation": "nmse", + "Description": "normalized mean square error" + }, + { + "abbreviation": "odsi", + "Description": "optimal diffusion spectrum imaging model" + }, + { + "abbreviation": "pccoeff", + "Description": "pearson correlation coefficient" + }, + { + "abbreviation": "pdsi", + "Description": "plain diffusion spectrum imaging model" + }, + { + "abbreviation": "pgse", + "Description": "pulse-gradient spin-echo" + }, + { + "abbreviation": "qbi", + "Description": "q-ball imaging model" + }, + { + "abbreviation": "rip", + "Description": "restricted isometry property" + }, + { + "abbreviation": "sc", + "Description": "sampling scheme" + }, + { + "abbreviation": "sense", + "Description": "sensitivity encoding algorithm" + }, + { + "abbreviation": "swt", + "Description": "stationary wavelet transform" + }, 
+ { + "abbreviation": "tv", + "Description": "total variation" + }, + { + "abbreviation": "vb", + "Description": "valid bundles metric" + }, + { + "abbreviation": "vccr", + "Description": "valid connections to connection ratio" + }, + { + "abbreviation": "wu-minn hcp", + "Description": "(washington university, university of minnesota, and oxford university) human connectome project" + }, + { + "abbreviation": "2d", + "Description": "two dimensions" + }, + { + "abbreviation": "adc", + "Description": "apparent diffusion coefficient" + }, + { + "abbreviation": "aim", + "Description": "medical imaging axis" + }, + { + "abbreviation": "eeg", + "Description": "electroencephalography" + }, + { + "abbreviation": "chus", + "Description": "centre hospitalier universitaire de sherbrooke" + }, + { + "abbreviation": "cims", + "Description": "centre d\u2019imagerie mol\u00e9culaire de sherbrooke" + }, + { + "abbreviation": "crchus", + "Description": "centre de recherche du centre hospitalier universitaire de sherbrooke" + }, + { + "abbreviation": "fmr", + "Description": "mass-stiffness flow" + }, + { + "abbreviation": "fcm", + "Description": "mean-curvature flow" + }, + { + "abbreviation": "hr", + "Description": "high resolution" + }, + { + "abbreviation": "irmf", + "Description": "functional magnetic resonance imaging" + }, + { + "abbreviation": "meg", + "Description": "magnetoencephalography" + }, + { + "abbreviation": "psf", + "Description": "point spread function" + }, + { + "abbreviation": "roi", + "Description": "regions of interest" + }, + { + "abbreviation": "rgb", + "Description": "red, green and blue" + }, + { + "abbreviation": "rmn", + "Description": "nuclear magnetic resonance" + }, + { + "abbreviation": "sdp", + "Description": "semi-definite positive" + }, + { + "abbreviation": "snr", + "Description": "signal to noise ratio" + }, + { + "abbreviation": "tms", + "Description": "transcranial magnetic stimulation" + }, + { + "abbreviation": "wm", + "Description": "white matter" + }, + { + "abbreviation": "ad", + "Description": "alzheimer\u2019s disease" + }, + { + "abbreviation": "adni", + "Description": "alzheimer\u2019s disease neuroimaging initiative" + }, + { + "abbreviation": "bst", + "Description": "bundle-specific tractography" + }, + { + "abbreviation": "cnn", + "Description": "convolutional neural network" + }, + { + "abbreviation": "csc", + "Description": "continuous map criterion" + }, + { + "abbreviation": "dci", + "Description": "diffusion compartment imaging" + }, + { + "abbreviation": "dki", + "Description": "diffusion kurtosis imaging" + }, + { + "abbreviation": "edp", + "Description": "partial differential equation" + }, + { + "abbreviation": "mact", + "Description": "mesh anatomically-constrained tractography" + }, + { + "abbreviation": "mci", + "Description": "mild cognitive impairment" + }, + { + "abbreviation": "nc", + "Description": "normal control group" + }, + { + "abbreviation": "pft", + "Description": "particle filtering tractography" + }, + { + "abbreviation": "ping", + "Description": "pediatric imaging, neurocognition, and genetics" + }, + { + "abbreviation": "pve", + "Description": "partial volume effect" + }, + { + "abbreviation": "se", + "Description": "special euclidean group" + }, + { + "abbreviation": "sc", + "Description": "subcortical structures" + }, + { + "abbreviation": "sf", + "Description": "spherical function" + }, + { + "abbreviation": "tod", + "Description": "tract orientation density" + }, + { + "abbreviation": "act", + "Description": "anatomically 
constrained tractography" + }, + { + "abbreviation": "ad", + "Description": "alzheimer's disease" + }, + { + "abbreviation": "adni", + "Description": "alzheimer's disease neuroimaging initiative" + }, + { + "abbreviation": "ba", + "Description": "bundle adjacency" + }, + { + "abbreviation": "balsa", + "Description": "brain analysis library of spatial maps and atlases" + }, + { + "abbreviation": "boi", + "Description": "bundle of interest" + }, + { + "abbreviation": "clarity", + "Description": "clear lipid-exchanged acrylamide-hybridized rigid imaging / immunostaining / in situ-hybridization-compatible tissue hydrogel" + }, + { + "abbreviation": "cmc", + "Description": "continuous map criterion" + }, + { + "abbreviation": "cpu", + "Description": "central processing unit" + }, + { + "abbreviation": "dsc", + "Description": "dice score coefficient" + }, + { + "abbreviation": "eadc", + "Description": "european alzheimer\u2019s disease consortium" + }, + { + "abbreviation": "fat", + "Description": "fronto aslant tract" + }, + { + "abbreviation": "frf", + "Description": "fiber response function" + }, + { + "abbreviation": "efod", + "Description": "enhanced fiber orientation distribution" + }, + { + "abbreviation": "harp", + "Description": "harmonized hippocampal protocol" + }, + { + "abbreviation": "hc", + "Description": "healthy control" + }, + { + "abbreviation": "hcp", + "Description": "human connectome project" + }, + { + "abbreviation": "ifof", + "Description": "inferior fronto-occipital fasciculus" + }, + { + "abbreviation": "iqr", + "Description": "interquartile range" + }, + { + "abbreviation": "mri", + "Description": "magnetic resonance imaging" + }, + { + "abbreviation": "irm", + "Description": "magnetic resonance imaging" + }, + { + "abbreviation": "dmri", + "Description": "diffusion magnetic resonance imaging" + }, + { + "abbreviation": "irmd", + "Description": "diffusion magnetic resonance imaging" + }, + { + "abbreviation": "fmri", + "Description": "functional magnetic resonance imaging" + }, + { + "abbreviation": "irmf", + "Description": "functional magnetic resonance imaging" + }, + { + "abbreviation": "mdf", + "Description": "minimal direct-flip distance" + }, + { + "abbreviation": "ml", + "Description": "machine learning" + }, + { + "abbreviation": "oct", + "Description": "optical coherence tomography" + }, + { + "abbreviation": "pft", + "Description": "particle filter tractography" + }, + { + "abbreviation": "pve", + "Description": "partial volume effect" + }, + { + "abbreviation": "pyt", + "Description": "pyramidal tract" + }, + { + "abbreviation": "qb", + "Description": "quickbundles" + }, + { + "abbreviation": "ram", + "Description": "random access memory" + }, + { + "abbreviation": "rb(x)", + "Description": "recobundles(x)" + }, + { + "abbreviation": "mci", + "Description": "mild cognitive impairment" + }, + { + "abbreviation": "nmr", + "Description": "nuclear magnetic resonance" + }, + { + "abbreviation": "set", + "Description": "surface enhanced tractography" + }, + { + "abbreviation": "sfof", + "Description": "superior fronto-occipital fasciculus" + }, + { + "abbreviation": "slr", + "Description": "streamlines linear registration" + }, + { + "abbreviation": "tdi", + "Description": "tract density imaging" + }, + { + "abbreviation": "todi", + "Description": "tract orientation density imaging" + } + ] +} \ No newline at end of file
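
For context, here is a minimal sketch of how a search utility such as scripts/scil_search_keywords.py could consume the Vocabulary.json structure added above ("scripts" with per-script keyword lists, "synonyms" as groups of interchangeable terms, "acronyms" as abbreviation/Description pairs). The path, function names, and matching logic are illustrative assumptions, not the actual implementation:

import json
from pathlib import Path

# Hypothetical path, matching the layout introduced later in this series
# (data/Vocabulary/Vocabulary.json); adjust to wherever the file lives.
VOCAB_PATH = Path("data/Vocabulary/Vocabulary.json")


def expand_query(term, vocab):
    """Expand a search term with its synonym group and acronym expansion."""
    terms = {term.lower()}
    for group in vocab["synonyms"]:
        if term.lower() in (s.lower() for s in group):
            terms.update(s.lower() for s in group)
    for entry in vocab["acronyms"]:
        # .strip() guards against entries like "ils " with trailing spaces.
        if term.lower() == entry["abbreviation"].strip():
            terms.add(entry["Description"].lower())
    return terms


def search_scripts(term, vocab):
    """Return names of scripts whose keywords match any expanded term.

    Substring matching is deliberately naive here; the real script may
    use a more careful tokenization.
    """
    terms = expand_query(term, vocab)
    hits = []
    for script in vocab["scripts"]:
        keywords = [k.lower() for k in script["keywords"]]
        if any(t == k or t in k for t in terms for k in keywords):
            hits.append(script["name"])
    return hits


if __name__ == "__main__":
    vocab = json.loads(VOCAB_PATH.read_text())
    # "SH" expands via its synonym group to "spherical harmonics", so this
    # also matches scripts that only list the long form as a keyword.
    print(search_scripts("SH", vocab))

From 0d421906f10ecd015ddf4d7f07a131fd52c2ca88 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Thu, 1 Aug 2024 20:19:01 -0400 Subject: [PATCH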
39/69] change vocabulary json file's path and adapt the scripts to that --- .../utils => data}/Vocabulary/Vocabulary.json | 0 scilpy/utils/Vocabulary/Keywords.json | 1061 -------------- scilpy/utils/Vocabulary/Synonyms.json | 60 - scilpy/utils/Vocabulary/acronyms.json | 1298 ----------------- scilpy/utils/scilpy_bot.py | 8 +- scripts/scil_search_keywords.py | 16 +- 6 files changed, 10 insertions(+), 2433 deletions(-) rename {scilpy/utils => data}/Vocabulary/Vocabulary.json (100%) delete mode 100644 scilpy/utils/Vocabulary/Keywords.json delete mode 100644 scilpy/utils/Vocabulary/Synonyms.json delete mode 100644 scilpy/utils/Vocabulary/acronyms.json diff --git a/scilpy/utils/Vocabulary/Vocabulary.json b/data/Vocabulary/Vocabulary.json similarity index 100% rename from scilpy/utils/Vocabulary/Vocabulary.json rename to data/Vocabulary/Vocabulary.json diff --git a/scilpy/utils/Vocabulary/Keywords.json b/scilpy/utils/Vocabulary/Keywords.json deleted file mode 100644 index 98875e3bd..000000000 --- a/scilpy/utils/Vocabulary/Keywords.json +++ /dev/null @@ -1,1061 +0,0 @@ -{ - "scripts": [ - { - "name": "scil_bids_validate.py", - "keywords": [] - }, - { - "name": "scil_bingham_metrics.py", - "keywords": [ - "fiber density", - "fiber spread", - "fiber fraction", - "fixel" - ] - }, - { - "name": "scil_btensor_metrics.py", - "keywords": [ - "b-tensor", - "b-tensor encoding", - "tensor-valued diffusion MRI", - "micro-FA", - "uFA", - "order parameter", - "OP", - "DIVIDE", - "microstructure", - "linear tensor encoding (LTE)", - "planar tensor encoding (PTE)", - "spherical tensor encoding (STE)", - "multidimensional diffusion MRI" - ] - }, - { - "name": "scil_bundle_clean_qbx_clusters.py", - "keywords": [] - }, - { - "name": "scil_bundle_compute_centroid.py", - "keywords": [] - }, - { - "name": "scil_bundle_compute_endpoints_map.py", - "keywords": [] - }, - { - "name": "scil_bundle_diameter.py", - "keywords": [] - }, - { - "name": "scil_bundle_filter_by_occurence.py", - "keywords": [] - }, - { - "name": "scil_bundle_generate_priors.py", - "keywords": [] - }, - { - "name": "scil_bundle_label_map.py", - "keywords": [ - "parcellate", - "subdivide", - "split" - ] - }, - { - "name": "scil_bundle_mean_fixel_afd_from_hdf5.py", - "keywords": [] - }, - { - "name": "scil_bundle_mean_fixel_afd.py", - "keywords": [] - }, - { - "name": "scil_bundle_mean_fixel_bingham_metric.py", - "keywords": [ - "tractometry", - "lobe metrics", - "fiber density", - "fiber spread", - "fiber fraction", - "mean along bundle" - ] - }, - { - "name": "scil_bundle_mean_std.py", - "keywords": [] - }, - { - "name": "scil_bundle_pairwise_comparison.py", - "keywords": [] - }, - { - "name": "scil_bundle_reject_outliers.py", - "keywords": [] - }, - { - "name": "scil_bundle_score_many_bundles_one_tractogram.py", - "keywords": [] - }, - { - "name": "scil_bundle_score_same_bundle_many_segmentations.py", - "keywords": [] - }, - { - "name": "scil_bundle_shape_measures.py", - "keywords": [ - "geometry" - ] - }, - { - "name": "scil_bundle_uniformize_endpoints.py", - "keywords": [] - }, - { - "name": "scil_bundle_volume_per_label.py", - "keywords": [] - }, - { - "name": "scil_connectivity_compare_populations.py", - "keywords": [] - }, - { - "name": "scil_connectivity_compute_matrices.py", - "keywords": [] - }, - { - "name": "scil_connectivity_compute_pca.py", - "keywords": [] - }, - { - "name": "scil_connectivity_filter.py", - "keywords": [] - }, - { - "name": "scil_connectivity_graph_measures.py", - "keywords": [] - }, - { - "name": 
"scil_connectivity_hdf5_average_density_map.py", - "keywords": [] - }, - { - "name": "scil_connectivity_math.py", - "keywords": [] - }, - { - "name": "scil_connectivity_normalize.py", - "keywords": [] - }, - { - "name": "scil_connectivity_pairwise_agreement.py", - "keywords": [] - }, - { - "name": "scil_connectivity_print_filenames.py", - "keywords": [] - }, - { - "name": "scil_connectivity_reorder_rois.py", - "keywords": [] - }, - { - "name": "scil_denoising_nlmeans.py", - "keywords": [] - }, - { - "name": "scil_dki_metrics.py", - "keywords": [] - }, - { - "name": "scil_dti_convert_tensors.py", - "keywords": [ - "tensor", - "tensors", - "triangular matrix", - "fsl format", - "nifti format", - "mrtrix format", - "dipy format" - ] - }, - { - "name": "scil_dti_metrics.py", - "keywords": [ - "dti", - "metrics", - "diffusion tensor", - "FA", - "MD", - "AD", - "RD", - "RGB", - "eigenvector", - "eigenvalue", - "diffusivity" - ] - }, - { - "name": "scil_dwi_apply_bias_field.py", - "keywords": [] - }, - { - "name": "scil_dwi_compute_snr.py", - "keywords": [] - }, - { - "name": "scil_dwi_concatenate.py", - "keywords": [ - "merge", - "fuse", - "concatenate", - "diffusion data", - "DWI" - ] - }, - { - "name": "scil_dwi_convert_FDF.py", - "keywords": [] - }, - { - "name": "scil_dwi_detect_volume_outliers.py", - "keywords": [] - }, - { - "name": "scil_dwi_extract_b0.py", - "keywords": [ - "b0 extraction", - "b0", - "b-value 0", - "extract" - ] - }, - { - "name": "scil_dwi_extract_shell.py", - "keywords": [ - "shell extraction", - "b-value extraction", - "DWI", - "shell/b-value selection", - "extract", - "DWI split", - "DWI splitting", - "multiple shells" - ] - }, - { - "name": "scil_dwi_powder_average.py", - "keywords": [ - "powder average", - "DWI" - ] - }, - { - "name": "scil_dwi_prepare_eddy_command.py", - "keywords": [] - }, - { - "name": "scil_dwi_prepare_topup_command.py", - "keywords": [] - }, - { - "name": "scil_dwi_reorder_philips.py", - "keywords": [ - "Philips", - "DWI reorder", - "original gradient table" - ] - }, - { - "name": "scil_dwi_split_by_indices.py", - "keywords": [ - "DWI splitting", - "DWI split", - "indices" - ] - }, - { - "name": "scil_dwi_to_sh.py", - "keywords": [ - "signal", - "spherical harmonics" - ] - }, - { - "name": "scil_fodf_max_in_ventricles.py", - "keywords": [ - "ventricles", - "maximum fODF", - "absolute threshold" - ] - }, - { - "name": "scil_fodf_memsmt.py", - "keywords": [ - "b-tensor", - "b-tensor encoding", - "multi-encoding", - "multi-shell", - "multi-tissue", - "memsmt", - "linear tensor encoding (LTE)", - "planar tensor encoding (PTE)", - "spherical tensor encoding (STE)", - "multidimensional diffusion MRI", - "volume fraction", - "CSD", - "constrained spherical deconvolution", - "fODF" - ] - }, - { - "name": "scil_fodf_metrics.py", - "keywords": [ - "fODF metrics", - "NuFO", - "peaks", - "directions", - "peak values", - "peak indices", - "rgb", - "afd" - ] - }, - { - "name": "scil_fodf_msmt.py", - "keywords": [ - "CSD", - "constrained spherical deconvolution", - "multi-shell", - "multi-tissue", - "msmt", - "volume fraction", - "fODF" - ] - }, - { - "name": "scil_fodf_ssst.py", - "keywords": [ - "CSD", - "constrained spherical deconvolution", - "single-shell", - "single-tissue", - "ssst", - "fODF" - ] - }, - { - "name": "scil_fodf_to_bingham.py", - "keywords": [ - "lobe", - "lobe-specific", - "bingham-odf" - ] - }, - { - "name": "scil_freewater_maps.py", - "keywords": [] - }, - { - "name": "scil_freewater_priors.py", - "keywords": [] - }, - { - "name": 
"scil_frf_mean.py", - "keywords": [ - "fiber response function", - "response function", - "RF", - "FRF", - "mean", - "mean FRF" - ] - }, - { - "name": "scil_frf_memsmt.py", - "keywords": [ - "fiber response function", - "response function", - "RF", - "FRF", - "b-tensor", - "b-tensor encoding", - "multi-encoding", - "multi-shell", - "multi-tissue", - "memsmt", - "linear tensor encoding (LTE)", - "planar tensor encoding (PTE)", - "spherical tensor encoding (STE)", - "multidimensional diffusion MRI" - ] - }, - { - "name": "scil_frf_msmt.py", - "keywords": [ - "fiber response function", - "response function", - "RF", - "FRF", - "multi-shell", - "multi-tissue", - "msmt" - ] - }, - { - "name": "scil_frf_set_diffusivities.py", - "keywords": [ - "fiber response function", - "response function", - "RF", - "FRF", - "diffusivity", - "diffusivities", - "fixed FRF" - ] - }, - { - "name": "scil_frf_ssst.py", - "keywords": [ - "fiber response function", - "response function", - "RF", - "FRF", - "single-shell", - "single-tissue", - "ssst" - ] - }, - { - "name": "scil_get_version.py", - "keywords": [] - }, - { - "name": "scil_gradients_apply_transform.py", - "keywords": [ - "gradients", - "bvecs", - "b-vectors", - "transformation", - "transform" - ] - }, - { - "name": "scil_gradients_convert.py", - "keywords": [ - "gradients", - "gradient table", - "fsl format", - "mrtrix format", - "bval", - "bvec" - ] - }, - { - "name": "scil_gradients_generate_sampling.py", - "keywords": [ - "gradients", - "gradient table", - "sampling scheme", - "sampling", - "hardi", - "multi-shell", - "caruyer", - "optimized gradients" - ] - }, - { - "name": "scil_gradients_modify_axes.py", - "keywords": [] - }, - { - "name": "scil_gradients_round_bvals.py", - "keywords": [ - "bvals", - "b-value", - "round bvals", - "shell" - ] - }, - { - "name": "scil_gradients_validate_correct_eddy.py", - "keywords": [] - }, - { - "name": "scil_gradients_validate_correct.py", - "keywords": [ - "fiber coherence index", - "coherence" - ] - }, - { - "name": "scil_header_print_info.py", - "keywords": [] - }, - { - "name": "scil_header_validate_compatibility.py", - "keywords": [] - }, - { - "name": "scil_json_convert_entries_to_xlsx.py", - "keywords": [] - }, - { - "name": "scil_json_harmonize_entries.py", - "keywords": [] - }, - { - "name": "scil_json_merge_entries.py", - "keywords": [] - }, - { - "name": "scil_labels_combine.py", - "keywords": [] - }, - { - "name": "scil_labels_dilate.py", - "keywords": [] - }, - { - "name": "scil_labels_remove.py", - "keywords": [] - }, - { - "name": "scil_labels_split_volume_by_ids.py", - "keywords": [] - }, - { - "name": "scil_labels_split_volume_from_lut.py", - "keywords": [] - }, - { - "name": "scil_lesions_info.py", - "keywords": [] - }, - { - "name": "scil_mti_adjust_B1_header.py", - "keywords": [ - "MTI", - "magnetization transfer", - "MT", - "B1 map", - "header", - "B1" - ] - }, - { - "name": "scil_mti_maps_ihMT.py", - "keywords": [ - "MTI", - "magnetization transfer", - "MT", - "ihMT", - "ihMTR", - "ihMTsat", - "myelin", - "MTR", - "MTsat" - ] - }, - { - "name": "scil_mti_maps_MT.py", - "keywords": [ - "MTI", - "magnetization transfer", - "MT", - "MTR", - "MTsat", - "myelin" - ] - }, - { - "name": "scil_NODDI_maps.py", - "keywords": [] - }, - { - "name": "scil_NODDI_priors.py", - "keywords": [] - }, - { - "name": "scil_plot_stats_per_point.py", - "keywords": [] - }, - { - "name": "scil_qball_metrics.py", - "keywords": [ - "CSA", - "QBI", - "q-ball imaging", - "diffusion odf" - ] - }, - { - "name": 
"scil_rgb_convert.py", - "keywords": [] - }, - { - "name": "scil_sh_convert.py", - "keywords": [ - "spherical harmonics", - "tournier", - "mrtrix", - "descoteaux", - "dipy", - "modern", - "legacy" - ] - }, - { - "name": "scil_sh_fusion.py", - "keywords": [ - "spherical harmonics", - "SH", - "fusion", - "largest magnitude", - "merge", - "coefficients" - ] - }, - { - "name": "scil_sh_to_aodf.py", - "keywords": [ - "asymmetric", - "asymmetries", - "filtering", - "full basis" - ] - }, - { - "name": "scil_sh_to_rish.py", - "keywords": [ - "rotation invariant spherical harmonics", - "features" - ] - }, - { - "name": "scil_sh_to_sf.py", - "keywords": [ - "spherical harmonics", - "spherical functions", - "SH", - "SF", - "convertion", - "conversion" - ] - }, - { - "name": "scil_stats_group_comparison.py", - "keywords": [] - }, - { - "name": "scil_surface_apply_transform.py", - "keywords": [ - "registration", - "warp", - "transformation", - "surface", - "mesh", - "vtk FreeSurfer" - ] - }, - { - "name": "scil_surface_convert.py", - "keywords": [ - "surface", - "mesh", - "vtk FreeSurfer" - ] - }, - { - "name": "scil_surface_flip.py", - "keywords": [ - "surface", - "mesh", - "vtk FreeSurfer" - ] - }, - { - "name": "scil_surface_smooth.py", - "keywords": [ - "surface", - "mesh", - "vtk FreeSurfer" - ] - }, - { - "name": "scil_tracking_local_dev.py", - "keywords": [ - "development", - "runge-kutta", - "pure-python", - "onboarding", - "tractography", - "dipy" - ] - }, - { - "name": "scil_tracking_local.py", - "keywords": [ - "eudx", - "tractography", - "tracking", - "peak tracking", - "local tracking", - "probabilistic", - "deterministic", - "prob", - "det" - ] - }, - { - "name": "scil_tracking_pft_maps_edit.py", - "keywords": [ - "particule filtering tractography", - "cmc" - ] - }, - { - "name": "scil_tracking_pft_maps.py", - "keywords": [ - "particle filter tractography", - "continuous map criterion", - "tracking", - "fodf", - "cmc", - "particle filtering tractography" - ] - }, - { - "name": "scil_tracking_pft.py", - "keywords": [ - "particle filter tractography", - "continuous map criterion", - "tracking", - "fodf" - ] - }, - { - "name": "scil_tractogram_alter.py", - "keywords": [] - }, - { - "name": "scil_tractogram_apply_transform.py", - "keywords": [ - "ants", - "registration", - "affine", - "linear", - "nonlinear" - ] - }, - { - "name": "scil_tractogram_apply_transform_to_hdf5.py", - "keywords": [] - }, - { - "name": "scil_tractogram_assign_custom_color.py", - "keywords": [] - }, - { - "name": "scil_tractogram_assign_uniform_color.py", - "keywords": [] - }, - { - "name": "scil_tractogram_commit.py", - "keywords": [ - "microstructure informed", - "filtering", - "mit" - ] - }, - { - "name": "scil_tractogram_compress.py", - "keywords": [] - }, - { - "name": "scil_tractogram_compute_density_map.py", - "keywords": [ - "TDI", - "track density imaging", - "streamline count" - ] - }, - { - "name": "scil_tractogram_compute_TODI.py", - "keywords": [ - "track orientation density imaging", - "track density imaging", - "TDI" - ] - }, - { - "name": "scil_tractogram_convert_hdf5_to_trk.py", - "keywords": [] - }, - { - "name": "scil_tractogram_convert.py", - "keywords": [] - }, - { - "name": "scil_tractogram_count_streamlines.py", - "keywords": [] - }, - { - "name": "scil_tractogram_cut_streamlines.py", - "keywords": [] - }, - { - "name": "scil_tractogram_detect_loops.py", - "keywords": [] - }, - { - "name": "scil_tractogram_dpp_math.py", - "keywords": [ - "tractogram", - "data per point" - ] - }, - { - "name": 
"scil_tractogram_extract_ushape.py", - "keywords": [] - }, - { - "name": "scil_tractogram_filter_by_anatomy.py", - "keywords": [] - }, - { - "name": "scil_tractogram_filter_by_length.py", - "keywords": [] - }, - { - "name": "scil_tractogram_filter_by_orientation.py", - "keywords": [] - }, - { - "name": "scil_tractogram_filter_by_roi.py", - "keywords": [ - "segment", - "atlas" - ] - }, - { - "name": "scil_tractogram_fix_trk.py", - "keywords": [] - }, - { - "name": "scil_tractogram_flip.py", - "keywords": [] - }, - { - "name": "scil_tractogram_math.py", - "keywords": [] - }, - { - "name": "scil_tractogram_pairwise_comparison.py", - "keywords": [] - }, - { - "name": "scil_tractogram_print_info.py", - "keywords": [] - }, - { - "name": "scil_tractogram_project_map_to_streamlines.py", - "keywords": [] - }, - { - "name": "scil_tractogram_project_streamlines_to_map.py", - "keywords": [] - }, - { - "name": "scil_tractogram_qbx.py", - "keywords": [ - "clustering" - ] - }, - { - "name": "scil_tractogram_register.py", - "keywords": [] - }, - { - "name": "scil_tractogram_remove_invalid.py", - "keywords": [] - }, - { - "name": "scil_tractogram_resample_nb_points.py", - "keywords": [] - }, - { - "name": "scil_tractogram_resample.py", - "keywords": [] - }, - { - "name": "scil_tractogram_seed_density_map.py", - "keywords": [] - }, - { - "name": "scil_tractogram_segment_and_score.py", - "keywords": [] - }, - { - "name": "scil_tractogram_segment_bundles_for_connectivity.py", - "keywords": [] - }, - { - "name": "scil_tractogram_segment_bundles.py", - "keywords": [] - }, - { - "name": "scil_tractogram_segment_one_bundle.py", - "keywords": [] - }, - { - "name": "scil_tractogram_shuffle.py", - "keywords": [] - }, - { - "name": "scil_tractogram_smooth.py", - "keywords": [] - }, - { - "name": "scil_tractogram_split.py", - "keywords": [] - }, - { - "name": "scil_viz_bingham_fit.py", - "keywords": [ - "visualisation", - "bingham distributions", - "bingham odf" - ] - }, - { - "name": "scil_viz_bundle.py", - "keywords": [ - "visualisation", - "bundle", - "tractogram", - "streamlines" - ] - }, - { - "name": "scil_viz_bundle_screenshot_mni.py", - "keywords": [] - }, - { - "name": "scil_viz_bundle_screenshot_mosaic.py", - "keywords": [] - }, - { - "name": "scil_viz_connectivity.py", - "keywords": [] - }, - { - "name": "scil_viz_dti_screenshot.py", - "keywords": [] - }, - { - "name": "scil_viz_fodf.py", - "keywords": [ - "visualize", - "fiber odf", - "odf", - "sh", - "peaks", - "background" - ] - }, - { - "name": "scil_viz_gradients_screenshot.py", - "keywords": [] - }, - { - "name": "scil_viz_tractogram_seeds_3d.py", - "keywords": [ - "visualize", - "seed", - "density", - "3D", - "seed density" - ] - }, - { - "name": "scil_viz_tractogram_seeds.py", - "keywords": [ - "visualize", - "seed", - "streamline", - "streamline origin" - ] - }, - { - "name": "scil_viz_volume_histogram.py", - "keywords": [ - "visualize", - "histogram", - "metric" - ] - }, - { - "name": "scil_viz_volume_scatterplot.py", - "keywords": [ - "visualize", - "scatterplot", - "distribution", - "metric" - ] - }, - { - "name": "scil_viz_volume_screenshot_mosaic.py", - "keywords": [] - }, - { - "name": "scil_viz_volume_screenshot.py", - "keywords": [] - }, - { - "name": "scil_volume_apply_transform.py", - "keywords": [] - }, - { - "name": "scil_volume_b0_synthesis.py", - "keywords": [] - }, - { - "name": "scil_volume_count_non_zero_voxels.py", - "keywords": [] - }, - { - "name": "scil_volume_crop.py", - "keywords": [] - }, - { - "name": "scil_volume_flip.py", 
- "keywords": [] - }, - { - "name": "scil_volume_math.py", - "keywords": [] - }, - { - "name": "scil_volume_remove_outliers_ransac.py", - "keywords": [] - }, - { - "name": "scil_volume_resample.py", - "keywords": [] - }, - { - "name": "scil_volume_reshape_to_reference.py", - "keywords": [] - }, - { - "name": "scil_volume_stats_in_labels.py", - "keywords": [] - }, - { - "name": "scil_volume_stats_in_ROI.py", - "keywords": [] - } - ] -} \ No newline at end of file diff --git a/scilpy/utils/Vocabulary/Synonyms.json b/scilpy/utils/Vocabulary/Synonyms.json deleted file mode 100644 index ab34aab8c..000000000 --- a/scilpy/utils/Vocabulary/Synonyms.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "synonyms": [ - [ - "Bundle", - "tract", - "pathway", - "fasciculus" - ], - [ - "multi-shells", - "multishell", - "multi shell", - "msmt" - ], - [ - "SH", - "Spherical Harmonics" - ], - - [ - "single-shell", - "single shell", - "ssst" - ], - [ - "ODF", - "Orientation Distribution Function" - ], - [ - "DWI", - "Diffusion Weighted Imaging" - ], - [ - "shell", - "bval", - "b-value", - "bvals" - ], - [ - "b-tensor encoding", - "tensor-valued" - ], - [ - "surface", - "mesh" - ], - [ - "merge", - "fuse", - "concatenate", - "add" - ], - [ - "parcellate", - "subdivide", - "split", - "divide" - ] - ] - } \ No newline at end of file diff --git a/scilpy/utils/Vocabulary/acronyms.json b/scilpy/utils/Vocabulary/acronyms.json deleted file mode 100644 index ed35b9834..000000000 --- a/scilpy/utils/Vocabulary/acronyms.json +++ /dev/null @@ -1,1298 +0,0 @@ -[ - { - "abbreviation": "k-nn", - "Description": "k-nearest neighbors" - }, - { - "abbreviation": "1d", - "Description": "one-dimensional" - }, - { - "abbreviation": "2d", - "Description": "two-dimensional" - }, - { - "abbreviation": "3d", - "Description": "three-dimensional" - }, - { - "abbreviation": "ac", - "Description": "anterior commissure" - }, - { - "abbreviation": "ae", - "Description": "autoencoder" - }, - { - "abbreviation": "af", - "Description": "arcuate fascicle" - }, - { - "abbreviation": "ai", - "Description": "artificial intelligence" - }, - { - "abbreviation": "ann", - "Description": "artificial neural network" - }, - { - "abbreviation": "ar", - "Description": "acoustic radiation" - }, - { - "abbreviation": "atr", - "Description": "anterior thalamic radiation" - }, - { - "abbreviation": "cc", - "Description": "corpus callosum" - }, - { - "abbreviation": "cing", - "Description": "cingulum" - }, - { - "abbreviation": "cinta", - "Description": "clustering in tractography using autoencoders" - }, - { - "abbreviation": "cnn", - "Description": "convolutional neural network" - }, - { - "abbreviation": "csd", - "Description": "constrained spherical deconvolution" - }, - { - "abbreviation": "csf", - "Description": "cerebrospinal fluid" - }, - { - "abbreviation": "cst", - "Description": "corticospinal tract" - }, - { - "abbreviation": "dl", - "Description": "deep learning" - }, - { - "abbreviation": "dmri", - "Description": "diffusion magnetic resonance imaging" - }, - { - "abbreviation": "dodf", - "Description": "diffusion orientation distribution function" - }, - { - "abbreviation": "dt", - "Description": "diffusion tensor" - }, - { - "abbreviation": "dti", - "Description": "diffusion tensor imaging" - }, - { - "abbreviation": "dw-mri", - "Description": "diffusion-weighted magnetic resonance imaging" - }, - { - "abbreviation": "dwi", - "Description": "diffusion-weighted imaging" - }, - { - "abbreviation": "dwm", - "Description": "deep white matter" - }, - { - 
"abbreviation": "eap", - "Description": "ensemble average (diffusion) propagator" - }, - { - "abbreviation": "fa", - "Description": "fractional anisotropy" - }, - { - "abbreviation": "fat", - "Description": "frontal aslant tract" - }, - { - "abbreviation": "fc", - "Description": "fully connected" - }, - { - "abbreviation": "finta", - "Description": "filtering in tractography using autoencoders" - }, - { - "abbreviation": "fmri", - "Description": "functional magnetic resonance imaging" - }, - { - "abbreviation": "fod", - "Description": "fiber orientation distribution" - }, - { - "abbreviation": "fodf", - "Description": "fiber orientation distribution function" - }, - { - "abbreviation": "ft", - "Description": "fourier transform" - }, - { - "abbreviation": "fx", - "Description": "fornix" - }, - { - "abbreviation": "ge", - "Description": "gradient echo" - }, - { - "abbreviation": "gesta", - "Description": "generative sampling in bundle tractography using autoencoders" - }, - { - "abbreviation": "gm", - "Description": "gray matter" - }, - { - "abbreviation": "hardi", - "Description": "high angular resolution diffusion imaging" - }, - { - "abbreviation": "ic", - "Description": "internal capsule" - }, - { - "abbreviation": "icp", - "Description": "inferior cerebellar peduncle" - }, - { - "abbreviation": "ifof", - "Description": "inferior fronto-occipital fascicle" - }, - { - "abbreviation": "ils", - "Description": "inferior longitudinal system" - }, - { - "abbreviation": "jif", - "Description": "journal impact factor" - }, - { - "abbreviation": "mcp", - "Description": "middle cerebellar peduncle" - }, - { - "abbreviation": "ml", - "Description": "machine learning" - }, - { - "abbreviation": "mlp", - "Description": "multilayer perceptron" - }, - { - "abbreviation": "mls", - "Description": "middle longitudinal system" - }, - { - "abbreviation": "mr", - "Description": "magnetic resonance" - }, - { - "abbreviation": "mri", - "Description": "magnetic resonance imaging" - }, - { - "abbreviation": "nn", - "Description": "neural network" - }, - { - "abbreviation": "nos", - "Description": "number of streamlines" - }, - { - "abbreviation": "odf", - "Description": "orientation distribution function (also referred to as orientation density function)" - }, - { - "abbreviation": "or", - "Description": "optic radiation" - }, - { - "abbreviation": "pc", - "Description": "posterior commissure" - }, - { - "abbreviation": "pca", - "Description": "principal component analysis" - }, - { - "abbreviation": "pdf", - "Description": "probability density function" - }, - { - "abbreviation": "pgse", - "Description": "pulsed-gradient spin echo" - }, - { - "abbreviation": "pli", - "Description": "polarized light imaging" - }, - { - "abbreviation": "popt", - "Description": "parieto-occipital pontine tract" - }, - { - "abbreviation": "ps-oct", - "Description": "polarization-sensitive optical coherence tomography" - }, - { - "abbreviation": "rf", - "Description": "radio frequency" - }, - { - "abbreviation": "rnn", - "Description": "recurrent neural network" - }, - { - "abbreviation": "roc", - "Description": "receiver operating characteristic" - }, - { - "abbreviation": "scp", - "Description": "superior cerebellar peduncle" - }, - { - "abbreviation": "sd", - "Description": "spherical deconvolution" - }, - { - "abbreviation": "se", - "Description": "spin echo" - }, - { - "abbreviation": "set", - "Description": "surface-enhanced tractography" - }, - { - "abbreviation": "sls", - "Description": "superior longitudinal system" - }, - 
{ - "abbreviation": "smri", - "Description": "structural magnetic resonance imaging" - }, - { - "abbreviation": "swm", - "Description": "superficial white matter" - }, - { - "abbreviation": "t1-w", - "Description": "t1-weighted image" - }, - { - "abbreviation": "te", - "Description": "echo time" - }, - { - "abbreviation": "tr", - "Description": "repetition time" - }, - { - "abbreviation": "uf", - "Description": "uncinate fascicle" - }, - { - "abbreviation": "vae", - "Description": "variational autoencoder" - }, - { - "abbreviation": "wm", - "Description": "white matter" - }, - { - "abbreviation": "3d", - "Description": "three dimensions" - }, - { - "abbreviation": "4d", - "Description": "four dimensions" - }, - { - "abbreviation": "act", - "Description": "anatomically-constrained tractography" - }, - { - "abbreviation": "amico", - "Description": "accelerated microstructure imaging via convex optimization" - }, - { - "abbreviation": "apm", - "Description": "average pathlength map" - }, - { - "abbreviation": "bet", - "Description": "brain extraction tool" - }, - { - "abbreviation": "cdmri", - "Description": "computational diffusion mri" - }, - { - "abbreviation": "cg", - "Description": "cingulum" - }, - { - "abbreviation": "cmc", - "Description": "continuous maps criterion" - }, - { - "abbreviation": "commit", - "Description": "convex optimization modeling for microstructure informed tractography" - }, - { - "abbreviation": "csa", - "Description": "constant solid-angle" - }, - { - "abbreviation": "csf/lcs/lcr", - "Description": "cerebrospinal fluid" - }, - { - "abbreviation": "c-dec", - "Description": "connectivity directionally-encoded color" - }, - { - "abbreviation": "dec", - "Description": "directionally-encoded color" - }, - { - "abbreviation": "dwi", - "Description": "diffusion-weighted imaging" - }, - { - "abbreviation": "dmri", - "Description": "diffusion-weighted imaging" - }, - { - "abbreviation": "irmd", - "Description": "diffusion-weighted imaging" - }, - { - "abbreviation": "eap", - "Description": "ensemble average propagator" - }, - { - "abbreviation": "epi", - "Description": "echo-planar imaging" - }, - { - "abbreviation": "fast", - "Description": "fmrib\u2019s automated segmentation tool" - }, - { - "abbreviation": "flirt", - "Description": "fmrib\u2019s linear image registration tool" - }, - { - "abbreviation": "fmt", - "Description": "fast marching tractography" - }, - { - "abbreviation": "fsl", - "Description": "fmrib software library" - }, - { - "abbreviation": "grappa", - "Description": "generalized autocalibrating partially parallel acquisition" - }, - { - "abbreviation": "ifof", - "Description": "inferior fronto-occipital fasciculus" - }, - { - "abbreviation": "ipmi", - "Description": "information processing in medical imaging" - }, - { - "abbreviation": "ismrm", - "Description": "international society for magnetic resonance in medicine" - }, - { - "abbreviation": "miccai", - "Description": "medical image computing and computer assisted intervention" - }, - { - "abbreviation": "mprage", - "Description": "magnetization-prepared rapid acquisition with gradient-echo" - }, - { - "abbreviation": "irm", - "Description": "magnetic resonance imaging" - }, - { - "abbreviation": "mri", - "Description": "magnetic resonance imaging" - }, - { - "abbreviation": "odf", - "Description": "orientation distribution function" - }, - { - "abbreviation": "ohbm", - "Description": "organization for human brain mapping" - }, - { - "abbreviation": "pve", - "Description": "partial volume 
estimation" - }, - { - "abbreviation": "roi", - "Description": "region of interest" - }, - { - "abbreviation": "rtt", - "Description": "real-time tractography" - }, - { - "abbreviation": "sh", - "Description": "spherical harmonics" - }, - { - "abbreviation": "slf", - "Description": "superior longitudinal fasciculus" - }, - { - "abbreviation": "snr", - "Description": "signal-to-noise ratio" - }, - { - "abbreviation": "twi", - "Description": "track-weighted imaging" - }, - { - "abbreviation": "voi", - "Description": "volume of interest" - }, - { - "abbreviation": "ats", - "Description": "anterior traverse system" - }, - { - "abbreviation": "a123", - "Description": "area 1/2/3 (upper limb, head, and face region)" - }, - { - "abbreviation": "a4hf", - "Description": "area 4 (head and face region)" - }, - { - "abbreviation": "a4ul", - "Description": "area 4 (upper limb region)" - }, - { - "abbreviation": "a46", - "Description": "area 46" - }, - { - "abbreviation": "af", - "Description": "arcuate fasciculus" - }, - { - "abbreviation": "bls", - "Description": "basal longitudinal system" - }, - { - "abbreviation": "ca39", - "Description": "caudal area 39" - }, - { - "abbreviation": "cdl", - "Description": "caudal dorsolateral area" - }, - { - "abbreviation": "cvl", - "Description": "caudal ventrolateral area" - }, - { - "abbreviation": "cdl", - "Description": "caudolateral of area " - }, - { - "abbreviation": "csf", - "Description": "cerebral spinal fluid" - }, - { - "abbreviation": "ctoi", - "Description": "conservative tract of interest" - }, - { - "abbreviation": "da9/36", - "Description": "dorsal area 9/46" - }, - { - "abbreviation": "ddi", - "Description": "dorsal dysgranular insula" - }, - { - "abbreviation": "dl6", - "Description": "dorsolateral area 6" - }, - { - "abbreviation": "dl37", - "Description": "dorsolateral area 37 region" - }, - { - "abbreviation": "efc", - "Description": "extreme/external capsule fibre system" - }, - { - "abbreviation": "fodfs", - "Description": "fibre orientation distribution functions" - }, - { - "abbreviation": "fus", - "Description": "fusiform gyrus" - }, - { - "abbreviation": "hcp", - "Description": "human connectome project" - }, - { - "abbreviation": "ifg", - "Description": "inferior frontal gyrus" - }, - { - "abbreviation": "ils ", - "Description": "inferior longitudinal system " - }, - { - "abbreviation": "ipl", - "Description": "inferior parietal lobe" - }, - { - "abbreviation": "itg", - "Description": "inferior temporal gyrus" - }, - { - "abbreviation": "ins", - "Description": "insula" - }, - { - "abbreviation": "ipa ", - "Description": "intraparietal area " - }, - { - "abbreviation": "la", - "Description": "lateral area" - }, - { - "abbreviation": "locc", - "Description": "lateral occipital cortex" - }, - { - "abbreviation": "cin", - "Description": "limbic lobe/cingulum" - }, - { - "abbreviation": "mme", - "Description": "mean millimetre error" - }, - { - "abbreviation": "mvocc", - "Description": "medioventral occipital cortex" - }, - { - "abbreviation": "mlf", - "Description": "medial longitudinal fasciculus" - }, - { - "abbreviation": "mesls", - "Description": "mesial longitudinal system" - }, - { - "abbreviation": "mfg", - "Description": "middle frontal gyrus" - }, - { - "abbreviation": "midls", - "Description": "middle longitudinal systems" - }, - { - "abbreviation": "mlf", - "Description": "middle longitudinal fasciculus" - }, - { - "abbreviation": "mtg", - "Description": "middle temporal gyrus" - }, - { - "abbreviation": "mni", - "Description": 
"montreal neurological institute" - }, - { - "abbreviation": "opa", - "Description": "opercular area" - }, - { - "abbreviation": "ofg", - "Description": "orbital frontal gyrus" - }, - { - "abbreviation": "pag", - "Description": "paracentral gyrus" - }, - { - "abbreviation": "pft", - "Description": "particle-filter tractography" - }, - { - "abbreviation": "pocg", - "Description": "postcentral gyrus" - }, - { - "abbreviation": "pts", - "Description": "posterior traverse system" - }, - { - "abbreviation": "pcg", - "Description": "precentral gyrus" - }, - { - "abbreviation": "pcun", - "Description": "precuneus" - }, - { - "abbreviation": "rois", - "Description": "regions of interest" - }, - { - "abbreviation": "rda", - "Description": "rostrodorsal area" - }, - { - "abbreviation": "rva", - "Description": "rostroventral area" - }, - { - "abbreviation": "stoi", - "Description": "sublobe tract of interest" - }, - { - "abbreviation": "sfg", - "Description": "superior frontal gyrus" - }, - { - "abbreviation": "slf ", - "Description": "superior longitudinal fasciculus " - }, - { - "abbreviation": "sls ", - "Description": "superior longitudinal system" - }, - { - "abbreviation": "spl", - "Description": "superior parietal lobule" - }, - { - "abbreviation": "stl", - "Description": "superior temporal lobe" - }, - { - "abbreviation": "sma", - "Description": "supplementary motor area" - }, - { - "abbreviation": "tois", - "Description": "tracts of interest" - }, - { - "abbreviation": "t", - "Description": "tesla" - }, - { - "abbreviation": "uf", - "Description": "uncinate fasciculus" - }, - { - "abbreviation": "vtois", - "Description": "variable tract of interest" - }, - { - "abbreviation": "abs", - "Description": "atlas based segmentation" - }, - { - "abbreviation": "afd", - "Description": "apparent fibre density" - }, - { - "abbreviation": "ad", - "Description": "axial diffusivity" - }, - { - "abbreviation": "bids", - "Description": "brain imaging data structure" - }, - { - "abbreviation": "lcs", - "Description": "cerebrospinal fluid" - }, - { - "abbreviation": "dodf", - "Description": "diffusion orientation distribution function" - }, - { - "abbreviation": "flair", - "Description": "fluid-attenuated inversion recovery" - }, - { - "abbreviation": "frf", - "Description": "fibre response function" - }, - { - "abbreviation": "rd", - "Description": "radial diffusivity" - }, - { - "abbreviation": "rf", - "Description": "radio frequency" - }, - { - "abbreviation": "scil", - "Description": "sherbrooke connectivity imaging laboratory" - }, - { - "abbreviation": "sp", - "Description": "multiple sclerosis" - }, - { - "abbreviation": "cpu", - "Description": "central processing unit" - }, - { - "abbreviation": "frt", - "Description": "funk-radon transform" - }, - { - "abbreviation": "go", - "Description": "gigabyte" - }, - { - "abbreviation": "gpu", - "Description": "graphical processing unit" - }, - { - "abbreviation": "gru", - "Description": "gated recurrent unit" - }, - { - "abbreviation": "irm", - "Description": "magnetic resonance imaging" - }, - { - "abbreviation": "irmd", - "Description": "diffusion-weighted magnetic resonance imaging" - }, - { - "abbreviation": "lstm", - "Description": "long short-term memory network" - }, - { - "abbreviation": "md", - "Description": "mean diffusivity" - }, - { - "abbreviation": "ram", - "Description": "random access memory" - }, - { - "abbreviation": "rnn", - "Description": "recurrent neural network" - }, - { - "abbreviation": "3d-shore", - "Description": "three dimensional 
simple harmonic oscillator based reconstruction and estimation model" - }, - { - "abbreviation": "ae", - "Description": "angular error metric" - }, - { - "abbreviation": "cdf", - "Description": "cohen-daubechies-feauveau" - }, - { - "abbreviation": "cdsi", - "Description": "classical diffusion spectrum imaging model" - }, - { - "abbreviation": "cs", - "Description": "compressive sensing" - }, - { - "abbreviation": "csa", - "Description": "constant solid angle q-ball model" - }, - { - "abbreviation": "csd", - "Description": "constrained spherical deconvolution model" - }, - { - "abbreviation": "cv", - "Description": "cross validation" - }, - { - "abbreviation": "ddsi", - "Description": "diffusion spectrum imaging deconvolution model" - }, - { - "abbreviation": "dipy", - "Description": "diffusion in python software" - }, - { - "abbreviation": "dnc", - "Description": "difference in the number of fiber compartments metric" - }, - { - "abbreviation": "dsi", - "Description": "diffusion spectrum imaging model" - }, - { - "abbreviation": "dsi515", - "Description": "classical diffusion spectrum imaging acquisition scheme with 515 samples" - }, - { - "abbreviation": "dsistudio", - "Description": "dsi studio software" - }, - { - "abbreviation": "dti", - "Description": "diffusion tensor imaging model" - }, - { - "abbreviation": "dtk", - "Description": "diffusion toolkit software" - }, - { - "abbreviation": "dtwt", - "Description": "dual tree wavelet transform" - }, - { - "abbreviation": "dw", - "Description": "diffusion weighted" - }, - { - "abbreviation": "dwi", - "Description": "diffusion weighted imaging" - }, - { - "abbreviation": "dwt", - "Description": "discrete wavelet transform" - }, - { - "abbreviation": "fft", - "Description": "fast fourier transform" - }, - { - "abbreviation": "fodf", - "Description": "fiber orientation distribution function" - }, - { - "abbreviation": "ib", - "Description": "invalib bundles metric" - }, - { - "abbreviation": "idft", - "Description": "inverse discrete fourier transform" - }, - { - "abbreviation": "isbi", - "Description": "ieee international symposium on biomedical imaging" - }, - { - "abbreviation": "isbi2013", - "Description": "subset of the dataset from the hardi challenge at the conference isbi2013" - }, - { - "abbreviation": "isbi2013-full", - "Description": "dataset from the hardi challenge at the conference isbi2013" - }, - { - "abbreviation": "mgh-ucla hcp", - "Description": "(massachusetts general hospital - university of california, los angeles) human connectome project" - }, - { - "abbreviation": "nmse", - "Description": "normalized mean square error" - }, - { - "abbreviation": "odsi", - "Description": "optimal diffusion spectrum imaging model" - }, - { - "abbreviation": "pccoeff", - "Description": "pearson correlation coefficient" - }, - { - "abbreviation": "pdsi", - "Description": "plain diffusion spectrum imaging model" - }, - { - "abbreviation": "pgse", - "Description": "pulse-gradient spin-echo" - }, - { - "abbreviation": "qbi", - "Description": "q-ball imaging model" - }, - { - "abbreviation": "rip", - "Description": "restricted isometry property" - }, - { - "abbreviation": "sc", - "Description": "sampling scheme" - }, - { - "abbreviation": "sense", - "Description": "sensitivity encoding algorithm" - }, - { - "abbreviation": "swt", - "Description": "stationary wavelet transform" - }, - { - "abbreviation": "tv", - "Description": "total variation" - }, - { - "abbreviation": "vb", - "Description": "valid bundles metric" - }, - { - 
"abbreviation": "vccr", - "Description": "valid connections to connection ratio" - }, - { - "abbreviation": "wu-minn hcp", - "Description": "(washington university, university of minnesota, and oxford university) human connectome project" - }, - { - "abbreviation": "2d", - "Description": "two dimensions" - }, - { - "abbreviation": "adc", - "Description": "apparent diffusion coefficient" - }, - { - "abbreviation": "aim", - "Description": "medical imaging axis" - }, - { - "abbreviation": "eeg", - "Description": "electroencephalography" - }, - { - "abbreviation": "chus", - "Description": "centre hospitalier universitaire de sherbrooke" - }, - { - "abbreviation": "cims", - "Description": "centre d\u2019imagerie mol\u00e9culaire de sherbrooke" - }, - { - "abbreviation": "crchus", - "Description": "centre de recherche du centre hospitalier universitaire de sherbrooke" - }, - { - "abbreviation": "fmr", - "Description": "mass-stiffness flow" - }, - { - "abbreviation": "fcm", - "Description": "mean-curvature flow" - }, - { - "abbreviation": "hr", - "Description": "high resolution" - }, - { - "abbreviation": "irmf", - "Description": "functional magnetic resonance imaging" - }, - { - "abbreviation": "meg", - "Description": "magnetoencephalography" - }, - { - "abbreviation": "psf", - "Description": "point spread function" - }, - { - "abbreviation": "roi", - "Description": "regions of interest" - }, - { - "abbreviation": "rgb", - "Description": "red, green and blue" - }, - { - "abbreviation": "rmn", - "Description": "nuclear magnetic resonance" - }, - { - "abbreviation": "sdp", - "Description": "semi-definite positive" - }, - { - "abbreviation": "snr", - "Description": "signal to noise ratio" - }, - { - "abbreviation": "tms", - "Description": "transcranial magnetic stimulation" - }, - { - "abbreviation": "wm", - "Description": "white matter" - }, - { - "abbreviation": "ad", - "Description": "alzheimer\u2019s disease" - }, - { - "abbreviation": "adni", - "Description": "alzheimer\u2019s disease neuroimaging initiative" - }, - { - "abbreviation": "bst", - "Description": "bundle-specific tractography" - }, - { - "abbreviation": "cnn", - "Description": "convolutional neural network" - }, - { - "abbreviation": "csc", - "Description": "continuous map criterion" - }, - { - "abbreviation": "dci", - "Description": "diffusion compartment imaging" - }, - { - "abbreviation": "dki", - "Description": "diffusion kurtosis imaging" - }, - { - "abbreviation": "edp", - "Description": "partial differential equation" - }, - { - "abbreviation": "mact", - "Description": "mesh anatomically-constrained tractography" - }, - { - "abbreviation": "mci", - "Description": "mild cognitive impairment" - }, - { - "abbreviation": "nc", - "Description": "normal control group" - }, - { - "abbreviation": "pft", - "Description": "particle filtering tractography" - }, - { - "abbreviation": "ping", - "Description": "pediatric imaging, neurocognition, and genetics" - }, - { - "abbreviation": "pve", - "Description": "partial volume effect" - }, - { - "abbreviation": "se", - "Description": "special euclidean group" - }, - { - "abbreviation": "sc", - "Description": "subcortical structures" - }, - { - "abbreviation": "sf", - "Description": "spherical function" - }, - { - "abbreviation": "tod", - "Description": "tract orientation density" - }, - { - "abbreviation": "act", - "Description": "anatomically constrained tractography" - }, - { - "abbreviation": "ad", - "Description": "alzheimer's disease" - }, - { - "abbreviation": "adni", - 
"Description": "alzheimer's disease neuroimaging initiative" - }, - { - "abbreviation": "ba", - "Description": "bundle adjacency" - }, - { - "abbreviation": "balsa", - "Description": "brain analysis library of spatial maps and atlases" - }, - { - "abbreviation": "boi", - "Description": "bundle of interest" - }, - { - "abbreviation": "clarity", - "Description": "clear lipid-exchanged acrylamide-hybridized rigid imaging / immunostaining / in situ-hybridization-compatible tissue hydrogel" - }, - { - "abbreviation": "cmc", - "Description": "continuous map criterion" - }, - { - "abbreviation": "cpu", - "Description": "central processing unit" - }, - { - "abbreviation": "dsc", - "Description": "dice score coefficient" - }, - { - "abbreviation": "eadc", - "Description": "european alzheimer\u2019s disease consortium" - }, - { - "abbreviation": "fat", - "Description": "fronto aslant tract" - }, - { - "abbreviation": "frf", - "Description": "fiber response function" - }, - { - "abbreviation": "efod", - "Description": "enhanced fiber orientation distribution" - }, - { - "abbreviation": "harp", - "Description": "harmonized hippocampal protocol" - }, - { - "abbreviation": "hc", - "Description": "healthy control" - }, - { - "abbreviation": "hcp", - "Description": "human connectome project" - }, - { - "abbreviation": "ifof", - "Description": "inferior fronto-occipital fasciculus" - }, - { - "abbreviation": "iqr", - "Description": "interquartile range" - }, - { - "abbreviation": "mri", - "Description": "magnetic resonance imaging" - }, - { - "abbreviation": "irm", - "Description": "magnetic resonance imaging" - }, - { - "abbreviation": "dmri", - "Description": "diffusion magnetic resonance imaging" - }, - { - "abbreviation": "irmd", - "Description": "diffusion magnetic resonance imaging" - }, - { - "abbreviation": "fmri", - "Description": "functional magnetic resonance imaging" - }, - { - "abbreviation": "irmf", - "Description": "functional magnetic resonance imaging" - }, - { - "abbreviation": "mdf", - "Description": "minimal direct-flip distance" - }, - { - "abbreviation": "ml", - "Description": "machine learning" - }, - { - "abbreviation": "oct", - "Description": "optical coherence tomography" - }, - { - "abbreviation": "pft", - "Description": "particle filter tractography" - }, - { - "abbreviation": "pve", - "Description": "partial volume effect" - }, - { - "abbreviation": "pyt", - "Description": "pyramidal tract" - }, - { - "abbreviation": "qb", - "Description": "quickbundles" - }, - { - "abbreviation": "ram", - "Description": "random access memory" - }, - { - "abbreviation": "rb(x)", - "Description": "recobundles(x)" - }, - { - "abbreviation": "mci", - "Description": "mild cognitive impairment" - }, - { - "abbreviation": "nmr", - "Description": "nuclear magnetic resonance" - }, - { - "abbreviation": "set", - "Description": "surface enhanced tractography" - }, - { - "abbreviation": "sfof", - "Description": "superior fronto-occipital fasciculus" - }, - { - "abbreviation": "slr", - "Description": "streamlines linear registration" - }, - { - "abbreviation": "tdi", - "Description": "tract density imaging" - }, - { - "abbreviation": "todi", - "Description": "tract orientation density imaging" - } -] \ No newline at end of file diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py index be87e353c..5cc9b5fe3 100644 --- a/scilpy/utils/scilpy_bot.py +++ b/scilpy/utils/scilpy_bot.py @@ -14,9 +14,7 @@ SPACING_LEN = 80 # Path to the JSON file containing script information and keywords 
-KEYWORDS_FILE_PATH = pathlib.Path(__file__).parent /'Vocabulary'/'Keywords.json'
-SYNONYMS_FILE_PATH = pathlib.Path(__file__).parent /'Vocabulary'/'Synonyms.json'
-
+VOCAB_FILE_PATH = pathlib.Path(__file__).parent.parent.parent/'data' /'Vocabulary'/'Vocabulary.json'
@@ -256,7 +254,9 @@ def _get_synonyms(keyword, synonyms_data):
     list of str
         List of synonyms for the given keyword.
     """
-    for synonym_set in synonyms_data['synonyms']:
+    keyword = keyword.lower()
+    for synonym_set in synonyms_data:
+        synonym_set = [synonym.lower() for synonym in synonym_set]
         if keyword in synonym_set:
             return synonym_set
     return []
diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py
index 0a85e82dd..b66609506 100755
--- a/scripts/scil_search_keywords.py
+++ b/scripts/scil_search_keywords.py
@@ -39,7 +39,7 @@
     _get_docstring_from_script_path, _stem_keywords, _stem_phrase, _generate_help_files,
     _get_synonyms, _extract_keywords_and_phrases, _calculate_score, _make_title, prompt_user_for_object
 )
-from scilpy.utils.scilpy_bot import SPACING_LEN, KEYWORDS_FILE_PATH, SYNONYMS_FILE_PATH
+from scilpy.utils.scilpy_bot import SPACING_LEN, VOCAB_FILE_PATH
 from scilpy.io.utils import add_verbose_arg
 
 nltk.download('punkt', quiet=True)
@@ -151,10 +151,10 @@ def update_matches_and_scores(filename, score_details):
 
     # Search in keywords file
-    with open(KEYWORDS_FILE_PATH, 'r') as f:
-        keywords_data = json.load(f)
+    with open(VOCAB_FILE_PATH, 'r') as f:
+        vocab_data = json.load(f)
 
-    for script in keywords_data['scripts']:
+    for script in vocab_data['scripts']:
         script_name = script['name']
         if selected_object and not script_name.startswith(f'scil_{selected_object}_'):
             continue
@@ -165,13 +165,9 @@ def update_matches_and_scores(filename, score_details):
 
     # Search in synonyms file if not args.no_synonyms is not specified
-    if not args.no_synonyms:
-        with open(SYNONYMS_FILE_PATH, 'r') as f:
-            synonyms_data = json.load(f)
-
+    if not args.no_synonyms:
         for keyword in keywords + phrases:
-            synonyms = _get_synonyms(keyword, synonyms_data)
-
+            synonyms = _get_synonyms(keyword, vocab_data['synonyms'])
     for script in sorted(script_dir.glob(search_pattern.format(selected_object))):
         filename = script.stem
         if filename == '__init__' or filename == 'scil_search_keywords':
From 663c28adb91ff51993d162811b0d51274b6d528d Mon Sep 17 00:00:00 2001
From: bouj1113
Date: Mon, 5 Aug 2024 09:50:27 -0400
Subject: [PATCH 40/69] change the _calculate_score function to use regex to
 match whole words only (words delimited by regex word boundaries, not just
 whitespace)

---
 scilpy/utils/scilpy_bot.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py
index 5cc9b5fe3..86823acaf 100644
--- a/scilpy/utils/scilpy_bot.py
+++ b/scilpy/utils/scilpy_bot.py
@@ -4,6 +4,7 @@
 import subprocess
 from nltk.stem import PorterStemmer
 from colorama import init, Fore, Style
+import re
 
 stemmer = PorterStemmer()
 
@@ -311,7 +312,9 @@ def _calculate_score(keywords, phrases, text, filename):
 
     for keyword in keywords:
         keyword = keyword.lower()
-        keyword_score = stemmed_text.count(keyword) + stemmed_filename.count(keyword)
+        # Use regular expressions to match whole words only
+        keyword_pattern = re.compile(r'\b' + re.escape(keyword) + r'\b')
+        keyword_score = len(keyword_pattern.findall(stemmed_text)) + len(keyword_pattern.findall(stemmed_filename))
         score_details[keyword] = keyword_score
         score_details['total_score'] += keyword_score
 
From ade5f42f27a9150e3e3bac9775393e629a526d38 Mon Sep 17 00:00:00 2001
From: bouj1113
Date:
Mon, 5 Aug 2024 09:58:03 -0400 Subject: [PATCH 41/69] modify docstring --- scripts/scil_search_keywords.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index b66609506..292d3a263 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -9,16 +9,14 @@ - By default, the search includes synonyms for the keywords. - Use --no_synonyms to exclude synonyms from the search. - Use --search_category to limit the search to a specific category of scripts. -- Use --verbose to display the full docstring. - Words enclosed in quotes will be searched as phrases, ensuring the words appear next to each other in the text. Examples: scil_search_keywords.py tractogram filtering - scil_search_keywords.py --search_parser tractogram filtering -v scil_search_keywords.py "Spherical Harmonics" convert scil_search_keywords.py --no_synonyms tractogram filtering - scil_search_keywords.py --search_category --verbose tractogram filtering + scil_search_keywords.py --search_category tractogram filtering """ import argparse From 6588d1a78037e2309175484b837f7e727d8612de Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Mon, 5 Aug 2024 10:19:00 -0400 Subject: [PATCH 42/69] change the _generate_help_files function to display progress of files generation --- scilpy/utils/scilpy_bot.py | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py index 86823acaf..ab0e387b0 100644 --- a/scilpy/utils/scilpy_bot.py +++ b/scilpy/utils/scilpy_bot.py @@ -170,15 +170,15 @@ def _generate_help_files(): scripts_dir= pathlib.Path(__file__).parent.parent.parent /'scripts' + scripts = [script for script in scripts_dir.glob('*.py') if script.name not in ['__init__.py', 'scil_search_keywords.py']] + total_scripts = len(scripts) # Hidden directory to store help files hidden_dir = scripts_dir / '.hidden' hidden_dir.mkdir(exist_ok=True) # Iterate over all scripts and generate help files - for script in scripts_dir.glob('*.py'): - if script.name == '__init__.py' or script.name == 'scil_search_keywords.py': - continue + for idx, script in enumerate(scripts, start=1): help_file = hidden_dir / f'{script.name}.help' # Check if help file already exists if help_file.exists(): @@ -192,23 +192,23 @@ def _generate_help_files(): with open(help_file, 'w') as f: f.write(result.stdout) - print(f'Help file saved to {help_file}') + print(f'Help file saved to {help_file}({idx}/{total_scripts})') - # Check if any help files are missing and regenerate them - for script in scripts_dir.glob('*.py'): - if script.name == '__init__.py' or script.name == 'scil_search_keywords.py': - continue - help_file = hidden_dir / f'{script.name}.help' - if not help_file.exists(): - # Run the script with --h and capture the output - result = subprocess.run(['python', script, '--h'], capture_output=True, text=True) - - # Save the output to the hidden file - with open(help_file, 'w') as f: - f.write(result.stdout) - - print(f'Regenerated help output for {script.name}') + # Check if any help files are missing and regenerate them + for script in scripts_dir.glob('*.py'): + if script.name == '__init__.py' or script.name == 'scil_search_keywords.py': + continue + help_file = hidden_dir / f'{script.name}.help' + if not help_file.exists(): + # Run the script with --h and capture the output + result = subprocess.run(['python', script, '--h'], capture_output=True, text=True) + + # 
Save the output to the hidden file + with open(help_file, 'w') as f: + f.write(result.stdout) + + print(f'Regenerated help output for {script.name}') From 2d0f12fdd8029345398882fcf7551acf621a46c6 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Mon, 5 Aug 2024 10:19:37 -0400 Subject: [PATCH 43/69] delete useless files os and subprocess --- os | 0 subprocess | 0 2 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 os delete mode 100644 subprocess diff --git a/os b/os deleted file mode 100644 index e69de29bb..000000000 diff --git a/subprocess b/subprocess deleted file mode 100644 index e69de29bb..000000000 From 2dfa03e092f366411a3e9fc744ec2d7e4cfed5b5 Mon Sep 17 00:00:00 2001 From: bouj1113 Date: Mon, 5 Aug 2024 10:29:47 -0400 Subject: [PATCH 44/69] run autopep8 on the changed and new scripts --- scilpy/utils/scilpy_bot.py | 43 ++++++++++++-------- scripts/scil_search_keywords.py | 72 ++++++++++++++++++--------------- 2 files changed, 67 insertions(+), 48 deletions(-) diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py index ab0e387b0..3270b6c9d 100644 --- a/scilpy/utils/scilpy_bot.py +++ b/scilpy/utils/scilpy_bot.py @@ -15,17 +15,18 @@ SPACING_LEN = 80 # Path to the JSON file containing script information and keywords -VOCAB_FILE_PATH = pathlib.Path(__file__).parent.parent.parent/'data' /'Vocabulary'/'Vocabulary.json' - +VOCAB_FILE_PATH = pathlib.Path( + __file__).parent.parent.parent/'data' / 'Vocabulary'/'Vocabulary.json' OBJECTS = [ 'aodf', 'bids', 'bingham', 'btensor', 'bundle', 'connectivity', 'denoising', - 'dki', 'dti','dwi', 'fodf', 'freewater', 'frf', 'gradients', 'header', 'json', + 'dki', 'dti', 'dwi', 'fodf', 'freewater', 'frf', 'gradients', 'header', 'json', 'labels', 'lesions', 'mti', 'NODDI', 'sh', 'surface', 'tracking', 'tractogram', 'viz', 'volume', 'qball', 'rgb', 'lesions' ] + def prompt_user_for_object(): """ Prompts the user to select an object from the list of available objects. @@ -35,7 +36,8 @@ def prompt_user_for_object(): print(f"{idx + 1}. {obj}") while True: try: - choice = int(input("Choose the object you want to work on (enter the number): ")) + choice = int( + input("Choose the object you want to work on (enter the number): ")) if 1 <= choice <= len(OBJECTS): return OBJECTS[choice - 1] else: @@ -103,6 +105,7 @@ def _split_first_sentence(text): remaining = text[split_idx:] if split_idx else "" return sentence, remaining + def _stem_keywords(keywords): """ Stem a list of keywords using PorterStemmer. @@ -119,6 +122,7 @@ def _stem_keywords(keywords): """ return [stemmer.stem(keyword) for keyword in keywords] + def _stem_text(text): """ Stem all words in a text using PorterStemmer. @@ -136,6 +140,7 @@ def _stem_text(text): words = nltk.word_tokenize(text) return ' '.join([stemmer.stem(word) for word in words]) + def _stem_phrase(phrase): """ Stem all words in a phrase using PorterStemmer. @@ -153,6 +158,7 @@ def _stem_phrase(phrase): words = phrase.split() return ' '.join([stemmer.stem(word) for word in words]) + def _generate_help_files(): """ This function iterates over all Python scripts in the 'scripts' directory, @@ -168,9 +174,10 @@ def _generate_help_files(): The help output is saved in a hidden directory to avoid clutter in the main scripts directory. 
""" - scripts_dir= pathlib.Path(__file__).parent.parent.parent /'scripts' + scripts_dir = pathlib.Path(__file__).parent.parent.parent / 'scripts' - scripts = [script for script in scripts_dir.glob('*.py') if script.name not in ['__init__.py', 'scil_search_keywords.py']] + scripts = [script for script in scripts_dir.glob( + '*.py') if script.name not in ['__init__.py', 'scil_search_keywords.py']] total_scripts = len(scripts) # Hidden directory to store help files @@ -186,7 +193,8 @@ def _generate_help_files(): continue # Run the script with --h and capture the output - result = subprocess.run(['python', script, '--h'], capture_output=True, text=True) + result = subprocess.run( + ['python', script, '--h'], capture_output=True, text=True) # Save the output to the hidden file with open(help_file, 'w') as f: @@ -194,7 +202,6 @@ def _generate_help_files(): print(f'Help file saved to {help_file}({idx}/{total_scripts})') - # Check if any help files are missing and regenerate them for script in scripts_dir.glob('*.py'): if script.name == '__init__.py' or script.name == 'scil_search_keywords.py': @@ -202,7 +209,8 @@ def _generate_help_files(): help_file = hidden_dir / f'{script.name}.help' if not help_file.exists(): # Run the script with --h and capture the output - result = subprocess.run(['python', script, '--h'], capture_output=True, text=True) + result = subprocess.run( + ['python', script, '--h'], capture_output=True, text=True) # Save the output to the hidden file with open(help_file, 'w') as f: @@ -211,8 +219,6 @@ def _generate_help_files(): print(f'Regenerated help output for {script.name}') - - def _highlight_keywords(text, stemmed_keywords): """ Highlight the stemmed keywords in the given text using colorama. @@ -234,11 +240,13 @@ def _highlight_keywords(text, stemmed_keywords): for word in words: stemmed_word = stemmer.stem(word) if stemmed_word in stemmed_keywords: - highlighted_text.append(f'{Fore.RED}{Style.BRIGHT}{word}{Style.RESET_ALL}') + highlighted_text.append( + f'{Fore.RED}{Style.BRIGHT}{word}{Style.RESET_ALL}') else: highlighted_text.append(word) return ' '.join(highlighted_text) + def _get_synonyms(keyword, synonyms_data): """ Get synonyms for a given keyword from the synonyms data. @@ -262,6 +270,7 @@ def _get_synonyms(keyword, synonyms_data): return synonym_set return [] + def _extract_keywords_and_phrases(keywords): """ Extract keywords and phrases from the provided list. @@ -280,12 +289,14 @@ def _extract_keywords_and_phrases(keywords): phrases_list = [] for keyword in keywords: - if ' ' in keyword: #if keyword contain blank space (contains more that 1 word) + # if keyword contain blank space (contains more that 1 word) + if ' ' in keyword: phrases_list.append(keyword) else: keywords_list.append(keyword) return keywords_list, phrases_list + def _calculate_score(keywords, phrases, text, filename): """ Calculate a score for how well the text and filename match the keywords. @@ -307,14 +318,15 @@ def _calculate_score(keywords, phrases, text, filename): Score details based on the frequency of keywords in the text and filename. 
""" stemmed_text = _stem_text(text.lower()) - stemmed_filename = _stem_text(filename.lower()) + stemmed_filename = _stem_text(filename.lower()) score_details = {'total_score': 0} for keyword in keywords: keyword = keyword.lower() # Use regular expressions to match whole words only keyword_pattern = re.compile(r'\b' + re.escape(keyword) + r'\b') - keyword_score = len(keyword_pattern.findall(stemmed_text)) + len(keyword_pattern.findall(stemmed_filename)) + keyword_score = len(keyword_pattern.findall( + stemmed_text)) + len(keyword_pattern.findall(stemmed_filename)) score_details[keyword] = keyword_score score_details['total_score'] += keyword_score @@ -324,4 +336,3 @@ def _calculate_score(keywords, phrases, text, filename): score_details[phrase] = phrase_score score_details['total_score'] += phrase_score return score_details - diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 292d3a263..f8ddc1669 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -46,16 +46,15 @@ def _build_arg_parser(): p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) - + p.add_argument('keywords', nargs='+', help='Search the provided list of keywords.') - + p.add_argument('--search_category', action='store_true', help='Search within a specific category of scripts.') - - p.add_argument('--no_synonyms', action='store_true', - help='Search without using synonyms.') + p.add_argument('--no_synonyms', action='store_true', + help='Search without using synonyms.') add_verbose_arg(p) @@ -74,14 +73,16 @@ def main(): if args.search_category: selected_object = prompt_user_for_object() - #keywords are single words and phrases are keywords that contain more than one word + # keywords are single words and phrases are keywords that contain more than one word keywords, phrases = _extract_keywords_and_phrases(args.keywords) stemmed_keywords = _stem_keywords(keywords) stemmed_phrases = [_stem_phrase(phrase) for phrase in phrases] # Create a mapping of stemmed to original keywords(will be needed to display the occurence of the keywords) - keyword_mapping = {stem: orig for orig, stem in zip(keywords, stemmed_keywords)} - phrase_mapping = {stem: orig for orig, stem in zip(phrases, stemmed_phrases)} + keyword_mapping = {stem: orig for orig, + stem in zip(keywords, stemmed_keywords)} + phrase_mapping = {stem: orig for orig, + stem in zip(phrases, stemmed_phrases)} script_dir = pathlib.Path(__file__).parent hidden_dir = script_dir / '.hidden' @@ -94,7 +95,6 @@ def main(): 'Generating help files....') _generate_help_files() - matches = [] scores = {} @@ -112,7 +112,7 @@ def update_matches_and_scores(filename, score_details): score_details : dict A dictionary containing the scores for the keywords and phrases found in the script. This dictionary should have a 'total_score' key indicating the cumulative score. 
- + Returns ------- None @@ -125,29 +125,30 @@ def update_matches_and_scores(filename, score_details): else: for key, value in score_details.items(): if key != 'total_score': - scores[filename][key] = scores[filename].get(key, 0) + value + scores[filename][key] = scores[filename].get( + key, 0) + value scores[filename]['total_score'] += score_details['total_score'] for script in sorted(script_dir.glob(search_pattern.format(selected_object))): filename = script.stem - if filename == '__init__' or filename =='scil_search_keywords': + if filename == '__init__' or filename == 'scil_search_keywords': continue - + # Search through the docstring search_text = _get_docstring_from_script_path(str(script)) - score_details = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, filename=filename) + score_details = _calculate_score( + stemmed_keywords, stemmed_phrases, search_text, filename=filename) update_matches_and_scores(filename, score_details) - # Search in help files help_file = hidden_dir / f"{filename}.py.help" if help_file.exists(): with open(help_file, 'r') as f: search_text = f.read() - score_details = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, filename=filename) + score_details = _calculate_score( + stemmed_keywords, stemmed_phrases, search_text, filename=filename) update_matches_and_scores(filename, score_details) - # Search in keywords file with open(VOCAB_FILE_PATH, 'r') as f: vocab_data = json.load(f) @@ -158,12 +159,12 @@ def update_matches_and_scores(filename, score_details): continue script_keywords = script['keywords'] search_text = ' '.join(script_keywords) - score_details = _calculate_score(stemmed_keywords, stemmed_phrases, search_text, script_name) + score_details = _calculate_score( + stemmed_keywords, stemmed_phrases, search_text, script_name) update_matches_and_scores(script_name, score_details) - # Search in synonyms file if not args.no_synonyms is not specified - if not args.no_synonyms: + if not args.no_synonyms: for keyword in keywords + phrases: synonyms = _get_synonyms(keyword, vocab_data['synonyms']) for script in sorted(script_dir.glob(search_pattern.format(selected_object))): @@ -171,14 +172,17 @@ def update_matches_and_scores(filename, score_details): if filename == '__init__' or filename == 'scil_search_keywords': continue search_text = _get_docstring_from_script_path(str(script)) - score_details = scores.get(filename, {'total_score': 0}) # Initialize or get existing score_details for the script - + # Initialize or get existing score_details for the script + score_details = scores.get(filename, {'total_score': 0}) + for synonym in synonyms: if synonym in search_text: # Update the score_details with the count of each synonym found - score_details[keyword+' synonyms'] = score_details.get(keyword +' synonyms', 0) + search_text.count(synonym) - score_details['total_score'] += search_text.count(synonym) - + score_details[keyword+' synonyms'] = score_details.get( + keyword + ' synonyms', 0) + search_text.count(synonym) + score_details['total_score'] += search_text.count( + synonym) + update_matches_and_scores(filename, score_details) if not matches: @@ -186,7 +190,8 @@ def update_matches_and_scores(filename, score_details): # Sort matches by score and display them else: - sorted_matches = sorted(matches, key=lambda x: scores[x]['total_score'], reverse=False) + sorted_matches = sorted( + matches, key=lambda x: scores[x]['total_score'], reverse=False) logging.info(_make_title(' Results Ordered by Score ')) for match in sorted_matches: @@ 
-195,17 +200,20 @@ def update_matches_and_scores(filename, score_details): for word, score in scores[match].items(): if word != 'total_score': if word.endswith(' synonyms'): - logging.info(f"{Fore.GREEN}Occurrence of '{word}': {score}{Style.RESET_ALL}") + logging.info( + f"{Fore.GREEN}Occurrence of '{word}': {score}{Style.RESET_ALL}") else: - original_word = keyword_mapping.get(word, phrase_mapping.get(word, word)) - logging.info(f"{Fore.GREEN}Occurrence of '{original_word}': {score}{Style.RESET_ALL}") + original_word = keyword_mapping.get( + word, phrase_mapping.get(word, word)) + logging.info( + f"{Fore.GREEN}Occurrence of '{original_word}': {score}{Style.RESET_ALL}") logging.info(f"Total Score: {scores[match]['total_score']}") logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") logging.info("\n") - logging.info(_make_title(' Results Ordered by Score (Best results at the bottom) ')) - + logging.info(_make_title( + ' Results Ordered by Score (Best results at the bottom) ')) if __name__ == '__main__': - main() \ No newline at end of file + main() From 62ac325e75538ecad52f395f53ac1663bd3ee562 Mon Sep 17 00:00:00 2001 From: jinan02 Date: Mon, 5 Aug 2024 20:22:18 -0400 Subject: [PATCH 45/69] add a test for the search_category and no_synonyms args --- scripts/tests/test_search_keywords.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/scripts/tests/test_search_keywords.py b/scripts/tests/test_search_keywords.py index 5eda2b4fc..ba94b9b5b 100644 --- a/scripts/tests/test_search_keywords.py +++ b/scripts/tests/test_search_keywords.py @@ -6,17 +6,15 @@ def test_help_option(script_runner): ret = script_runner.run('scil_search_keywords.py', '--help') assert ret.success +def test_search_category(script_runner): + ret = script_runner.run('scil_search_keywords.py', '--search_category', 'sh') + assert 'Available objects:' in ret.stdout -def test_no_verbose(script_runner): - ret = script_runner.run('scil_search_keywords.py', 'mti') +def test_no_synonyms(script_runner): + ret = script_runner.run('scil_search_keywords.py', 'sh', '--no_synonyms') assert ret.success - -def test_verbose_option(script_runner): - ret = script_runner.run('scil_search_keywords.py', 'mti', '-v') - assert ret.success - - -def test_not_find(script_runner): +def test_not_found(script_runner): ret = script_runner.run('scil_search_keywords.py', 'toto') assert ret.success + assert 'No results found!' in ret.stdout or 'No results found!' 
in ret.stderr From 8ef24dc8e5df80920f46b372e397286ea11d02be Mon Sep 17 00:00:00 2001 From: jinan02 Date: Mon, 5 Aug 2024 20:48:45 -0400 Subject: [PATCH 46/69] run flake8 on the changed scripts --- scilpy/utils/scilpy_bot.py | 36 ++++++++++++-------- scripts/scil_search_keywords.py | 49 +++++++++++++++++---------- scripts/tests/test_search_keywords.py | 3 ++ 3 files changed, 55 insertions(+), 33 deletions(-) diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py index 3270b6c9d..3575874dc 100644 --- a/scilpy/utils/scilpy_bot.py +++ b/scilpy/utils/scilpy_bot.py @@ -3,7 +3,7 @@ import pathlib import subprocess from nltk.stem import PorterStemmer -from colorama import init, Fore, Style +from colorama import Fore, Style import re stemmer = PorterStemmer() @@ -20,10 +20,12 @@ OBJECTS = [ - 'aodf', 'bids', 'bingham', 'btensor', 'bundle', 'connectivity', 'denoising', - 'dki', 'dti', 'dwi', 'fodf', 'freewater', 'frf', 'gradients', 'header', 'json', - 'labels', 'lesions', 'mti', 'NODDI', 'sh', 'surface', 'tracking', - 'tractogram', 'viz', 'volume', 'qball', 'rgb', 'lesions' + 'aodf', 'bids', 'bingham', 'btensor', 'bundle', + 'connectivity', 'denoising', 'dki', 'dti', 'dwi', + 'fodf', 'freewater', 'frf', 'gradients', 'header', + 'json', 'labels', 'lesions', 'mti', 'NODDI', 'sh', + 'surface', 'tracking', 'tractogram', 'viz', 'volume', + 'qball', 'rgb', 'lesions' ] @@ -37,7 +39,8 @@ def prompt_user_for_object(): while True: try: choice = int( - input("Choose the object you want to work on (enter the number): ")) + input("Choose the object you want to work on " + "(enter the number): ")) if 1 <= choice <= len(OBJECTS): return OBJECTS[choice - 1] else: @@ -161,23 +164,25 @@ def _stem_phrase(phrase): def _generate_help_files(): """ - This function iterates over all Python scripts in the 'scripts' directory, - runs each script with the '--h' flag to generate help text, - and saves the output to corresponding hidden files in the '.hidden' directory. + This function iterates over all Python scripts in the 'scripts' directory, + runs each script with the '--h' flag to generate help text, + and saves the output in the '.hidden' directory. - By doing this, we can precompute the help outputs for each script, + By doing this, we can precompute the help outputs for each script, which can be useful for faster searches. - If a help file already exists for a script, the script is skipped, + If a help file already exists for a script, the script is skipped, and the existing help file is left unchanged. - The help output is saved in a hidden directory to avoid clutter in the main scripts directory. + The help output is saved in a hidden directory to avoid clutter in + the main scripts directory. """ scripts_dir = pathlib.Path(__file__).parent.parent.parent / 'scripts' - scripts = [script for script in scripts_dir.glob( - '*.py') if script.name not in ['__init__.py', 'scil_search_keywords.py']] + scripts = [script for script in scripts_dir.glob('*.py') + if script.name not in ['__init__.py', + 'scil_search_keywords.py']] total_scripts = len(scripts) # Hidden directory to store help files @@ -315,7 +320,8 @@ def _calculate_score(keywords, phrases, text, filename): Returns ------- dict - Score details based on the frequency of keywords in the text and filename. + Score details based on the frequency of keywords + in the text and filename. 
""" stemmed_text = _stem_text(text.lower()) stemmed_filename = _stem_text(filename.lower()) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index f8ddc1669..34e03c1fe 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -2,21 +2,25 @@ # -*- coding: utf-8 -*- """ -Search through all SCILPY scripts and their docstrings to find matches for the provided keywords. -The search will be performed across script names, docstrings, help files, keywords, and optionally synonyms. -The output will list the matching filenames along with the occurrences of each keyword, and their total score. +Search through all SCILPY scripts and their docstrings to find matches for the +provided keywords. +The search will be performed across script names, docstrings, help files, +keywords, and optionally synonyms. +The output will list the matching filenames along with the occurrences of each +keyword, and their total score. - By default, the search includes synonyms for the keywords. - Use --no_synonyms to exclude synonyms from the search. - Use --search_category to limit the search to a specific category of scripts. -- Words enclosed in quotes will be searched as phrases, ensuring the words appear next to each other in the text. +- Words enclosed in quotes will be searched as phrases, ensuring the words +appear next to each other in the text. Examples: - scil_search_keywords.py tractogram filtering - scil_search_keywords.py "Spherical Harmonics" convert - scil_search_keywords.py --no_synonyms tractogram filtering - scil_search_keywords.py --search_category tractogram filtering +- scil_search_keywords.py tractogram filtering +- scil_search_keywords.py "Spherical Harmonics" convert +- scil_search_keywords.py --no_synonyms tractogram filtering +- scil_search_keywords.py --search_category tractogram filtering """ import argparse @@ -27,15 +31,18 @@ import nltk nltk.download('punkt', quiet=True) except ImportError: - print("You must install the 'nltk' package to use this script. Please run 'pip install nltk'.") + print("You must install the 'nltk' package to use this script." + "Please run 'pip install nltk'.") exit(1) from colorama import Fore, Style import json from scilpy.utils.scilpy_bot import ( - _get_docstring_from_script_path, _stem_keywords, _stem_phrase, _generate_help_files, - _get_synonyms, _extract_keywords_and_phrases, _calculate_score, _make_title, prompt_user_for_object + _get_docstring_from_script_path, _stem_keywords, + _stem_phrase, _generate_help_files, + _get_synonyms, _extract_keywords_and_phrases, + _calculate_score, _make_title, prompt_user_for_object ) from scilpy.utils.scilpy_bot import SPACING_LEN, VOCAB_FILE_PATH from scilpy.io.utils import add_verbose_arg @@ -73,12 +80,13 @@ def main(): if args.search_category: selected_object = prompt_user_for_object() - # keywords are single words and phrases are keywords that contain more than one word + # keywords are single words. 
Phrases are composed keywords
     keywords, phrases = _extract_keywords_and_phrases(args.keywords)
     stemmed_keywords = _stem_keywords(keywords)
     stemmed_phrases = [_stem_phrase(phrase) for phrase in phrases]
-    # Create a mapping of stemmed to original keywords(will be needed to display the occurence of the keywords)
+    # Create a mapping of stemmed to original keywords
+    # This will be needed to display the occurrence of the keywords
     keyword_mapping = {stem: orig for orig,
                        stem in zip(keywords, stemmed_keywords)}
     phrase_mapping = {stem: orig for orig,
@@ -90,7 +98,8 @@ def main():
     if not hidden_dir.exists():
         hidden_dir.mkdir()
         logging.info('This is your first time running this script.\n'
-                     'Generating help files may take a few minutes, please be patient.\n'
+                     'Generating help files may take a few minutes,'
+                     'please be patient.\n'
                      'Subsequent searches will be much faster.\n'
                      'Generating help files....')
         _generate_help_files()
@@ -103,15 +112,18 @@ def main():

     def update_matches_and_scores(filename, score_details):
         """
-        Update the matches and scores for the given filename based on the score details.
+        Update the matches and scores for the given filename based
+        on the score details.

         Parameters
         ----------
         filename : str
             The name of the script file being analyzed.
         score_details : dict
-            A dictionary containing the scores for the keywords and phrases found in the script.
-            This dictionary should have a 'total_score' key indicating the cumulative score.
+            A dictionary containing the scores for the keywords
+            and phrases found in the script.
+            This dictionary should have a 'total_score' key
+            indicating the cumulative score.

         Returns
         -------
@@ -146,7 +158,8 @@ def update_matches_and_scores(filename, score_details):
         with open(help_file, 'r') as f:
             search_text = f.read()
         score_details = _calculate_score(
-            stemmed_keywords, stemmed_phrases, search_text, filename=filename)
+            stemmed_keywords, stemmed_phrases,
+            search_text, filename=filename)
         update_matches_and_scores(filename, score_details)

     # Search in keywords file
diff --git a/scripts/tests/test_search_keywords.py b/scripts/tests/test_search_keywords.py
index ba94b9b5b..44dd6c907 100644
--- a/scripts/tests/test_search_keywords.py
+++ b/scripts/tests/test_search_keywords.py
@@ -6,14 +6,17 @@ def test_help_option(script_runner):
     ret = script_runner.run('scil_search_keywords.py', '--help')
     assert ret.success

+
 def test_search_category(script_runner):
     ret = script_runner.run('scil_search_keywords.py', '--search_category', 'sh')
     assert 'Available objects:' in ret.stdout

+
 def test_no_synonyms(script_runner):
     ret = script_runner.run('scil_search_keywords.py', 'sh', '--no_synonyms')
     assert ret.success

+
 def test_not_found(script_runner):
     ret = script_runner.run('scil_search_keywords.py', 'toto')
     assert ret.success
     assert 'No results found!' in ret.stdout or 'No results found!' in ret.stderr
From c97a583739db7a8285512eba604ee8964964088c Mon Sep 17 00:00:00 2001
From: jinan02
Date: Wed, 7 Aug 2024 15:50:07 -0400
Subject: [PATCH 47/69] add unit tests for scilpy_bot.py

---
 scilpy/utils/tests/test_scilpy_bot.py | 67 +++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)
 create mode 100644 scilpy/utils/tests/test_scilpy_bot.py

diff --git a/scilpy/utils/tests/test_scilpy_bot.py b/scilpy/utils/tests/test_scilpy_bot.py
new file mode 100644
index 000000000..147b792dc
--- /dev/null
+++ b/scilpy/utils/tests/test_scilpy_bot.py
@@ -0,0 +1,67 @@
+
+from scilpy.utils.scilpy_bot import (
+    _make_title, _get_docstring_from_script_path,
+    _split_first_sentence, _stem_keywords, _stem_text, _stem_phrase,
+    _highlight_keywords, _get_synonyms,
+
_extract_keywords_and_phrases, _calculate_score, VOCAB_FILE_PATH +) + +def test_make_title(): + result = _make_title("Test Title") + assert "Test Title" in result + +def test_get_docstring_from_script_path(tmp_path): + script_content = '"""This is a test docstring."""' + script_path = tmp_path / "test_script.py" + script_path.write_text(script_content) + result = _get_docstring_from_script_path(str(script_path)) + assert result == "This is a test docstring." + +def test_split_first_sentence(): + text = "This is the first sentence. This is the second sentence." + first, remaining = _split_first_sentence(text) + assert first == "This is the first sentence." + assert remaining == " This is the second sentence." + +def test_stem_keywords(): + keywords = ["running", "jumps"] + result = _stem_keywords(keywords) + assert result == ["run", "jump"] + +def test_stem_text(): + text = "Running and jumping." + result = _stem_text(text) + assert result == "run and jump ." + +def test_stem_phrase(): + phrase = "Running quickly" + result = _stem_phrase(phrase) + assert result == "run quickli" + + +def test_highlight_keywords(): + text = "Running and jumping." + stemmed_keywords = ["run"] + result = _highlight_keywords(text, stemmed_keywords) + assert "Running" in result + +def test_get_synonyms(): + synonyms_data = [["run", "sprint"], ["jump", "leap"]] + result = _get_synonyms("run", synonyms_data) + assert result == ["run", "sprint"] + +def test_extract_keywords_and_phrases(): + keywords = ["running", "jumps", "quick run"] + result_keywords, result_phrases = _extract_keywords_and_phrases(keywords) + assert result_keywords == ["running", "jumps"] + assert result_phrases == ["quick run"] + +def test_calculate_score(): + keywords = ["run"] + phrases = ["quick run"] + text = "Running quickly is fun. A quick run is good." + filename = "run_script.py" + result = _calculate_score(keywords, phrases, text, filename) + assert result["total_score"] == 3 + assert result["run"] == 2 + assert result["quick run"] == 1 \ No newline at end of file From a39552d67fd339d0e32f4115b801fcd925cea320 Mon Sep 17 00:00:00 2001 From: jinan02 Date: Thu, 8 Aug 2024 13:55:56 -0400 Subject: [PATCH 48/69] pep8 on test_scilpy_bot.py --- scilpy/utils/tests/test_scilpy_bot.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/scilpy/utils/tests/test_scilpy_bot.py b/scilpy/utils/tests/test_scilpy_bot.py index 147b792dc..5574a1ce4 100644 --- a/scilpy/utils/tests/test_scilpy_bot.py +++ b/scilpy/utils/tests/test_scilpy_bot.py @@ -3,13 +3,15 @@ _make_title, _get_docstring_from_script_path, _split_first_sentence, _stem_keywords, _stem_text, _stem_phrase, _highlight_keywords, _get_synonyms, - _extract_keywords_and_phrases, _calculate_score, VOCAB_FILE_PATH + _extract_keywords_and_phrases, _calculate_score ) + def test_make_title(): result = _make_title("Test Title") assert "Test Title" in result + def test_get_docstring_from_script_path(tmp_path): script_content = '"""This is a test docstring."""' script_path = tmp_path / "test_script.py" @@ -17,22 +19,26 @@ def test_get_docstring_from_script_path(tmp_path): result = _get_docstring_from_script_path(str(script_path)) assert result == "This is a test docstring." + def test_split_first_sentence(): text = "This is the first sentence. This is the second sentence." first, remaining = _split_first_sentence(text) assert first == "This is the first sentence." assert remaining == " This is the second sentence." 
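The expected strings in these stemming tests come straight from NLTK's PorterStemmer, which scilpy_bot relies on for keyword matching. A minimal sanity check of the stemmer, assuming nltk is installed (the expected outputs mirror the assertions in this test file):

from nltk.stem import PorterStemmer

stemmer = PorterStemmer()
# Porter stems are matching tokens, not dictionary words:
print(stemmer.stem('running'))  # run
print(stemmer.stem('jumps'))    # jump
print(stemmer.stem('quickly'))  # quickli

This is why test_stem_phrase below expects 'run quickli' rather than readable English: keywords and docstrings are reduced to the same token form before they are compared.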
+
 def test_stem_keywords():
     keywords = ["running", "jumps"]
     result = _stem_keywords(keywords)
     assert result == ["run", "jump"]

+
 def test_stem_text():
     text = "Running and jumping."
     result = _stem_text(text)
     assert result == "run and jump ."

+
 def test_stem_phrase():
     phrase = "Running quickly"
     result = _stem_phrase(phrase)
@@ -45,17 +51,20 @@ def test_highlight_keywords():
     result = _highlight_keywords(text, stemmed_keywords)
     assert "Running" in result

+
 def test_get_synonyms():
     synonyms_data = [["run", "sprint"], ["jump", "leap"]]
     result = _get_synonyms("run", synonyms_data)
     assert result == ["run", "sprint"]

+
 def test_extract_keywords_and_phrases():
     keywords = ["running", "jumps", "quick run"]
     result_keywords, result_phrases = _extract_keywords_and_phrases(keywords)
     assert result_keywords == ["running", "jumps"]
     assert result_phrases == ["quick run"]

+
 def test_calculate_score():
     keywords = ["run"]
     phrases = ["quick run"]
@@ -64,4 +73,4 @@ def test_calculate_score():
     result = _calculate_score(keywords, phrases, text, filename)
     assert result["total_score"] == 3
     assert result["run"] == 2
-    assert result["quick run"] == 1
\ No newline at end of file
+    assert result["quick run"] == 1
From 9f7705bfdcc1b4af1552e3707163411c948320ce Mon Sep 17 00:00:00 2001
From: jinan02
Date: Thu, 15 Aug 2024 11:22:27 -0400
Subject: [PATCH 49/69] add progress bar to show progress of help files
 generation

---
 scilpy/utils/scilpy_bot.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py
index 3575874dc..091c80ff7 100644
--- a/scilpy/utils/scilpy_bot.py
+++ b/scilpy/utils/scilpy_bot.py
@@ -5,6 +5,7 @@
 from nltk.stem import PorterStemmer
 from colorama import Fore, Style
 import re
+from tqdm import tqdm
 
 stemmer = PorterStemmer()
 
@@ -190,7 +191,7 @@ def _generate_help_files():
     hidden_dir.mkdir(exist_ok=True)
 
     # Iterate over all scripts and generate help files
-    for idx, script in enumerate(scripts, start=1):
+    for idx, script in enumerate(tqdm(scripts,desc="Generating help files", total=total_scripts), start=1):
         help_file = hidden_dir / f'{script.name}.help'
         # Check if help file already exists
         if help_file.exists():
From 7ce9b6cea4e6237164d99a9b96f9d3bccfda0db9 Mon Sep 17 00:00:00 2001
From: jinan02
Date: Thu, 15 Aug 2024 13:20:08 -0400
Subject: [PATCH 50/69] run autopep8 on the scilpy_bot.py script

---
 scilpy/utils/scilpy_bot.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py
index 091c80ff7..b5552be27 100644
--- a/scilpy/utils/scilpy_bot.py
+++ b/scilpy/utils/scilpy_bot.py
@@ -191,7 +191,7 @@ def _generate_help_files():
     hidden_dir.mkdir(exist_ok=True)
 
     # Iterate over all scripts and generate help files
-    for idx, script in enumerate(tqdm(scripts,desc="Generating help files", total=total_scripts), start=1):
+    for idx, script in enumerate(tqdm(scripts, desc="Generating help files", total=total_scripts), start=1):
         help_file = hidden_dir / f'{script.name}.help'
         # Check if help file already exists
         if help_file.exists():
From
c055d6981eff63be09804d16bd30f7e404afdc46 Mon Sep 17 00:00:00 2001 From: jinan02 Date: Sat, 17 Aug 2024 14:32:12 -0400 Subject: [PATCH 51/69] use logging library to display the messages --- scilpy/utils/scilpy_bot.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py index b5552be27..dbf9c84f9 100644 --- a/scilpy/utils/scilpy_bot.py +++ b/scilpy/utils/scilpy_bot.py @@ -6,6 +6,7 @@ from colorama import Fore, Style import re from tqdm import tqdm +import logging stemmer = PorterStemmer() @@ -195,7 +196,7 @@ def _generate_help_files(): help_file = hidden_dir / f'{script.name}.help' # Check if help file already exists if help_file.exists(): - print(f'Help file for {script.name} already exists. Skipping.') + logging.debug(f'Help file for {script.name} already exists. Skipping.') continue # Run the script with --h and capture the output @@ -206,7 +207,7 @@ def _generate_help_files(): with open(help_file, 'w') as f: f.write(result.stdout) - print(f'Help file saved to {help_file}({idx}/{total_scripts})') + logging.debug(f'Help file saved to {help_file}({idx}/{total_scripts})') # Check if any help files are missing and regenerate them for script in tqdm(scripts_dir.glob('*.py'), desc="Checking missing help files", total=total_scripts): @@ -222,7 +223,7 @@ def _generate_help_files(): with open(help_file, 'w') as f: f.write(result.stdout) - print(f'Regenerated help output for {script.name}') + logging.debug(f'Regenerated help output for {script.name}') def _highlight_keywords(text, stemmed_keywords): From b4cbaf33b370f8b03c7016ce537d1112dfc4dc97 Mon Sep 17 00:00:00 2001 From: jinan02 Date: Sat, 17 Aug 2024 14:54:13 -0400 Subject: [PATCH 52/69] display one progress bar for the files generation --- scilpy/utils/scilpy_bot.py | 52 +++++++++++++++++++++----------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py index dbf9c84f9..773b4f1b4 100644 --- a/scilpy/utils/scilpy_bot.py +++ b/scilpy/utils/scilpy_bot.py @@ -192,29 +192,15 @@ def _generate_help_files(): hidden_dir.mkdir(exist_ok=True) # Iterate over all scripts and generate help files - for idx, script in enumerate(tqdm(scripts, desc="Generating help files", total=total_scripts), start=1): - help_file = hidden_dir / f'{script.name}.help' - # Check if help file already exists - if help_file.exists(): - logging.debug(f'Help file for {script.name} already exists. Skipping.') - continue + with tqdm(total=total_scripts, desc="Generating help files") as pbar: + for script in scripts: + help_file = hidden_dir / f'{script.name}.help' + # Check if help file already exists + if help_file.exists(): + logging.debug(f'Help file for {script.name} already exists. 
Skipping.') + pbar.update(1) + continue - # Run the script with --h and capture the output - result = subprocess.run( - ['python', script, '--h'], capture_output=True, text=True) - - # Save the output to the hidden file - with open(help_file, 'w') as f: - f.write(result.stdout) - - logging.debug(f'Help file saved to {help_file}({idx}/{total_scripts})') - - # Check if any help files are missing and regenerate them - for script in tqdm(scripts_dir.glob('*.py'), desc="Checking missing help files", total=total_scripts): - if script.name == '__init__.py' or script.name == 'scil_search_keywords.py': - continue - help_file = hidden_dir / f'{script.name}.help' - if not help_file.exists(): # Run the script with --h and capture the output result = subprocess.run( ['python', script, '--h'], capture_output=True, text=True) @@ -223,7 +209,27 @@ def _generate_help_files(): with open(help_file, 'w') as f: f.write(result.stdout) - logging.debug(f'Regenerated help output for {script.name}') + logging.debug(f'Help file saved to {help_file}') + pbar.update(1) + + # Check if any help files are missing and regenerate them + with tqdm(total=total_scripts, desc="Checking missing help files") as pbar: + for script in scripts_dir.glob('*.py'): + if script.name == '__init__.py' or script.name == 'scil_search_keywords.py': + pbar.update(1) + continue + help_file = hidden_dir / f'{script.name}.help' + if not help_file.exists(): + # Run the script with --h and capture the output + result = subprocess.run( + ['python', script, '--h'], capture_output=True, text=True) + + # Save the output to the hidden file + with open(help_file, 'w') as f: + f.write(result.stdout) + + logging.debug(f'Regenerated help output for {script.name}') + pbar.update(1) def _highlight_keywords(text, stemmed_keywords): From 6a166d273fc4dcfeefd512a5e84fd9025f65e9f9 Mon Sep 17 00:00:00 2001 From: jinan02 Date: Sat, 17 Aug 2024 14:57:01 -0400 Subject: [PATCH 53/69] modify the message that is displayed when the hidden file does not exist --- scripts/scil_search_keywords.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 34e03c1fe..84f5e1235 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -100,8 +100,7 @@ def main(): logging.info('This is your first time running this script.\n' 'Generating help files may take a few minutes,' 'please be patient.\n' - 'Subsequent searches will be much faster.\n' - 'Generating help files....') + 'Subsequent searches will be much faster.') _generate_help_files() matches = [] From 6e8949f45eb77b6a13b530ab1ac409de11d7fc1c Mon Sep 17 00:00:00 2001 From: jinan02 Date: Sat, 17 Aug 2024 17:16:02 -0400 Subject: [PATCH 54/69] used tqdm.write to display the messages instead of logging --- scilpy/utils/scilpy_bot.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py index 773b4f1b4..35c3c0215 100644 --- a/scilpy/utils/scilpy_bot.py +++ b/scilpy/utils/scilpy_bot.py @@ -7,7 +7,6 @@ import re from tqdm import tqdm import logging - stemmer = PorterStemmer() RED = '\033[31m' @@ -197,7 +196,7 @@ def _generate_help_files(): help_file = hidden_dir / f'{script.name}.help' # Check if help file already exists if help_file.exists(): - logging.debug(f'Help file for {script.name} already exists. Skipping.') + tqdm.write(f'Help file for {script.name} already exists. 
Skipping.')
                 pbar.update(1)
                 continue
 
@@ -209,7 +208,7 @@
         with open(help_file, 'w') as f:
             f.write(result.stdout)
 
-        logging.debug(f'Help file saved to {help_file}')
+        tqdm.write(f'Help file saved to {help_file}')
         pbar.update(1)
 
     # Check if any help files are missing and regenerate them
@@ -228,10 +227,11 @@
         with open(help_file, 'w') as f:
             f.write(result.stdout)
 
-        logging.debug(f'Regenerated help output for {script.name}')
+        tqdm.write(f'Regenerated help output for {script.name}')
         pbar.update(1)
 
 
+
 def _highlight_keywords(text, stemmed_keywords):
     """
     Highlight the stemmed keywords in the given text using colorama.

From fea23ec49155a790af1c3d95ecfd770fa39ef6e9 Mon Sep 17 00:00:00 2001
From: jinan02
Date: Sat, 17 Aug 2024 17:18:15 -0400
Subject: [PATCH 55/69] lowercase the name of the vocabulary file and folder

---
 scilpy/utils/scilpy_bot.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py
index 35c3c0215..2a3791c11 100644
--- a/scilpy/utils/scilpy_bot.py
+++ b/scilpy/utils/scilpy_bot.py
@@ -17,7 +17,7 @@

 # Path to the JSON file containing script information and keywords
 VOCAB_FILE_PATH = pathlib.Path(
-    __file__).parent.parent.parent/'data' / 'Vocabulary'/'Vocabulary.json'
+    __file__).parent.parent.parent/'data'/ 'vocabulary'/'vocabulary.json'


 OBJECTS = [

From c48d476ac56c15536587db5d331455fa90d7506f Mon Sep 17 00:00:00 2001
From: jinan02
Date: Sat, 17 Aug 2024 17:19:02 -0400
Subject: [PATCH 56/69] change folder and file name in data folder

---
 data/{Vocabulary/Vocabulary.json => vocabulary/vocabulary.json} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename data/{Vocabulary/Vocabulary.json => vocabulary/vocabulary.json} (100%)

diff --git a/data/Vocabulary/Vocabulary.json b/data/vocabulary/vocabulary.json
similarity index 100%
rename from data/Vocabulary/Vocabulary.json
rename to data/vocabulary/vocabulary.json

From a99f85fbb2be92b31f2f63c05b09b02ce2f53ae1 Mon Sep 17 00:00:00 2001
From: jinan02
Date: Sun, 18 Aug 2024 13:03:02 -0400
Subject: [PATCH 57/69] style: added missing whitespace

---
 scripts/scil_search_keywords.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py
index 84f5e1235..2824dbb72 100755
--- a/scripts/scil_search_keywords.py
+++ b/scripts/scil_search_keywords.py
@@ -190,7 +190,7 @@ def update_matches_and_scores(filename, score_details):
             for synonym in synonyms:
                 if synonym in search_text:
                     # Update the score_details with the count of each synonym found
-                    score_details[keyword+' synonyms'] = score_details.get(
+                    score_details[keyword + ' synonyms'] = score_details.get(
                         keyword + ' synonyms', 0) + search_text.count(synonym)
                     score_details['total_score'] += search_text.count(
                         synonym)

From 944ddced2d01f2ac692ac151a4acd06d9c2de85c Mon Sep 17 00:00:00 2001
From: jinan02
Date: Sun, 18 Aug 2024 16:58:44 -0400
Subject: [PATCH 58/69] downcase all vocabulary

---
 data/vocabulary/vocabulary.json | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/data/vocabulary/vocabulary.json b/data/vocabulary/vocabulary.json
index 84babcad5..03ce39584 100644
--- a/data/vocabulary/vocabulary.json
+++ b/data/vocabulary/vocabulary.json
@@ -262,7 +262,7 @@
     {
         "name": "scil_dwi_reorder_philips.py",
         "keywords": [
-            "Philips",
+            "philips",
            "DWI reorder",
            "original gradient table"
        ]
@@ -318,8 +318,8 @@
            "directions",
            "peak values",
            "peak indices",
-            "rgb",
-            
"afd" + "RGB", + "AFD" ] }, { @@ -712,7 +712,7 @@ "name": "scil_tracking_pft_maps_edit.py", "keywords": [ "particule filtering tractography", - "cmc" + "CMC" ] }, { @@ -721,8 +721,8 @@ "particle filter tractography", "continuous map criterion", "tracking", - "fodf", - "cmc", + "FODF", + "CMC", "particle filtering tractography" ] }, @@ -732,7 +732,7 @@ "particle filter tractography", "continuous map criterion", "tracking", - "fodf" + "FODF" ] }, { @@ -766,7 +766,7 @@ "keywords": [ "microstructure informed", "filtering", - "mit" + "MIT" ] }, { @@ -958,9 +958,9 @@ "name": "scil_viz_fodf.py", "keywords": [ "visualize", - "fiber odf", - "odf", - "sh", + "fiber ODF", + "ODF", + "SH", "peaks", "background" ] @@ -1060,7 +1060,7 @@ ], "synonyms": [ [ - "Bundle", + "bundle", "tract", "pathway", "fasciculus" @@ -1083,11 +1083,11 @@ ], [ "ODF", - "Orientation Distribution Function" + "orientation distribution function" ], [ "DWI", - "Diffusion Weighted Imaging" + "diffusion weighted imaging" ], [ "shell", From b07e190aa6717159934e3aff24d144ded6eaecdf Mon Sep 17 00:00:00 2001 From: jinan02 Date: Mon, 19 Aug 2024 01:01:44 -0400 Subject: [PATCH 59/69] change acronyms format --- data/vocabulary/vocabulary.json | 1538 +++++-------------------------- 1 file changed, 239 insertions(+), 1299 deletions(-) diff --git a/data/vocabulary/vocabulary.json b/data/vocabulary/vocabulary.json index 03ce39584..ff1fdc3be 100644 --- a/data/vocabulary/vocabulary.json +++ b/data/vocabulary/vocabulary.json @@ -1116,1303 +1116,243 @@ "divide" ] ], - "acronyms": - [ - { - "abbreviation": "k-nn", - "Description": "k-nearest neighbors" - }, - { - "abbreviation": "1d", - "Description": "one-dimensional" - }, - { - "abbreviation": "2d", - "Description": "two-dimensional" - }, - { - "abbreviation": "3d", - "Description": "three-dimensional" - }, - { - "abbreviation": "ac", - "Description": "anterior commissure" - }, - { - "abbreviation": "ae", - "Description": "autoencoder" - }, - { - "abbreviation": "af", - "Description": "arcuate fascicle" - }, - { - "abbreviation": "ai", - "Description": "artificial intelligence" - }, - { - "abbreviation": "ann", - "Description": "artificial neural network" - }, - { - "abbreviation": "ar", - "Description": "acoustic radiation" - }, - { - "abbreviation": "atr", - "Description": "anterior thalamic radiation" - }, - { - "abbreviation": "cc", - "Description": "corpus callosum" - }, - { - "abbreviation": "cing", - "Description": "cingulum" - }, - { - "abbreviation": "cinta", - "Description": "clustering in tractography using autoencoders" - }, - { - "abbreviation": "cnn", - "Description": "convolutional neural network" - }, - { - "abbreviation": "csd", - "Description": "constrained spherical deconvolution" - }, - { - "abbreviation": "csf", - "Description": "cerebrospinal fluid" - }, - { - "abbreviation": "cst", - "Description": "corticospinal tract" - }, - { - "abbreviation": "dl", - "Description": "deep learning" - }, - { - "abbreviation": "dmri", - "Description": "diffusion magnetic resonance imaging" - }, - { - "abbreviation": "dodf", - "Description": "diffusion orientation distribution function" - }, - { - "abbreviation": "dt", - "Description": "diffusion tensor" - }, - { - "abbreviation": "dti", - "Description": "diffusion tensor imaging" - }, - { - "abbreviation": "dw-mri", - "Description": "diffusion-weighted magnetic resonance imaging" - }, - { - "abbreviation": "dwi", - "Description": "diffusion-weighted imaging" - }, - { - "abbreviation": "dwm", - "Description": "deep white matter" - }, - { 
- "abbreviation": "eap", - "Description": "ensemble average (diffusion) propagator" - }, - { - "abbreviation": "fa", - "Description": "fractional anisotropy" - }, - { - "abbreviation": "fat", - "Description": "frontal aslant tract" - }, - { - "abbreviation": "fc", - "Description": "fully connected" - }, - { - "abbreviation": "finta", - "Description": "filtering in tractography using autoencoders" - }, - { - "abbreviation": "fmri", - "Description": "functional magnetic resonance imaging" - }, - { - "abbreviation": "fod", - "Description": "fiber orientation distribution" - }, - { - "abbreviation": "fodf", - "Description": "fiber orientation distribution function" - }, - { - "abbreviation": "ft", - "Description": "fourier transform" - }, - { - "abbreviation": "fx", - "Description": "fornix" - }, - { - "abbreviation": "ge", - "Description": "gradient echo" - }, - { - "abbreviation": "gesta", - "Description": "generative sampling in bundle tractography using autoencoders" - }, - { - "abbreviation": "gm", - "Description": "gray matter" - }, - { - "abbreviation": "hardi", - "Description": "high angular resolution diffusion imaging" - }, - { - "abbreviation": "ic", - "Description": "internal capsule" - }, - { - "abbreviation": "icp", - "Description": "inferior cerebellar peduncle" - }, - { - "abbreviation": "ifof", - "Description": "inferior fronto-occipital fascicle" - }, - { - "abbreviation": "ils", - "Description": "inferior longitudinal system" - }, - { - "abbreviation": "jif", - "Description": "journal impact factor" - }, - { - "abbreviation": "mcp", - "Description": "middle cerebellar peduncle" - }, - { - "abbreviation": "ml", - "Description": "machine learning" - }, - { - "abbreviation": "mlp", - "Description": "multilayer perceptron" - }, - { - "abbreviation": "mls", - "Description": "middle longitudinal system" - }, - { - "abbreviation": "mr", - "Description": "magnetic resonance" - }, - { - "abbreviation": "mri", - "Description": "magnetic resonance imaging" - }, - { - "abbreviation": "nn", - "Description": "neural network" - }, - { - "abbreviation": "nos", - "Description": "number of streamlines" - }, - { - "abbreviation": "odf", - "Description": "orientation distribution function (also referred to as orientation density function)" - }, - { - "abbreviation": "or", - "Description": "optic radiation" - }, - { - "abbreviation": "pc", - "Description": "posterior commissure" - }, - { - "abbreviation": "pca", - "Description": "principal component analysis" - }, - { - "abbreviation": "pdf", - "Description": "probability density function" - }, - { - "abbreviation": "pgse", - "Description": "pulsed-gradient spin echo" - }, - { - "abbreviation": "pli", - "Description": "polarized light imaging" - }, - { - "abbreviation": "popt", - "Description": "parieto-occipital pontine tract" - }, - { - "abbreviation": "ps-oct", - "Description": "polarization-sensitive optical coherence tomography" - }, - { - "abbreviation": "rf", - "Description": "radio frequency" - }, - { - "abbreviation": "rnn", - "Description": "recurrent neural network" - }, - { - "abbreviation": "roc", - "Description": "receiver operating characteristic" - }, - { - "abbreviation": "scp", - "Description": "superior cerebellar peduncle" - }, - { - "abbreviation": "sd", - "Description": "spherical deconvolution" - }, - { - "abbreviation": "se", - "Description": "spin echo" - }, - { - "abbreviation": "set", - "Description": "surface-enhanced tractography" - }, - { - "abbreviation": "sls", - "Description": "superior longitudinal system" - }, 
- { - "abbreviation": "smri", - "Description": "structural magnetic resonance imaging" - }, - { - "abbreviation": "swm", - "Description": "superficial white matter" - }, - { - "abbreviation": "t1-w", - "Description": "t1-weighted image" - }, - { - "abbreviation": "te", - "Description": "echo time" - }, - { - "abbreviation": "tr", - "Description": "repetition time" - }, - { - "abbreviation": "uf", - "Description": "uncinate fascicle" - }, - { - "abbreviation": "vae", - "Description": "variational autoencoder" - }, - { - "abbreviation": "wm", - "Description": "white matter" - }, - { - "abbreviation": "3d", - "Description": "three dimensions" - }, - { - "abbreviation": "4d", - "Description": "four dimensions" - }, - { - "abbreviation": "act", - "Description": "anatomically-constrained tractography" - }, - { - "abbreviation": "amico", - "Description": "accelerated microstructure imaging via convex optimization" - }, - { - "abbreviation": "apm", - "Description": "average pathlength map" - }, - { - "abbreviation": "bet", - "Description": "brain extraction tool" - }, - { - "abbreviation": "cdmri", - "Description": "computational diffusion mri" - }, - { - "abbreviation": "cg", - "Description": "cingulum" - }, - { - "abbreviation": "cmc", - "Description": "continuous maps criterion" - }, - { - "abbreviation": "commit", - "Description": "convex optimization modeling for microstructure informed tractography" - }, - { - "abbreviation": "csa", - "Description": "constant solid-angle" - }, - { - "abbreviation": "csf/lcs/lcr", - "Description": "cerebrospinal fluid" - }, - { - "abbreviation": "c-dec", - "Description": "connectivity directionally-encoded color" - }, - { - "abbreviation": "dec", - "Description": "directionally-encoded color" - }, - { - "abbreviation": "dwi", - "Description": "diffusion-weighted imaging" - }, - { - "abbreviation": "dmri", - "Description": "diffusion-weighted imaging" - }, - { - "abbreviation": "irmd", - "Description": "diffusion-weighted imaging" - }, - { - "abbreviation": "eap", - "Description": "ensemble average propagator" - }, - { - "abbreviation": "epi", - "Description": "echo-planar imaging" - }, - { - "abbreviation": "fast", - "Description": "fmrib\u2019s automated segmentation tool" - }, - { - "abbreviation": "flirt", - "Description": "fmrib\u2019s linear image registration tool" - }, - { - "abbreviation": "fmt", - "Description": "fast marching tractography" - }, - { - "abbreviation": "fsl", - "Description": "fmrib software library" - }, - { - "abbreviation": "grappa", - "Description": "generalized autocalibrating partially parallel acquisition" - }, - { - "abbreviation": "ifof", - "Description": "inferior fronto-occipital fasciculus" - }, - { - "abbreviation": "ipmi", - "Description": "information processing in medical imaging" - }, - { - "abbreviation": "ismrm", - "Description": "international society for magnetic resonance in medicine" - }, - { - "abbreviation": "miccai", - "Description": "medical image computing and computer assisted intervention" - }, - { - "abbreviation": "mprage", - "Description": "magnetization-prepared rapid acquisition with gradient-echo" - }, - { - "abbreviation": "irm", - "Description": "magnetic resonance imaging" - }, - { - "abbreviation": "mri", - "Description": "magnetic resonance imaging" - }, - { - "abbreviation": "odf", - "Description": "orientation distribution function" - }, - { - "abbreviation": "ohbm", - "Description": "organization for human brain mapping" - }, - { - "abbreviation": "pve", - "Description": "partial volume 
estimation" - }, - { - "abbreviation": "roi", - "Description": "region of interest" - }, - { - "abbreviation": "rtt", - "Description": "real-time tractography" - }, - { - "abbreviation": "sh", - "Description": "spherical harmonics" - }, - { - "abbreviation": "slf", - "Description": "superior longitudinal fasciculus" - }, - { - "abbreviation": "snr", - "Description": "signal-to-noise ratio" - }, - { - "abbreviation": "twi", - "Description": "track-weighted imaging" - }, - { - "abbreviation": "voi", - "Description": "volume of interest" - }, - { - "abbreviation": "ats", - "Description": "anterior traverse system" - }, - { - "abbreviation": "a123", - "Description": "area 1/2/3 (upper limb, head, and face region)" - }, - { - "abbreviation": "a4hf", - "Description": "area 4 (head and face region)" - }, - { - "abbreviation": "a4ul", - "Description": "area 4 (upper limb region)" - }, - { - "abbreviation": "a46", - "Description": "area 46" - }, - { - "abbreviation": "af", - "Description": "arcuate fasciculus" - }, - { - "abbreviation": "bls", - "Description": "basal longitudinal system" - }, - { - "abbreviation": "ca39", - "Description": "caudal area 39" - }, - { - "abbreviation": "cdl", - "Description": "caudal dorsolateral area" - }, - { - "abbreviation": "cvl", - "Description": "caudal ventrolateral area" - }, - { - "abbreviation": "cdl", - "Description": "caudolateral of area " - }, - { - "abbreviation": "csf", - "Description": "cerebral spinal fluid" - }, - { - "abbreviation": "ctoi", - "Description": "conservative tract of interest" - }, - { - "abbreviation": "da9/36", - "Description": "dorsal area 9/46" - }, - { - "abbreviation": "ddi", - "Description": "dorsal dysgranular insula" - }, - { - "abbreviation": "dl6", - "Description": "dorsolateral area 6" - }, - { - "abbreviation": "dl37", - "Description": "dorsolateral area 37 region" - }, - { - "abbreviation": "efc", - "Description": "extreme/external capsule fibre system" - }, - { - "abbreviation": "fodfs", - "Description": "fibre orientation distribution functions" - }, - { - "abbreviation": "fus", - "Description": "fusiform gyrus" - }, - { - "abbreviation": "hcp", - "Description": "human connectome project" - }, - { - "abbreviation": "ifg", - "Description": "inferior frontal gyrus" - }, - { - "abbreviation": "ils ", - "Description": "inferior longitudinal system " - }, - { - "abbreviation": "ipl", - "Description": "inferior parietal lobe" - }, - { - "abbreviation": "itg", - "Description": "inferior temporal gyrus" - }, - { - "abbreviation": "ins", - "Description": "insula" - }, - { - "abbreviation": "ipa ", - "Description": "intraparietal area " - }, - { - "abbreviation": "la", - "Description": "lateral area" - }, - { - "abbreviation": "locc", - "Description": "lateral occipital cortex" - }, - { - "abbreviation": "cin", - "Description": "limbic lobe/cingulum" - }, - { - "abbreviation": "mme", - "Description": "mean millimetre error" - }, - { - "abbreviation": "mvocc", - "Description": "medioventral occipital cortex" - }, - { - "abbreviation": "mlf", - "Description": "medial longitudinal fasciculus" - }, - { - "abbreviation": "mesls", - "Description": "mesial longitudinal system" - }, - { - "abbreviation": "mfg", - "Description": "middle frontal gyrus" - }, - { - "abbreviation": "midls", - "Description": "middle longitudinal systems" - }, - { - "abbreviation": "mlf", - "Description": "middle longitudinal fasciculus" - }, - { - "abbreviation": "mtg", - "Description": "middle temporal gyrus" - }, - { - "abbreviation": "mni", - "Description": 
"montreal neurological institute" - }, - { - "abbreviation": "opa", - "Description": "opercular area" - }, - { - "abbreviation": "ofg", - "Description": "orbital frontal gyrus" - }, - { - "abbreviation": "pag", - "Description": "paracentral gyrus" - }, - { - "abbreviation": "pft", - "Description": "particle-filter tractography" - }, - { - "abbreviation": "pocg", - "Description": "postcentral gyrus" - }, - { - "abbreviation": "pts", - "Description": "posterior traverse system" - }, - { - "abbreviation": "pcg", - "Description": "precentral gyrus" - }, - { - "abbreviation": "pcun", - "Description": "precuneus" - }, - { - "abbreviation": "rois", - "Description": "regions of interest" - }, - { - "abbreviation": "rda", - "Description": "rostrodorsal area" - }, - { - "abbreviation": "rva", - "Description": "rostroventral area" - }, - { - "abbreviation": "stoi", - "Description": "sublobe tract of interest" - }, - { - "abbreviation": "sfg", - "Description": "superior frontal gyrus" - }, - { - "abbreviation": "slf ", - "Description": "superior longitudinal fasciculus " - }, - { - "abbreviation": "sls ", - "Description": "superior longitudinal system" - }, - { - "abbreviation": "spl", - "Description": "superior parietal lobule" - }, - { - "abbreviation": "stl", - "Description": "superior temporal lobe" - }, - { - "abbreviation": "sma", - "Description": "supplementary motor area" - }, - { - "abbreviation": "tois", - "Description": "tracts of interest" - }, - { - "abbreviation": "t", - "Description": "tesla" - }, - { - "abbreviation": "uf", - "Description": "uncinate fasciculus" - }, - { - "abbreviation": "vtois", - "Description": "variable tract of interest" - }, - { - "abbreviation": "abs", - "Description": "atlas based segmentation" - }, - { - "abbreviation": "afd", - "Description": "apparent fibre density" - }, - { - "abbreviation": "ad", - "Description": "axial diffusivity" - }, - { - "abbreviation": "bids", - "Description": "brain imaging data structure" - }, - { - "abbreviation": "lcs", - "Description": "cerebrospinal fluid" - }, - { - "abbreviation": "dodf", - "Description": "diffusion orientation distribution function" - }, - { - "abbreviation": "flair", - "Description": "fluid-attenuated inversion recovery" - }, - { - "abbreviation": "frf", - "Description": "fibre response function" - }, - { - "abbreviation": "rd", - "Description": "radial diffusivity" - }, - { - "abbreviation": "rf", - "Description": "radio frequency" - }, - { - "abbreviation": "scil", - "Description": "sherbrooke connectivity imaging laboratory" - }, - { - "abbreviation": "sp", - "Description": "multiple sclerosis" - }, - { - "abbreviation": "cpu", - "Description": "central processing unit" - }, - { - "abbreviation": "frt", - "Description": "funk-radon transform" - }, - { - "abbreviation": "go", - "Description": "gigabyte" - }, - { - "abbreviation": "gpu", - "Description": "graphical processing unit" - }, - { - "abbreviation": "gru", - "Description": "gated recurrent unit" - }, - { - "abbreviation": "irm", - "Description": "magnetic resonance imaging" - }, - { - "abbreviation": "irmd", - "Description": "diffusion-weighted magnetic resonance imaging" - }, - { - "abbreviation": "lstm", - "Description": "long short-term memory network" - }, - { - "abbreviation": "md", - "Description": "mean diffusivity" - }, - { - "abbreviation": "ram", - "Description": "random access memory" - }, - { - "abbreviation": "rnn", - "Description": "recurrent neural network" - }, - { - "abbreviation": "3d-shore", - "Description": "three dimensional 
simple harmonic oscillator based reconstruction and estimation model" - }, - { - "abbreviation": "ae", - "Description": "angular error metric" - }, - { - "abbreviation": "cdf", - "Description": "cohen-daubechies-feauveau" - }, - { - "abbreviation": "cdsi", - "Description": "classical diffusion spectrum imaging model" - }, - { - "abbreviation": "cs", - "Description": "compressive sensing" - }, - { - "abbreviation": "csa", - "Description": "constant solid angle q-ball model" - }, - { - "abbreviation": "csd", - "Description": "constrained spherical deconvolution model" - }, - { - "abbreviation": "cv", - "Description": "cross validation" - }, - { - "abbreviation": "ddsi", - "Description": "diffusion spectrum imaging deconvolution model" - }, - { - "abbreviation": "dipy", - "Description": "diffusion in python software" - }, - { - "abbreviation": "dnc", - "Description": "difference in the number of fiber compartments metric" - }, - { - "abbreviation": "dsi", - "Description": "diffusion spectrum imaging model" - }, - { - "abbreviation": "dsi515", - "Description": "classical diffusion spectrum imaging acquisition scheme with 515 samples" - }, - { - "abbreviation": "dsistudio", - "Description": "dsi studio software" - }, - { - "abbreviation": "dti", - "Description": "diffusion tensor imaging model" - }, - { - "abbreviation": "dtk", - "Description": "diffusion toolkit software" - }, - { - "abbreviation": "dtwt", - "Description": "dual tree wavelet transform" - }, - { - "abbreviation": "dw", - "Description": "diffusion weighted" - }, - { - "abbreviation": "dwi", - "Description": "diffusion weighted imaging" - }, - { - "abbreviation": "dwt", - "Description": "discrete wavelet transform" - }, - { - "abbreviation": "fft", - "Description": "fast fourier transform" - }, - { - "abbreviation": "fodf", - "Description": "fiber orientation distribution function" - }, - { - "abbreviation": "ib", - "Description": "invalib bundles metric" - }, - { - "abbreviation": "idft", - "Description": "inverse discrete fourier transform" - }, - { - "abbreviation": "isbi", - "Description": "ieee international symposium on biomedical imaging" - }, - { - "abbreviation": "isbi2013", - "Description": "subset of the dataset from the hardi challenge at the conference isbi2013" - }, - { - "abbreviation": "isbi2013-full", - "Description": "dataset from the hardi challenge at the conference isbi2013" - }, - { - "abbreviation": "mgh-ucla hcp", - "Description": "(massachusetts general hospital - university of california, los angeles) human connectome project" - }, - { - "abbreviation": "nmse", - "Description": "normalized mean square error" - }, - { - "abbreviation": "odsi", - "Description": "optimal diffusion spectrum imaging model" - }, - { - "abbreviation": "pccoeff", - "Description": "pearson correlation coefficient" - }, - { - "abbreviation": "pdsi", - "Description": "plain diffusion spectrum imaging model" - }, - { - "abbreviation": "pgse", - "Description": "pulse-gradient spin-echo" - }, - { - "abbreviation": "qbi", - "Description": "q-ball imaging model" - }, - { - "abbreviation": "rip", - "Description": "restricted isometry property" - }, - { - "abbreviation": "sc", - "Description": "sampling scheme" - }, - { - "abbreviation": "sense", - "Description": "sensitivity encoding algorithm" - }, - { - "abbreviation": "swt", - "Description": "stationary wavelet transform" - }, - { - "abbreviation": "tv", - "Description": "total variation" - }, - { - "abbreviation": "vb", - "Description": "valid bundles metric" - }, - { - 
"abbreviation": "vccr", - "Description": "valid connections to connection ratio" - }, - { - "abbreviation": "wu-minn hcp", - "Description": "(washington university, university of minnesota, and oxford university) human connectome project" - }, - { - "abbreviation": "2d", - "Description": "two dimensions" - }, - { - "abbreviation": "adc", - "Description": "apparent diffusion coefficient" - }, - { - "abbreviation": "aim", - "Description": "medical imaging axis" - }, - { - "abbreviation": "eeg", - "Description": "electroencephalography" - }, - { - "abbreviation": "chus", - "Description": "centre hospitalier universitaire de sherbrooke" - }, - { - "abbreviation": "cims", - "Description": "centre d\u2019imagerie mol\u00e9culaire de sherbrooke" - }, - { - "abbreviation": "crchus", - "Description": "centre de recherche du centre hospitalier universitaire de sherbrooke" - }, - { - "abbreviation": "fmr", - "Description": "mass-stiffness flow" - }, - { - "abbreviation": "fcm", - "Description": "mean-curvature flow" - }, - { - "abbreviation": "hr", - "Description": "high resolution" - }, - { - "abbreviation": "irmf", - "Description": "functional magnetic resonance imaging" - }, - { - "abbreviation": "meg", - "Description": "magnetoencephalography" - }, - { - "abbreviation": "psf", - "Description": "point spread function" - }, - { - "abbreviation": "roi", - "Description": "regions of interest" - }, - { - "abbreviation": "rgb", - "Description": "red, green and blue" - }, - { - "abbreviation": "rmn", - "Description": "nuclear magnetic resonance" - }, - { - "abbreviation": "sdp", - "Description": "semi-definite positive" - }, - { - "abbreviation": "snr", - "Description": "signal to noise ratio" - }, - { - "abbreviation": "tms", - "Description": "transcranial magnetic stimulation" - }, - { - "abbreviation": "wm", - "Description": "white matter" - }, - { - "abbreviation": "ad", - "Description": "alzheimer\u2019s disease" - }, - { - "abbreviation": "adni", - "Description": "alzheimer\u2019s disease neuroimaging initiative" - }, - { - "abbreviation": "bst", - "Description": "bundle-specific tractography" - }, - { - "abbreviation": "cnn", - "Description": "convolutional neural network" - }, - { - "abbreviation": "csc", - "Description": "continuous map criterion" - }, - { - "abbreviation": "dci", - "Description": "diffusion compartment imaging" - }, - { - "abbreviation": "dki", - "Description": "diffusion kurtosis imaging" - }, - { - "abbreviation": "edp", - "Description": "partial differential equation" - }, - { - "abbreviation": "mact", - "Description": "mesh anatomically-constrained tractography" - }, - { - "abbreviation": "mci", - "Description": "mild cognitive impairment" - }, - { - "abbreviation": "nc", - "Description": "normal control group" - }, - { - "abbreviation": "pft", - "Description": "particle filtering tractography" - }, - { - "abbreviation": "ping", - "Description": "pediatric imaging, neurocognition, and genetics" - }, - { - "abbreviation": "pve", - "Description": "partial volume effect" - }, - { - "abbreviation": "se", - "Description": "special euclidean group" - }, - { - "abbreviation": "sc", - "Description": "subcortical structures" - }, - { - "abbreviation": "sf", - "Description": "spherical function" - }, - { - "abbreviation": "tod", - "Description": "tract orientation density" - }, - { - "abbreviation": "act", - "Description": "anatomically constrained tractography" - }, - { - "abbreviation": "ad", - "Description": "alzheimer's disease" - }, - { - "abbreviation": "adni", - 
"Description": "alzheimer's disease neuroimaging initiative" - }, - { - "abbreviation": "ba", - "Description": "bundle adjacency" - }, - { - "abbreviation": "balsa", - "Description": "brain analysis library of spatial maps and atlases" - }, - { - "abbreviation": "boi", - "Description": "bundle of interest" - }, - { - "abbreviation": "clarity", - "Description": "clear lipid-exchanged acrylamide-hybridized rigid imaging / immunostaining / in situ-hybridization-compatible tissue hydrogel" - }, - { - "abbreviation": "cmc", - "Description": "continuous map criterion" - }, - { - "abbreviation": "cpu", - "Description": "central processing unit" - }, - { - "abbreviation": "dsc", - "Description": "dice score coefficient" - }, - { - "abbreviation": "eadc", - "Description": "european alzheimer\u2019s disease consortium" - }, - { - "abbreviation": "fat", - "Description": "fronto aslant tract" - }, - { - "abbreviation": "frf", - "Description": "fiber response function" - }, - { - "abbreviation": "efod", - "Description": "enhanced fiber orientation distribution" - }, - { - "abbreviation": "harp", - "Description": "harmonized hippocampal protocol" - }, - { - "abbreviation": "hc", - "Description": "healthy control" - }, - { - "abbreviation": "hcp", - "Description": "human connectome project" - }, - { - "abbreviation": "ifof", - "Description": "inferior fronto-occipital fasciculus" - }, - { - "abbreviation": "iqr", - "Description": "interquartile range" - }, - { - "abbreviation": "mri", - "Description": "magnetic resonance imaging" - }, - { - "abbreviation": "irm", - "Description": "magnetic resonance imaging" - }, - { - "abbreviation": "dmri", - "Description": "diffusion magnetic resonance imaging" - }, - { - "abbreviation": "irmd", - "Description": "diffusion magnetic resonance imaging" - }, - { - "abbreviation": "fmri", - "Description": "functional magnetic resonance imaging" - }, - { - "abbreviation": "irmf", - "Description": "functional magnetic resonance imaging" - }, - { - "abbreviation": "mdf", - "Description": "minimal direct-flip distance" - }, - { - "abbreviation": "ml", - "Description": "machine learning" - }, - { - "abbreviation": "oct", - "Description": "optical coherence tomography" - }, - { - "abbreviation": "pft", - "Description": "particle filter tractography" - }, - { - "abbreviation": "pve", - "Description": "partial volume effect" - }, - { - "abbreviation": "pyt", - "Description": "pyramidal tract" - }, - { - "abbreviation": "qb", - "Description": "quickbundles" - }, - { - "abbreviation": "ram", - "Description": "random access memory" - }, - { - "abbreviation": "rb(x)", - "Description": "recobundles(x)" - }, - { - "abbreviation": "mci", - "Description": "mild cognitive impairment" - }, - { - "abbreviation": "nmr", - "Description": "nuclear magnetic resonance" - }, - { - "abbreviation": "set", - "Description": "surface enhanced tractography" - }, - { - "abbreviation": "sfof", - "Description": "superior fronto-occipital fasciculus" - }, - { - "abbreviation": "slr", - "Description": "streamlines linear registration" - }, - { - "abbreviation": "tdi", - "Description": "tract density imaging" - }, - { - "abbreviation": "todi", - "Description": "tract orientation density imaging" - } - ] + "acronyms": [ + ["k-nn", "k-nearest neighbors"], + ["1d", "one-dimensional"], + ["2d", "two-dimensional"], + ["3d", "three-dimensional"], + ["ac", "anterior commissure"], + ["ae", "autoencoder"], + ["af", "arcuate fascicle"], + ["ai", "artificial intelligence"], + ["ann", "artificial neural network"], + 
["ar", "acoustic radiation"], + ["atr", "anterior thalamic radiation"], + ["cc", "corpus callosum"], + ["cing", "cingulum"], + ["cinta", "clustering in tractography using autoencoders"], + ["cnn", "convolutional neural network"], + ["csd", "constrained spherical deconvolution"], + ["csf", "cerebrospinal fluid"], + ["cst", "corticospinal tract"], + ["dl", "deep learning"], + ["dmri", "diffusion magnetic resonance imaging"], + ["dodf", "diffusion orientation distribution function"], + ["dt", "diffusion tensor"], + ["dti", "diffusion tensor imaging"], + ["dw-mri", "diffusion-weighted magnetic resonance imaging"], + ["dwi", "diffusion-weighted imaging"], + ["dwm", "deep white matter"], + ["eap", "ensemble average (diffusion) propagator"], + ["fa", "fractional anisotropy"], + ["fat", "frontal aslant tract"], + ["fc", "fully connected"], + ["finta", "filtering in tractography using autoencoders"], + ["fmri", "functional magnetic resonance imaging"], + ["fod", "fiber orientation distribution"], + ["fodf", "fiber orientation distribution function"], + ["ft", "fourier transform"], + ["fx", "fornix"], + ["ge", "gradient echo"], + ["gesta", "generative sampling in bundle tractography using autoencoders"], + ["gm", "gray matter"], + ["hardi", "high angular resolution diffusion imaging"], + ["ic", "internal capsule"], + ["icp", "inferior cerebellar peduncle"], + ["ifof", "inferior fronto-occipital fascicle"], + ["ils", "inferior longitudinal system"], + ["jif", "journal impact factor"], + ["mcp", "middle cerebellar peduncle"], + ["ml", "machine learning"], + ["mlp", "multilayer perceptron"], + ["mls", "middle longitudinal system"], + ["mr", "magnetic resonance"], + ["mri", "magnetic resonance imaging"], + ["nn", "neural network"], + ["nos", "number of streamlines"], + ["odf", "orientation distribution function (also referred to as orientation density function)"], + ["or", "optic radiation"], + ["pc", "posterior commissure"], + ["pca", "principal component analysis"], + ["pdf", "probability density function"], + ["pgse", "pulsed-gradient spin echo"], + ["pli", "polarized light imaging"], + ["popt", "parieto-occipital pontine tract"], + ["ps-oct", "polarization-sensitive optical coherence tomography"], + ["rf", "radio frequency"], + ["rnn", "recurrent neural network"], + ["roc", "receiver operating characteristic"], + ["scp", "superior cerebellar peduncle"], + ["sd", "spherical deconvolution"], + ["se", "spin echo"], + ["set", "surface-enhanced tractography"], + ["sls", "superior longitudinal system"], + ["smri", "structural magnetic resonance imaging"], + ["swm", "superficial white matter"], + ["t1-w", "t1-weighted image"], + ["te", "echo time"], + ["tr", "repetition time"], + ["uf", "uncinate fascicle"], + ["vae", "variational autoencoder"], + ["wm", "white matter"], + ["3d", "three dimensions"], + ["4d", "four dimensions"], + ["act", "anatomically-constrained tractography"], + ["amico", "accelerated microstructure imaging via convex optimization"], + ["apm", "average pathlength map"], + ["bet", "brain extraction tool"], + ["cdmri", "computational diffusion mri"], + ["cg", "cingulum"], + ["cmc", "continuous maps criterion"], + ["commit", "convex optimization modeling for microstructure informed tractography"], + ["csa", "constant solid-angle"], + ["csf/lcs/lcr", "cerebrospinal fluid"], + ["c-dec", "connectivity directionally-encoded color"], + ["dec", "directionally-encoded color"], + ["dwi", "diffusion-weighted imaging"], + ["dmri", "diffusion-weighted imaging"], + ["irmd", "diffusion-weighted 
imaging"], + ["eap", "ensemble average propagator"], + ["epi", "echo-planar imaging"], + ["fast", "fmrib’s automated segmentation tool"], + ["flirt", "fmrib’s linear image registration tool"], + ["fmt", "fast marching tractography"], + ["fsl", "fmrib software library"], + ["grappa", "generalized autocalibrating partially parallel acquisition"], + ["ifof", "inferior fronto-occipital fasciculus"], + ["ipmi", "information processing in medical imaging"], + ["ismrm", "international society for magnetic resonance in medicine"], + ["miccai", "medical image computing and computer assisted intervention"], + ["mprage", "magnetization-prepared rapid acquisition with gradient-echo"], + ["irm", "magnetic resonance imaging"], + ["mri", "magnetic resonance imaging"], + ["odf", "orientation distribution function"], + ["ohbm", "organization for human brain mapping"], + ["pve", "partial volume estimation"], + ["roi", "region of interest"], + ["rtt", "real-time tractography"], + ["sh", "spherical harmonics"], + ["slf", "superior longitudinal fasciculus"], + ["snr", "signal-to-noise ratio"], + ["twi", "track-weighted imaging"], + ["voi", "volume of interest"], + ["ats", "anterior traverse system"], + ["a123", "area 1/2/3 (upper limb, head, and face region)"], + ["a4hf", "area 4 (head and face region)"], + ["a4ul", "area 4 (upper limb region)"], + ["a46", "area 46"], + ["af", "arcuate fasciculus"], + ["bls", "basal longitudinal system"], + ["ca39", "caudal area 39"], + ["cdl", "caudal dorsolateral area"], + ["cvl", "caudal ventrolateral area"], + ["cdl", "caudolateral of area"], + ["csf", "cerebral spinal fluid"], + ["ctoi", "conservative tract of interest"], + ["da9/36", "dorsal area 9/46"], + ["ddi", "dorsal dysgranular insula"], + ["dl6", "dorsolateral area 6"], + ["dl37", "dorsolateral area 37 region"], + ["efc", "extreme/external capsule fibre system"], + ["fodfs", "fibre orientation distribution functions"], + ["fus", "fusiform gyrus"], + ["hcp", "human connectome project"], + ["ifg", "inferior frontal gyrus"], + ["ils", "inferior longitudinal system"], + ["ipl", "inferior parietal lobe"], + ["itg", "inferior temporal gyrus"], + ["ins", "insula"], + ["ipa", "intraparietal area"], + ["la", "lateral area"], + ["locc", "lateral occipital cortex"], + ["cin", "limbic lobe/cingulum"], + ["mme", "mean millimetre error"], + ["mvocc", "medioventral occipital cortex"], + ["mlf", "medial longitudinal fasciculus"], + ["mesls", "mesial longitudinal system"], + ["mfg", "middle frontal gyrus"], + ["midls", "middle longitudinal systems"], + ["mlf", "middle longitudinal fasciculus"], + ["mtg", "middle temporal gyrus"], + ["mni", "montreal neurological institute"], + ["opa", "opercular area"], + ["ofg", "orbital frontal gyrus"], + ["pag", "paracentral gyrus"], + ["pft", "particle-filter tractography"], + ["pocg", "postcentral gyrus"], + ["pts", "posterior traverse system"], + ["pcg", "precentral gyrus"], + ["pcun", "precuneus"], + ["rois", "regions of interest"], + ["rda", "rostrodorsal area"], + ["rva", "rostroventral area"], + ["stoi", "sublobe tract of interest"], + ["sfg", "superior frontal gyrus"], + ["slf", "superior longitudinal fasciculus"], + ["sls", "superior longitudinal system"], + ["spl", "superior parietal lobule"], + ["stl", "superior temporal lobe"], + ["sma", "supplementary motor area"], + ["tois", "tracts of interest"], + ["t", "tesla"], + ["uf", "uncinate fasciculus"], + ["vtois", "variable tract of interest"], + ["abs", "atlas based segmentation"], + ["afd", "apparent fibre density"], + ["ad", "axial 
diffusivity"],
+    ["bids", "brain imaging data structure"],
+    ["lcs", "cerebrospinal fluid"],
+    ["dodf", "diffusion orientation distribution function"],
+    ["flair", "fluid-attenuated inversion recovery"],
+    ["frf", "fibre response function"],
+    ["rd", "radial diffusivity"],
+    ["rf", "radio frequency"],
+    ["scil", "sherbrooke connectivity imaging laboratory"],
+    ["sp", "multiple sclerosis"],
+    ["cpu", "central processing unit"],
+    ["frt", "funk-radon transform"],
+    ["go", "gigabyte"],
+    ["gpu", "graphical processing unit"],
+    ["gru", "gated recurrent unit"],
+    ["irm", "magnetic resonance imaging"],
+    ["irmd", "diffusion-weighted magnetic resonance imaging"],
+    ["lstm", "long short-term memory network"],
+    ["md", "mean diffusivity"],
+    ["ram", "random access memory"],
+    ["rnn", "recurrent neural network"],
+    ["3d-shore", "three dimensional simple harmonic oscillator based reconstruction and estimation model"],
+    ["ae", "angular error metric"],
+    ["cdf", "cohen-daubechies-feauveau"],
+    ["cdsi", "classical diffusion spectrum imaging model"],
+    ["cs", "compressive sensing"],
+    ["csa", "constant solid angle q-ball model"],
+    ["csd", "constrained spherical deconvolution model"],
+    ["cv", "cross validation"],
+    ["ddsi", "diffusion spectrum imaging deconvolution model"],
+    ["dipy", "diffusion in python software"],
+    ["dnc", "difference in the number of fiber compartments metric"],
+    ["dsi", "diffusion spectrum imaging model"],
+    ["dsi515", "classical diffusion spectrum imaging acquisition scheme with 515 samples"],
+    ["dsistudio", "dsi studio software"],
+    ["dti", "diffusion tensor imaging model"],
+    ["dtk", "diffusion toolkit software"],
+    ["dtwt", "dual tree wavelet transform"],
+    ["dw", "diffusion weighted"],
+    ["dwi", "diffusion weighted imaging"],
+    ["dwt", "discrete wavelet transform"],
+    ["fodf", "fiber orientation distribution function"],
+    ["ib", "invalid bundles metric"],
+    ["idft", "inverse discrete fourier transform"],
+    ["isbi", "ieee international symposium on biomedical imaging"],
+    ["isbi2013", "subset of the dataset from the hardi challenge at the conference isbi2013"],
+    ["isbi2013-full", "dataset from the hardi challenge at the conference isbi2013"],
+    ["mgh-ucla hcp", "(massachusetts general hospital - university of california, los angeles) human connectome project"],
+    ["nmse", "normalized mean square error"],
+    ["odsi", "optimal diffusion spectrum imaging model"],
+    ["pccoeff", "pearson correlation coefficient"],
+    ["pdsi", "plain diffusion spectrum imaging model"],
+    ["pgse", "pulse-gradient spin-echo"],
+    ["qbi", "q-ball imaging model"]
+    ]
+ }
\ No newline at end of file

From 0fdb98d360b2e1925312f32f8c8529c171cfdb2b Mon Sep 17 00:00:00 2001
From: jinan02
Date: Mon, 19 Aug 2024 01:49:23 -0400
Subject: [PATCH 60/69] change the way the scores are updated for synonyms

---
 scilpy/utils/scilpy_bot.py      | 15 +++++++++++++--
 scripts/scil_search_keywords.py |  9 ++++++---
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py
index 2a3791c11..c4e0858fd 100644
--- a/scilpy/utils/scilpy_bot.py
+++ b/scilpy/utils/scilpy_bot.py
@@ -335,12 +335,23 @@
     stemmed_filename = _stem_text(filename.lower())
     score_details = {'total_score': 0}
 
+    def is_match(found_word, keyword):
+        if len(keyword) <= 3:
+            return found_word == keyword
+        return stemmer.stem(found_word) == stemmer.stem(keyword)
+
     for keyword in keywords:
         keyword = keyword.lower()
         # Use regular expressions to match whole 
words only keyword_pattern = re.compile(r'\b' + re.escape(keyword) + r'\b') - keyword_score = len(keyword_pattern.findall( - stemmed_text)) + len(keyword_pattern.findall(stemmed_filename)) + found_words = keyword_pattern.findall( + stemmed_text) + keyword_pattern.findall(stemmed_filename) + + keyword_score = 0 + for found_word in found_words: + if is_match(found_word, keyword): + keyword_score += 1 + score_details[keyword] = keyword_score score_details['total_score'] += keyword_score diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 2824dbb72..20c114568 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -188,14 +188,16 @@ def update_matches_and_scores(filename, score_details): score_details = scores.get(filename, {'total_score': 0}) for synonym in synonyms: - if synonym in search_text: + if synonym in search_text and synonym != keyword: # Update the score_details with the count of each synonym found score_details[keyword + ' synonyms'] = score_details.get( keyword + ' synonyms', 0) + search_text.count(synonym) score_details['total_score'] += search_text.count( synonym) - update_matches_and_scores(filename, score_details) + # Directly update scores dictionary + scores[filename] = score_details + if not matches: logging.info(_make_title(' No results found! ')) @@ -207,7 +209,8 @@ def update_matches_and_scores(filename, score_details): logging.info(_make_title(' Results Ordered by Score ')) for match in sorted_matches: - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match}{Style.RESET_ALL}") + if scores[match]['total_score'] > 0: + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match}{Style.RESET_ALL}") for word, score in scores[match].items(): if word != 'total_score': From 48109d96ce4e2f42870df27ec5a35feaed89c8bf Mon Sep 17 00:00:00 2001 From: jinan02 Date: Mon, 19 Aug 2024 01:55:43 -0400 Subject: [PATCH 61/69] style the code to respect the pep8 rules --- scilpy/utils/scilpy_bot.py | 9 ++++----- scripts/scil_search_keywords.py | 3 +-- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py index c4e0858fd..56c923cdc 100644 --- a/scilpy/utils/scilpy_bot.py +++ b/scilpy/utils/scilpy_bot.py @@ -6,7 +6,7 @@ from colorama import Fore, Style import re from tqdm import tqdm -import logging + stemmer = PorterStemmer() RED = '\033[31m' @@ -17,7 +17,7 @@ # Path to the JSON file containing script information and keywords VOCAB_FILE_PATH = pathlib.Path( - __file__).parent.parent.parent/'data'/ 'vocabulary'/'vocabulary.json' + __file__).parent.parent.parent/'data' / 'vocabulary' / 'vocabulary.json' OBJECTS = [ @@ -231,7 +231,6 @@ def _generate_help_files(): pbar.update(1) - def _highlight_keywords(text, stemmed_keywords): """ Highlight the stemmed keywords in the given text using colorama. 
@@ -339,15 +338,15 @@ def is_match(found_word, keyword): if len(keyword) <= 3: return found_word == keyword return stemmer.stem(found_word) == stemmer.stem(keyword) - + for keyword in keywords: keyword = keyword.lower() # Use regular expressions to match whole words only keyword_pattern = re.compile(r'\b' + re.escape(keyword) + r'\b') found_words = keyword_pattern.findall( stemmed_text) + keyword_pattern.findall(stemmed_filename) - keyword_score = 0 + for found_word in found_words: if is_match(found_word, keyword): keyword_score += 1 diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 20c114568..1af2f4d92 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -189,7 +189,7 @@ def update_matches_and_scores(filename, score_details): for synonym in synonyms: if synonym in search_text and synonym != keyword: - # Update the score_details with the count of each synonym found + # Update score_details with the count of each synonym found score_details[keyword + ' synonyms'] = score_details.get( keyword + ' synonyms', 0) + search_text.count(synonym) score_details['total_score'] += search_text.count( @@ -198,7 +198,6 @@ def update_matches_and_scores(filename, score_details): # Directly update scores dictionary scores[filename] = score_details - if not matches: logging.info(_make_title(' No results found! ')) From ef6966bed5660e648750ed83c07dc3e6c1aaafbe Mon Sep 17 00:00:00 2001 From: jinan02 Date: Mon, 26 Aug 2024 11:55:52 -0400 Subject: [PATCH 62/69] add initialize_logging function --- scripts/scil_search_keywords.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 1af2f4d92..568c6dd11 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -50,6 +50,14 @@ nltk.download('punkt', quiet=True) +def _initialize_logging(verbosity): + logging.basicConfig(level=logging.WARNING) + if verbosity == 'INFO': + logging.getLogger().setLevel(logging.INFO) + elif verbosity == 'DEBUG': + logging.getLogger().setLevel(logging.DEBUG) + + def _build_arg_parser(): p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) From 297b8bc0cc5a7f8a05c1f9c5d98ad9fa4bb14f74 Mon Sep 17 00:00:00 2001 From: jinan02 Date: Mon, 26 Aug 2024 19:53:07 -0400 Subject: [PATCH 63/69] fix the -v arg to display the 1st sentence of the docstring and -v DEBUG to display the whole docstring --- scripts/scil_search_keywords.py | 45 ++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 568c6dd11..cd25c3ca2 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -42,7 +42,8 @@ _get_docstring_from_script_path, _stem_keywords, _stem_phrase, _generate_help_files, _get_synonyms, _extract_keywords_and_phrases, - _calculate_score, _make_title, prompt_user_for_object + _calculate_score, _make_title, prompt_user_for_object, + _split_first_sentence, _highlight_keywords ) from scilpy.utils.scilpy_bot import SPACING_LEN, VOCAB_FILE_PATH from scilpy.io.utils import add_verbose_arg @@ -50,14 +51,6 @@ nltk.download('punkt', quiet=True) -def _initialize_logging(verbosity): - logging.basicConfig(level=logging.WARNING) - if verbosity == 'INFO': - logging.getLogger().setLevel(logging.INFO) - elif verbosity == 'DEBUG': - logging.getLogger().setLevel(logging.DEBUG) - - def _build_arg_parser(): p = 
argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) @@ -113,11 +106,13 @@ def main(): matches = [] scores = {} + docstrings = {} # To store the docstrings of each script + # pattern to search for search_pattern = f'scil_{"{}_" if selected_object else ""}*.py' - def update_matches_and_scores(filename, score_details): + def update_matches_and_scores(filename, score_details, docstring=None): """ Update the matches and scores for the given filename based on the score details. @@ -131,7 +126,8 @@ def update_matches_and_scores(filename, score_details): and phrases found in the script. This dictionary should have a 'total_score' key indicating the cumulative score. - + docstring : str, optional + The docstring of the script. Returns ------- None @@ -141,12 +137,16 @@ def update_matches_and_scores(filename, score_details): if filename not in matches: matches.append(filename) scores[filename] = score_details + if docstring: + docstrings[filename] = docstring else: for key, value in score_details.items(): if key != 'total_score': scores[filename][key] = scores[filename].get( key, 0) + value scores[filename]['total_score'] += score_details['total_score'] + if docstring: + docstrings[filename] = docstring for script in sorted(script_dir.glob(search_pattern.format(selected_object))): filename = script.stem @@ -157,7 +157,7 @@ def update_matches_and_scores(filename, score_details): search_text = _get_docstring_from_script_path(str(script)) score_details = _calculate_score( stemmed_keywords, stemmed_phrases, search_text, filename=filename) - update_matches_and_scores(filename, score_details) + update_matches_and_scores(filename, score_details, docstring=search_text) # Search in help files help_file = hidden_dir / f"{filename}.py.help" @@ -221,16 +221,19 @@ def update_matches_and_scores(filename, score_details): for word, score in scores[match].items(): if word != 'total_score': - if word.endswith(' synonyms'): - logging.info( - f"{Fore.GREEN}Occurrence of '{word}': {score}{Style.RESET_ALL}") - else: - original_word = keyword_mapping.get( - word, phrase_mapping.get(word, word)) logging.info( - f"{Fore.GREEN}Occurrence of '{original_word}': {score}{Style.RESET_ALL}") - - logging.info(f"Total Score: {scores[match]['total_score']}") + f"{Fore.GREEN}Occurrence of '{keyword_mapping.get(word, phrase_mapping.get(word, word))}': {score}{Style.RESET_ALL}") + # Highlight keywords in the docstring or full text based on verbosity level + if match in docstrings: + highlighted_docstring = _highlight_keywords(docstrings[match], stemmed_keywords) + if args.verbose == 'INFO': + first_sentence = _split_first_sentence(highlighted_docstring)[0] + logging.info(f"{first_sentence.strip()}") + elif args.verbose == 'DEBUG': + logging.debug(f"{highlighted_docstring.strip()}") + + + logging.info(f"{Fore.RED}Total Score: {scores[match]['total_score']}{Style.RESET_ALL}") logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") logging.info("\n") logging.info(_make_title( From 7e991d8d0e1b5490a2294e217116373a74ece676 Mon Sep 17 00:00:00 2001 From: jinan02 Date: Mon, 26 Aug 2024 20:23:27 -0400 Subject: [PATCH 64/69] modify docstring --- scripts/scil_search_keywords.py | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index cd25c3ca2..ac75f1ed7 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -15,12 +15,24 @@ - Words enclosed in quotes will be searched as 
phrases, ensuring the words appear next to each other in the text. +Verbosity Options: +- If the `-v` option is provided, the script will display the first sentence + of the docstring for each matching script. +- If the `-v DEBUG` option is provided, the script will display the full + docstring for each matching script. + +Keywords Highlighting: +- When displaying the docstrings, the script highlights the found keywords in +red. Examples: - scil_search_keywords.py tractogram filtering -- scil_search_keywords.py "Spherical Harmonics" convert -- scil_search_keywords.py --no_synonyms tractogram filtering -- scil_search_keywords.py --search_category tractogram filtering +- scil_search_keywords.py "Spherical Harmonics" +- scil_search_keywords.py --no_synonyms "Spherical Harmonics" +- scil_search_keywords.py --search_category tractogram +- scil_search_keywords.py -v sh +- scil_search_keywords.py -v DEBUG sh + """ import argparse @@ -108,7 +120,6 @@ def main(): scores = {} docstrings = {} # To store the docstrings of each script - # pattern to search for search_pattern = f'scil_{"{}_" if selected_object else ""}*.py' @@ -157,7 +168,8 @@ def update_matches_and_scores(filename, score_details, docstring=None): search_text = _get_docstring_from_script_path(str(script)) score_details = _calculate_score( stemmed_keywords, stemmed_phrases, search_text, filename=filename) - update_matches_and_scores(filename, score_details, docstring=search_text) + update_matches_and_scores(filename, score_details, + docstring=search_text) # Search in help files help_file = hidden_dir / f"{filename}.py.help" @@ -197,7 +209,7 @@ def update_matches_and_scores(filename, score_details, docstring=None): for synonym in synonyms: if synonym in search_text and synonym != keyword: - # Update score_details with the count of each synonym found + # Update score_details with count of each synonym found score_details[keyword + ' synonyms'] = score_details.get( keyword + ' synonyms', 0) + search_text.count(synonym) score_details['total_score'] += search_text.count( @@ -221,8 +233,7 @@ def update_matches_and_scores(filename, score_details, docstring=None): for word, score in scores[match].items(): if word != 'total_score': - logging.info( - f"{Fore.GREEN}Occurrence of '{keyword_mapping.get(word, phrase_mapping.get(word, word))}': {score}{Style.RESET_ALL}") + logging.info(f"{Fore.GREEN}Occurrence of '{keyword_mapping.get(word, phrase_mapping.get(word, word))}': {score}{Style.RESET_ALL}") # Highlight keywords in the docstring or full text based on verbosity level if match in docstrings: highlighted_docstring = _highlight_keywords(docstrings[match], stemmed_keywords) @@ -230,9 +241,7 @@ def update_matches_and_scores(filename, score_details, docstring=None): first_sentence = _split_first_sentence(highlighted_docstring)[0] logging.info(f"{first_sentence.strip()}") elif args.verbose == 'DEBUG': - logging.debug(f"{highlighted_docstring.strip()}") - - + logging.debug(f"{highlighted_docstring.strip()}") logging.info(f"{Fore.RED}Total Score: {scores[match]['total_score']}{Style.RESET_ALL}") logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") logging.info("\n") From fff8baedda1c81556312dcb032cf1790c2a177c3 Mon Sep 17 00:00:00 2001 From: jinan02 Date: Mon, 26 Aug 2024 20:38:10 -0400 Subject: [PATCH 65/69] run flake8 on the script --- scripts/scil_search_keywords.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index ac75f1ed7..713805935 100755 --- 
a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -233,15 +233,17 @@ def update_matches_and_scores(filename, score_details, docstring=None): for word, score in scores[match].items(): if word != 'total_score': - logging.info(f"{Fore.GREEN}Occurrence of '{keyword_mapping.get(word, phrase_mapping.get(word, word))}': {score}{Style.RESET_ALL}") - # Highlight keywords in the docstring or full text based on verbosity level + logging.info(f"{Fore.GREEN}Occurrence of '{keyword_mapping.get(word, phrase_mapping.get(word, word))}': {score}{Style.RESET_ALL}") + + # Highlight keywords based on verbosity level if match in docstrings: - highlighted_docstring = _highlight_keywords(docstrings[match], stemmed_keywords) + highlighted_docstring = _highlight_keywords(docstrings[match], + stemmed_keywords) if args.verbose == 'INFO': first_sentence = _split_first_sentence(highlighted_docstring)[0] logging.info(f"{first_sentence.strip()}") elif args.verbose == 'DEBUG': - logging.debug(f"{highlighted_docstring.strip()}") + logging.debug(f"{highlighted_docstring.strip()}") logging.info(f"{Fore.RED}Total Score: {scores[match]['total_score']}{Style.RESET_ALL}") logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") logging.info("\n") From 56b87fdace744c8d02149209c9d186d1ca7c3927 Mon Sep 17 00:00:00 2001 From: frheault Date: Sun, 29 Sep 2024 21:43:22 -0400 Subject: [PATCH 66/69] Improved behavior on synonym --- data/vocabulary/vocabulary.json | 1293 ++++++++++++++++++++++++------- scilpy/utils/scilpy_bot.py | 101 ++- scripts/scil_search_keywords.py | 194 ++--- 3 files changed, 1143 insertions(+), 445 deletions(-) diff --git a/data/vocabulary/vocabulary.json b/data/vocabulary/vocabulary.json index ff1fdc3be..15d506182 100644 --- a/data/vocabulary/vocabulary.json +++ b/data/vocabulary/vocabulary.json @@ -1059,300 +1059,1005 @@ } ], "synonyms": [ - [ - "bundle", - "tract", - "pathway", - "fasciculus" - ], - [ - "multi-shells", - "multishell", - "multi shell", - "msmt" - ], - [ - "SH", - "Spherical Harmonics" - ], - - [ - "single-shell", - "single shell", - "ssst" - ], - [ - "ODF", - "orientation distribution function" - ], - [ - "DWI", - "diffusion weighted imaging" - ], - [ - "shell", - "bval", - "b-value", - "bvals" - ], - [ - "b-tensor encoding", - "tensor-valued" - ], - [ - "surface", - "mesh" - ], - [ - "merge", - "fuse", - "concatenate", - "add" - ], - [ - "parcellate", - "subdivide", - "split", - "divide" - ] + [ + "bundle", + "tract", + "pathway", + "fasciculus" + ], + [ + "multi-shells", + "multishell", + "multi shell", + "msmt" + ], + [ + "SH", + "spherical harmonic" + ], + [ + "single-shell", + "single shell", + "ssst" + ], + [ + "ODF", + "orientation distribution function" + ], + [ + "DWI", + "diffusion weighted imaging" + ], + [ + "shell", + "bval", + "b-value", + "bvals" + ], + [ + "b-tensor encoding", + "tensor-valued" + ], + [ + "surface", + "mesh" + ], + [ + "merge", + "fuse", + "concatenate", + "add" + ], + [ + "parcellate", + "subdivide", + "split", + "divide" + ], + [ + "k-nn", + "knn", + "k-nearest neighbors" + ], + [ + "1d", + "one-dimensional" + ], + [ + "2d", + "two-dimensional" + ], + [ + "3d", + "three-dimensional" + ], + [ + "ac", + "anterior commissure" + ], + [ + "ae", + "autoencoder" + ], + [ + "af", + "arcuate fascicle" + ], + [ + "ai", + "artificial intelligence" + ], + [ + "ann", + "artificial neural network" + ], + [ + "ar", + "acoustic radiation" + ], + [ + "atr", + "anterior thalamic radiation" + ], + [ + "cc", + "corpus callosum" + ], + [ + "cing", + "cingulum" + ], + 
[ + "cinta", + "clustering in tractography using autoencoders" + ], + [ + "cnn", + "convolutional neural network" + ], + [ + "csd", + "constrained spherical deconvolution" + ], + [ + "csf", + "cerebrospinal fluid" + ], + [ + "cst", + "corticospinal tract" + ], + [ + "dl", + "deep learning" + ], + [ + "dmri", + "diffusion magnetic resonance imaging" + ], + [ + "dodf", + "diffusion orientation distribution function" + ], + [ + "dt", + "diffusion tensor" + ], + [ + "dti", + "diffusion tensor imaging" + ], + [ + "dw-mri", + "diffusion-weighted magnetic resonance imaging" + ], + [ + "dwi", + "diffusion-weighted imaging" + ], + [ + "dwm", + "deep white matter" + ], + [ + "eap", + "ensemble average (diffusion) propagator" + ], + [ + "fa", + "fractional anisotropy" + ], + [ + "fat", + "frontal aslant tract" + ], + [ + "fc", + "fully connected" + ], + [ + "finta", + "filtering in tractography using autoencoders" + ], + [ + "fmri", + "functional magnetic resonance imaging" + ], + [ + "fod", + "fiber orientation distribution" + ], + [ + "fodf", + "fiber orientation distribution function" + ], + [ + "ft", + "fourier transform" + ], + [ + "fx", + "fornix" + ], + [ + "ge", + "gradient echo" + ], + [ + "gesta", + "generative sampling in bundle tractography using autoencoders" + ], + [ + "gm", + "gray matter" + ], + [ + "hardi", + "high angular resolution diffusion imaging" + ], + [ + "ic", + "internal capsule" + ], + [ + "icp", + "inferior cerebellar peduncle" + ], + [ + "ifof", + "inferior fronto-occipital fascicle" + ], + [ + "ils", + "inferior longitudinal system" + ], + [ + "jif", + "journal impact factor" + ], + [ + "mcp", + "middle cerebellar peduncle" + ], + [ + "ml", + "machine learning" + ], + [ + "mlp", + "multilayer perceptron" + ], + [ + "mls", + "middle longitudinal system" + ], + [ + "mr", + "magnetic resonance" + ], + [ + "mri", + "magnetic resonance imaging" + ], + [ + "nn", + "neural network" + ], + [ + "nos", + "number of streamlines" + ], + [ + "odf", + "orientation distribution function (also referred to as orientation density function)" + ], + [ + "or", + "optic radiation" + ], + [ + "pc", + "posterior commissure" + ], + [ + "pca", + "principal component analysis" + ], + [ + "pdf", + "probability density function" + ], + [ + "pgse", + "pulsed-gradient spin echo" + ], + [ + "pli", + "polarized light imaging" + ], + [ + "popt", + "parieto-occipital pontine tract" + ], + [ + "ps-oct", + "polarization-sensitive optical coherence tomography" + ], + [ + "rf", + "radio frequency" + ], + [ + "rnn", + "recurrent neural network" + ], + [ + "roc", + "receiver operating characteristic" + ], + [ + "scp", + "superior cerebellar peduncle" + ], + [ + "sd", + "spherical deconvolution" + ], + [ + "se", + "spin echo" + ], + [ + "set", + "surface-enhanced tractography" + ], + [ + "sls", + "superior longitudinal system" + ], + [ + "smri", + "structural magnetic resonance imaging" + ], + [ + "swm", + "superficial white matter" + ], + [ + "t1-w", + "t1-weighted image" + ], + [ + "te", + "echo time" + ], + [ + "tr", + "repetition time" + ], + [ + "uf", + "uncinate fascicle" + ], + [ + "vae", + "variational autoencoder" + ], + [ + "wm", + "white matter" + ], + [ + "3d", + "three dimensions" + ], + [ + "4d", + "four dimensions" + ], + [ + "act", + "anatomically-constrained tractography" + ], + [ + "amico", + "accelerated microstructure imaging via convex optimization" + ], + [ + "apm", + "average pathlength map" + ], + [ + "bet", + "brain extraction tool" + ], + [ + "cdmri", + "computational diffusion mri" + 
],
+        [
+            "cg",
+            "cingulum"
+        ],
+        [
+            "cmc",
+            "continuous maps criterion"
+        ],
+        [
+            "commit",
+            "convex optimization modeling for microstructure informed tractography"
+        ],
+        [
+            "csa",
+            "constant solid-angle"
+        ],
+        [
+            "csf/lcs/lcr",
+            "cerebrospinal fluid"
+        ],
+        [
+            "c-dec",
+            "connectivity directionally-encoded color"
+        ],
+        [
+            "dec",
+            "directionally-encoded color"
+        ],
+        [
+            "dwi",
+            "diffusion-weighted imaging"
+        ],
+        [
+            "dmri",
+            "diffusion-weighted imaging"
+        ],
+        [
+            "irmd",
+            "diffusion-weighted imaging"
+        ],
+        [
+            "eap",
+            "ensemble average propagator"
+        ],
+        [
+            "epi",
+            "echo-planar imaging"
+        ],
+        [
+            "fast",
+            "fmrib’s automated segmentation tool"
+        ],
+        [
+            "flirt",
+            "fmrib’s linear image registration tool"
+        ],
+        [
+            "fmt",
+            "fast marching tractography"
+        ],
+        [
+            "fsl",
+            "fmrib software library"
+        ],
+        [
+            "grappa",
+            "generalized autocalibrating partially parallel acquisition"
+        ],
+        [
+            "ifof",
+            "inferior fronto-occipital fasciculus"
+        ],
+        [
+            "ipmi",
+            "information processing in medical imaging"
+        ],
+        [
+            "ismrm",
+            "international society for magnetic resonance in medicine"
+        ],
+        [
+            "miccai",
+            "medical image computing and computer assisted intervention"
+        ],
+        [
+            "mprage",
+            "magnetization-prepared rapid acquisition with gradient-echo"
+        ],
+        [
+            "irm",
+            "magnetic resonance imaging"
+        ],
+        [
+            "mri",
+            "magnetic resonance imaging"
+        ],
+        [
+            "odf",
+            "orientation distribution function"
+        ],
+        [
+            "ohbm",
+            "organization for human brain mapping"
+        ],
+        [
+            "pve",
+            "partial volume estimation"
+        ],
+        [
+            "roi",
+            "region of interest"
+        ],
+        [
+            "rtt",
+            "real-time tractography"
+        ],
+        [
+            "sh",
+            "spherical harmonics"
+        ],
+        [
+            "slf",
+            "superior longitudinal fasciculus"
+        ],
+        [
+            "snr",
+            "signal-to-noise ratio"
+        ],
+        [
+            "twi",
+            "track-weighted imaging"
+        ],
+        [
+            "voi",
+            "volume of interest"
+        ],
+        [
+            "ats",
+            "anterior traverse system"
+        ],
+        [
+            "a123",
+            "area 1/2/3 (upper limb, head, and face region)"
+        ],
+        [
+            "a4hf",
+            "area 4 (head and face region)"
+        ],
+        [
+            "a4ul",
+            "area 4 (upper limb region)"
+        ],
+        [
+            "a46",
+            "area 46"
+        ],
+        [
+            "af",
+            "arcuate fasciculus"
+        ],
+        [
+            "bls",
+            "basal longitudinal system"
+        ],
+        [
+            "ca39",
+            "caudal area 39"
+        ],
+        [
+            "cdl",
+            "caudal dorsolateral area"
+        ],
+        [
+            "cvl",
+            "caudal ventrolateral area"
+        ],
+        [
+            "cdl",
+            "caudolateral of area"
+        ],
+        [
+            "csf",
+            "cerebral spinal fluid"
+        ],
+        [
+            "ctoi",
+            "conservative tract of interest"
+        ],
+        [
+            "da9/36",
+            "dorsal area 9/46"
+        ],
+        [
+            "ddi",
+            "dorsal dysgranular insula"
+        ],
+        [
+            "dl6",
+            "dorsolateral area 6"
+        ],
+        [
+            "dl37",
+            "dorsolateral area 37 region"
+        ],
+        [
+            "efc",
+            "extreme/external capsule fibre system"
+        ],
+        [
+            "fodfs",
+            "fibre orientation distribution functions"
+        ],
+        [
+            "fus",
+            "fusiform gyrus"
+        ],
+        [
+            "hcp",
+            "human connectome project"
+        ],
+        [
+            "ifg",
+            "inferior frontal gyrus"
+        ],
+        [
+            "ils",
+            "inferior longitudinal system"
+        ],
+        [
+            "ipl",
+            "inferior parietal lobe"
+        ],
+        [
+            "itg",
+            "inferior temporal gyrus"
+        ],
+        [
+            "ins",
+            "insula"
+        ],
+        [
+            "ipa",
+            "intraparietal area"
+        ],
+        [
+            "la",
+            "lateral area"
+        ],
+        [
+            "locc",
+            "lateral occipital cortex"
+        ],
+        [
+            "cin",
+            "limbic lobe/cingulum"
+        ],
+        [
+            "mme",
+            "mean millimetre error"
+        ],
+        [
+            "mvocc",
+            "medioventral occipital cortex"
+        ],
+        [
+            "mlf",
+            "medial longitudinal fasciculus"
+        ],
+        [
+            "mesls",
+            "mesial longitudinal system"
+        ],
+        [
+            "mfg",
+            "middle frontal gyrus"
+        ],
+        [
+            "midls",
+            "middle longitudinal 
systems" + ], + [ + "mlf", + "middle longitudinal fasciculus" + ], + [ + "mtg", + "middle temporal gyrus" + ], + [ + "mni", + "montreal neurological institute" + ], + [ + "opa", + "opercular area" + ], + [ + "ofg", + "orbital frontal gyrus" + ], + [ + "pag", + "paracentral gyrus" + ], + [ + "pft", + "particle-filter tractography" + ], + [ + "pocg", + "postcentral gyrus" + ], + [ + "pts", + "posterior traverse system" + ], + [ + "pcg", + "precentral gyrus" + ], + [ + "pcun", + "precuneus" + ], + [ + "rois", + "regions of interest" + ], + [ + "rda", + "rostrodorsal area" + ], + [ + "rva", + "rostroventral area" + ], + [ + "stoi", + "sublobe tract of interest" + ], + [ + "sfg", + "superior frontal gyrus" + ], + [ + "slf", + "superior longitudinal fasciculus" + ], + [ + "sls", + "superior longitudinal system" + ], + [ + "spl", + "superior parietal lobule" + ], + [ + "stl", + "superior temporal lobe" + ], + [ + "sma", + "supplementary motor area" + ], + [ + "tois", + "tracts of interest" + ], + [ + "t", + "tesla" + ], + [ + "uf", + "uncinate fasciculus" + ], + [ + "vtois", + "variable tract of interest" + ], + [ + "abs", + "atlas based segmentation" + ], + [ + "afd", + "apparent fibre density" + ], + [ + "ad", + "axial diffusivity" + ], + [ + "bids", + "brain imaging data structure" + ], + [ + "lcs", + "cerebrospinal fluid" + ], + [ + "dodf", + "diffusion orientation distribution function" + ], + [ + "flair", + "fluid-attenuated inversion recovery" + ], + [ + "frf", + "fibre response function" + ], + [ + "rd", + "radial diffusivity" + ], + [ + "rf", + "radio frequency" + ], + [ + "scil", + "sherbrooke connectivity imaging laboratory" + ], + [ + "sp", + "multiple sclerosis" + ], + [ + "cpu", + "central processing unit" + ], + [ + "frt", + "funk-radon transform" + ], + [ + "go", + "gigabyte" + ], + [ + "gpu", + "graphical processing unit" + ], + [ + "gru", + "gated recurrent unit" + ], + [ + "irm", + "magnetic resonance imaging" + ], + [ + "irmd", + "diffusion-weighted magnetic resonance imaging" + ], + [ + "lstm", + "long short-term memory network" + ], + [ + "md", + "mean diffusivity" + ], + [ + "ram", + "random access memory" + ], + [ + "rnn", + "recurrent neural network" + ], + [ + "3d-shore", + "three dimensional simple harmonic oscillator based reconstruction and estimation model" + ], + [ + "ae", + "angular error metric" + ], + [ + "cdf", + "cohen-daubechies-feauveau" + ], + [ + "cdsi", + "classical diffusion spectrum imaging model" + ], + [ + "cs", + "compressive sensing" + ], + [ + "csa", + "constant solid angle q-ball model" + ], + [ + "csd", + "constrained spherical deconvolution model" + ], + [ + "cv", + "cross validation" + ], + [ + "ddsi", + "diffusion spectrum imaging deconvolution model" + ], + [ + "dipy", + "diffusion in python software" + ], + [ + "dnc", + "difference in the number of fiber compartments metric" + ], + [ + "dsi", + "diffusion spectrum imaging model" + ], + [ + "dsi515", + "classical diffusion spectrum imaging acquisition scheme with 515 samples" + ], + [ + "dsistudio", + "dsi studio software" + ], + [ + "dti", + "diffusion tensor imaging model" + ], + [ + "dtk", + "diffusion toolkit software" + ], + [ + "dtwt", + "dual tree wavelet transform" + ], + [ + "dw", + "diffusion weighted" + ], + [ + "dwi", + "diffusion weighted imaging" + ], + [ + "dwt", + "discrete wavelet transform" + ], + [ + "fodf", + "fiber orientation distribution function" + ], + [ + "ib", + "invalib bundles metric" + ], + [ + "idft", + "inverse discrete fourier transform" + ], + [ + "isbi", + 
"ieee international symposium on biomedical imaging" + ], + [ + "isbi2013", + "subset of the dataset from the hardi challenge at the conference isbi2013" + ], + [ + "isbi2013-full", + "dataset from the hardi challenge at the conference isbi2013" + ], + [ + "mgh-ucla hcp", + "(massachusetts general hospital - university of california, los angeles) human connectome project" + ], + [ + "nmse", + "normalized mean square error" + ], + [ + "odsi", + "optimal diffusion spectrum imaging model" + ], + [ + "pccoeff", + "pearson correlation coefficient" + ], + [ + "pdsi", + "plain diffusion spectrum imaging model" + ], + [ + "pgse", + "pulse-gradient spin-echo" ], - "acronyms": [ - ["k-nn", "k-nearest neighbors"], - ["1d", "one-dimensional"], - ["2d", "two-dimensional"], - ["3d", "three-dimensional"], - ["ac", "anterior commissure"], - ["ae", "autoencoder"], - ["af", "arcuate fascicle"], - ["ai", "artificial intelligence"], - ["ann", "artificial neural network"], - ["ar", "acoustic radiation"], - ["atr", "anterior thalamic radiation"], - ["cc", "corpus callosum"], - ["cing", "cingulum"], - ["cinta", "clustering in tractography using autoencoders"], - ["cnn", "convolutional neural network"], - ["csd", "constrained spherical deconvolution"], - ["csf", "cerebrospinal fluid"], - ["cst", "corticospinal tract"], - ["dl", "deep learning"], - ["dmri", "diffusion magnetic resonance imaging"], - ["dodf", "diffusion orientation distribution function"], - ["dt", "diffusion tensor"], - ["dti", "diffusion tensor imaging"], - ["dw-mri", "diffusion-weighted magnetic resonance imaging"], - ["dwi", "diffusion-weighted imaging"], - ["dwm", "deep white matter"], - ["eap", "ensemble average (diffusion) propagator"], - ["fa", "fractional anisotropy"], - ["fat", "frontal aslant tract"], - ["fc", "fully connected"], - ["finta", "filtering in tractography using autoencoders"], - ["fmri", "functional magnetic resonance imaging"], - ["fod", "fiber orientation distribution"], - ["fodf", "fiber orientation distribution function"], - ["ft", "fourier transform"], - ["fx", "fornix"], - ["ge", "gradient echo"], - ["gesta", "generative sampling in bundle tractography using autoencoders"], - ["gm", "gray matter"], - ["hardi", "high angular resolution diffusion imaging"], - ["ic", "internal capsule"], - ["icp", "inferior cerebellar peduncle"], - ["ifof", "inferior fronto-occipital fascicle"], - ["ils", "inferior longitudinal system"], - ["jif", "journal impact factor"], - ["mcp", "middle cerebellar peduncle"], - ["ml", "machine learning"], - ["mlp", "multilayer perceptron"], - ["mls", "middle longitudinal system"], - ["mr", "magnetic resonance"], - ["mri", "magnetic resonance imaging"], - ["nn", "neural network"], - ["nos", "number of streamlines"], - ["odf", "orientation distribution function (also referred to as orientation density function)"], - ["or", "optic radiation"], - ["pc", "posterior commissure"], - ["pca", "principal component analysis"], - ["pdf", "probability density function"], - ["pgse", "pulsed-gradient spin echo"], - ["pli", "polarized light imaging"], - ["popt", "parieto-occipital pontine tract"], - ["ps-oct", "polarization-sensitive optical coherence tomography"], - ["rf", "radio frequency"], - ["rnn", "recurrent neural network"], - ["roc", "receiver operating characteristic"], - ["scp", "superior cerebellar peduncle"], - ["sd", "spherical deconvolution"], - ["se", "spin echo"], - ["set", "surface-enhanced tractography"], - ["sls", "superior longitudinal system"], - ["smri", "structural magnetic resonance imaging"], 
- ["swm", "superficial white matter"], - ["t1-w", "t1-weighted image"], - ["te", "echo time"], - ["tr", "repetition time"], - ["uf", "uncinate fascicle"], - ["vae", "variational autoencoder"], - ["wm", "white matter"], - ["3d", "three dimensions"], - ["4d", "four dimensions"], - ["act", "anatomically-constrained tractography"], - ["amico", "accelerated microstructure imaging via convex optimization"], - ["apm", "average pathlength map"], - ["bet", "brain extraction tool"], - ["cdmri", "computational diffusion mri"], - ["cg", "cingulum"], - ["cmc", "continuous maps criterion"], - ["commit", "convex optimization modeling for microstructure informed tractography"], - ["csa", "constant solid-angle"], - ["csf/lcs/lcr", "cerebrospinal fluid"], - ["c-dec", "connectivity directionally-encoded color"], - ["dec", "directionally-encoded color"], - ["dwi", "diffusion-weighted imaging"], - ["dmri", "diffusion-weighted imaging"], - ["irmd", "diffusion-weighted imaging"], - ["eap", "ensemble average propagator"], - ["epi", "echo-planar imaging"], - ["fast", "fmrib’s automated segmentation tool"], - ["flirt", "fmrib’s linear image registration tool"], - ["fmt", "fast marching tractography"], - ["fsl", "fmrib software library"], - ["grappa", "generalized autocalibrating partially parallel acquisition"], - ["ifof", "inferior fronto-occipital fasciculus"], - ["ipmi", "information processing in medical imaging"], - ["ismrm", "international society for magnetic resonance in medicine"], - ["miccai", "medical image computing and computer assisted intervention"], - ["mprage", "magnetization-prepared rapid acquisition with gradient-echo"], - ["irm", "magnetic resonance imaging"], - ["mri", "magnetic resonance imaging"], - ["odf", "orientation distribution function"], - ["ohbm", "organization for human brain mapping"], - ["pve", "partial volume estimation"], - ["roi", "region of interest"], - ["rtt", "real-time tractography"], - ["sh", "spherical harmonics"], - ["slf", "superior longitudinal fasciculus"], - ["snr", "signal-to-noise ratio"], - ["twi", "track-weighted imaging"], - ["voi", "volume of interest"], - ["ats", "anterior traverse system"], - ["a123", "area 1/2/3 (upper limb, head, and face region)"], - ["a4hf", "area 4 (head and face region)"], - ["a4ul", "area 4 (upper limb region)"], - ["a46", "area 46"], - ["af", "arcuate fasciculus"], - ["bls", "basal longitudinal system"], - ["ca39", "caudal area 39"], - ["cdl", "caudal dorsolateral area"], - ["cvl", "caudal ventrolateral area"], - ["cdl", "caudolateral of area"], - ["csf", "cerebral spinal fluid"], - ["ctoi", "conservative tract of interest"], - ["da9/36", "dorsal area 9/46"], - ["ddi", "dorsal dysgranular insula"], - ["dl6", "dorsolateral area 6"], - ["dl37", "dorsolateral area 37 region"], - ["efc", "extreme/external capsule fibre system"], - ["fodfs", "fibre orientation distribution functions"], - ["fus", "fusiform gyrus"], - ["hcp", "human connectome project"], - ["ifg", "inferior frontal gyrus"], - ["ils", "inferior longitudinal system"], - ["ipl", "inferior parietal lobe"], - ["itg", "inferior temporal gyrus"], - ["ins", "insula"], - ["ipa", "intraparietal area"], - ["la", "lateral area"], - ["locc", "lateral occipital cortex"], - ["cin", "limbic lobe/cingulum"], - ["mme", "mean millimetre error"], - ["mvocc", "medioventral occipital cortex"], - ["mlf", "medial longitudinal fasciculus"], - ["mesls", "mesial longitudinal system"], - ["mfg", "middle frontal gyrus"], - ["midls", "middle longitudinal systems"], - ["mlf", "middle longitudinal 
fasciculus"], - ["mtg", "middle temporal gyrus"], - ["mni", "montreal neurological institute"], - ["opa", "opercular area"], - ["ofg", "orbital frontal gyrus"], - ["pag", "paracentral gyrus"], - ["pft", "particle-filter tractography"], - ["pocg", "postcentral gyrus"], - ["pts", "posterior traverse system"], - ["pcg", "precentral gyrus"], - ["pcun", "precuneus"], - ["rois", "regions of interest"], - ["rda", "rostrodorsal area"], - ["rva", "rostroventral area"], - ["stoi", "sublobe tract of interest"], - ["sfg", "superior frontal gyrus"], - ["slf", "superior longitudinal fasciculus"], - ["sls", "superior longitudinal system"], - ["spl", "superior parietal lobule"], - ["stl", "superior temporal lobe"], - ["sma", "supplementary motor area"], - ["tois", "tracts of interest"], - ["t", "tesla"], - ["uf", "uncinate fasciculus"], - ["vtois", "variable tract of interest"], - ["abs", "atlas based segmentation"], - ["afd", "apparent fibre density"], - ["ad", "axial diffusivity"], - ["bids", "brain imaging data structure"], - ["lcs", "cerebrospinal fluid"], - ["dodf", "diffusion orientation distribution function"], - ["flair", "fluid-attenuated inversion recovery"], - ["frf", "fibre response function"], - ["rd", "radial diffusivity"], - ["rf", "radio frequency"], - ["scil", "sherbrooke connectivity imaging laboratory"], - ["sp", "multiple sclerosis"], - ["cpu", "central processing unit"], - ["frt", "funk-radon transform"], - ["go", "gigabyte"], - ["gpu", "graphical processing unit"], - ["gru", "gated recurrent unit"], - ["irm", "magnetic resonance imaging"], - ["irmd", "diffusion-weighted magnetic resonance imaging"], - ["lstm", "long short-term memory network"], - ["md", "mean diffusivity"], - ["ram", "random access memory"], - ["rnn", "recurrent neural network"], - ["3d-shore", "three dimensional simple harmonic oscillator based reconstruction and estimation model"], - ["ae", "angular error metric"], - ["cdf", "cohen-daubechies-feauveau"], - ["cdsi", "classical diffusion spectrum imaging model"], - ["cs", "compressive sensing"], - ["csa", "constant solid angle q-ball model"], - ["csd", "constrained spherical deconvolution model"], - ["cv", "cross validation"], - ["ddsi", "diffusion spectrum imaging deconvolution model"], - ["dipy", "diffusion in python software"], - ["dnc", "difference in the number of fiber compartments metric"], - ["dsi", "diffusion spectrum imaging model"], - ["dsi515", "classical diffusion spectrum imaging acquisition scheme with 515 samples"], - ["dsistudio", "dsi studio software"], - ["dti", "diffusion tensor imaging model"], - ["dtk", "diffusion toolkit software"], - ["dtwt", "dual tree wavelet transform"], - ["dw", "diffusion weighted"], - ["dwi", "diffusion weighted imaging"], - ["dwt", "discrete wavelet transform"], - ["fodf", "fiber orientation distribution function"], - ["ib", "invalib bundles metric"], - ["idft", "inverse discrete fourier transform"], - ["isbi", "ieee international symposium on biomedical imaging"], - ["isbi2013", "subset of the dataset from the hardi challenge at the conference isbi2013"], - ["isbi2013-full", "dataset from the hardi challenge at the conference isbi2013"], - ["mgh-ucla hcp", "(massachusetts general hospital - university of california, los angeles) human connectome project"], - ["nmse", "normalized mean square error"], - ["odsi", "optimal diffusion spectrum imaging model"], - ["pccoeff", "pearson correlation coefficient"], - ["pdsi", "plain diffusion spectrum imaging model"], - ["pgse", "pulse-gradient spin-echo"], - ["qbi", "q-ball 
imaging model"] + [ + "qbi", + "q-ball imaging model" ] - + ] } \ No newline at end of file diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py index 56c923cdc..9cd036990 100644 --- a/scilpy/utils/scilpy_bot.py +++ b/scilpy/utils/scilpy_bot.py @@ -2,12 +2,13 @@ import nltk import pathlib import subprocess -from nltk.stem import PorterStemmer +from nltk.stem import WordNetLemmatizer from colorama import Fore, Style import re from tqdm import tqdm -stemmer = PorterStemmer() +stemmer = WordNetLemmatizer() +nltk.download('wordnet', quiet=True) RED = '\033[31m' BOLD = '\033[1m' @@ -124,7 +125,7 @@ def _stem_keywords(keywords): list of str Stemmed keywords. """ - return [stemmer.stem(keyword) for keyword in keywords] + return [stemmer.lemmatize(keyword) for keyword in keywords] def _stem_text(text): @@ -142,7 +143,7 @@ def _stem_text(text): Stemmed text. """ words = nltk.word_tokenize(text) - return ' '.join([stemmer.stem(word) for word in words]) + return ' '.join([stemmer.lemmatize(word) for word in words]) def _stem_phrase(phrase): @@ -160,7 +161,7 @@ def _stem_phrase(phrase): Stemmed phrase. """ words = phrase.split() - return ' '.join([stemmer.stem(word) for word in words]) + return ' '.join([stemmer.lemmatize(word) for word in words]) def _generate_help_files(): @@ -180,55 +181,39 @@ def _generate_help_files(): """ scripts_dir = pathlib.Path(__file__).parent.parent.parent / 'scripts' + help_dir = scripts_dir / '.hidden' scripts = [script for script in scripts_dir.glob('*.py') if script.name not in ['__init__.py', 'scil_search_keywords.py']] - total_scripts = len(scripts) + + helps = [help for help in help_dir.glob('*.help')] + scripts_to_regenerate = [script for script in scripts + if help_dir / f'{script.name}.help' not in helps] + + # Check if all help files are present + if len(scripts_to_regenerate) == 0: + print("All help files are already generated.") + return # Hidden directory to store help files hidden_dir = scripts_dir / '.hidden' hidden_dir.mkdir(exist_ok=True) # Iterate over all scripts and generate help files - with tqdm(total=total_scripts, desc="Generating help files") as pbar: - for script in scripts: - help_file = hidden_dir / f'{script.name}.help' - # Check if help file already exists - if help_file.exists(): - tqdm.write(f'Help file for {script.name} already exists. 
Skipping.') - pbar.update(1) - continue - - # Run the script with --h and capture the output - result = subprocess.run( - ['python', script, '--h'], capture_output=True, text=True) - - # Save the output to the hidden file - with open(help_file, 'w') as f: - f.write(result.stdout) - - tqdm.write(f'Help file saved to {help_file}') - pbar.update(1) - - # Check if any help files are missing and regenerate them - with tqdm(total=total_scripts, desc="Checking missing help files") as pbar: - for script in scripts_dir.glob('*.py'): - if script.name == '__init__.py' or script.name == 'scil_search_keywords.py': - pbar.update(1) - continue - help_file = hidden_dir / f'{script.name}.help' - if not help_file.exists(): - # Run the script with --h and capture the output - result = subprocess.run( - ['python', script, '--h'], capture_output=True, text=True) - - # Save the output to the hidden file - with open(help_file, 'w') as f: - f.write(result.stdout) - - tqdm.write(f'Regenerated help output for {script.name}') - pbar.update(1) + for script in tqdm(scripts_to_regenerate): + help_file = hidden_dir / f'{script.name}.help' + # Check if help file already exists + if help_file.exists(): + continue + + # Run the script with --h and capture the output + result = subprocess.run(['python', script, '--h'], + capture_output=True, text=True) + + # Save the output to the hidden file + with open(help_file, 'w') as f: + f.write(result.stdout) def _highlight_keywords(text, stemmed_keywords): @@ -250,7 +235,7 @@ def _highlight_keywords(text, stemmed_keywords): words = text.split() highlighted_text = [] for word in words: - stemmed_word = stemmer.stem(word) + stemmed_word = stemmer.lemmatize(word) if stemmed_word in stemmed_keywords: highlighted_text.append( f'{Fore.RED}{Style.BRIGHT}{word}{Style.RESET_ALL}') @@ -276,11 +261,13 @@ def _get_synonyms(keyword, synonyms_data): List of synonyms for the given keyword. """ keyword = keyword.lower() + complete_synonyms = [] for synonym_set in synonyms_data: synonym_set = [synonym.lower() for synonym in synonym_set] if keyword in synonym_set: - return synonym_set - return [] + complete_synonyms.extend(synonym_set) + + return set(complete_synonyms) def _extract_keywords_and_phrases(keywords): @@ -303,13 +290,14 @@ def _extract_keywords_and_phrases(keywords): for keyword in keywords: # if keyword contain blank space (contains more that 1 word) if ' ' in keyword: - phrases_list.append(keyword) + phrases_list.append(keyword.lower()) else: - keywords_list.append(keyword) + keywords_list.append(keyword.lower()) + return keywords_list, phrases_list -def _calculate_score(keywords, phrases, text, filename): +def _calculate_score(keywords, phrases, text, filename, suffix=''): """ Calculate a score for how well the text and filename match the keywords. 
@@ -332,12 +320,12 @@ def _calculate_score(keywords, phrases, text, filename): """ stemmed_text = _stem_text(text.lower()) stemmed_filename = _stem_text(filename.lower()) - score_details = {'total_score': 0} + score_details = {} def is_match(found_word, keyword): if len(keyword) <= 3: return found_word == keyword - return stemmer.stem(found_word) == stemmer.stem(keyword) + return stemmer.lemmatize(found_word) == stemmer.lemmatize(keyword) for keyword in keywords: keyword = keyword.lower() @@ -351,12 +339,13 @@ def is_match(found_word, keyword): if is_match(found_word, keyword): keyword_score += 1 - score_details[keyword] = keyword_score - score_details['total_score'] += keyword_score + if keyword_score > 0: + score_details[keyword + suffix] = keyword_score for phrase in phrases: phrase_stemmed = _stem_text(phrase.lower()) phrase_score = stemmed_text.count(phrase_stemmed) - score_details[phrase] = phrase_score - score_details['total_score'] += phrase_score + if phrase_score > 0: + score_details[phrase + suffix] = phrase_score + return score_details diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 713805935..6bb599863 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -32,20 +32,19 @@ - scil_search_keywords.py --search_category tractogram - scil_search_keywords.py -v sh - scil_search_keywords.py -v DEBUG sh - """ import argparse import logging import pathlib +import shutil try: import nltk nltk.download('punkt', quiet=True) except ImportError: - print("You must install the 'nltk' package to use this script." - "Please run 'pip install nltk'.") - exit(1) + raise ImportError("You must install the 'nltk' package to use this script." + "Please run 'pip install nltk'.") from colorama import Fore, Style import json @@ -72,10 +71,12 @@ def _build_arg_parser(): p.add_argument('--search_category', action='store_true', help='Search within a specific category of scripts.') - p.add_argument('--no_synonyms', action='store_true', help='Search without using synonyms.') + p.add_argument('--regenerate_help_files', action='store_true', + help='Regenerate help files for all scripts.') + add_verbose_arg(p) return p @@ -92,6 +93,8 @@ def main(): selected_object = None if args.search_category: selected_object = prompt_user_for_object() + else: + selected_object = '' # keywords are single words. Phrases are composed keywords keywords, phrases = _extract_keywords_and_phrases(args.keywords) @@ -108,22 +111,20 @@ def main(): script_dir = pathlib.Path(__file__).parent hidden_dir = script_dir / '.hidden' + if args.regenerate_help_files: + shutil.rmtree(hidden_dir) + if not hidden_dir.exists(): hidden_dir.mkdir() - logging.info('This is your first time running this script.\n' - 'Generating help files may take a few minutes,' - 'please be patient.\n' - 'Subsequent searches will be much faster.') - _generate_help_files() + logging.info('This is your first time running this script. ' + 'Generating help files may take a few minutes\n ' + 'Please be patient, subsequent searches will be faster.') - matches = [] - scores = {} - docstrings = {} # To store the docstrings of each script + _generate_help_files() - # pattern to search for - search_pattern = f'scil_{"{}_" if selected_object else ""}*.py' + scores = {} - def update_matches_and_scores(filename, score_details, docstring=None): + def update_matches_and_scores(filename, score_details): """ Update the matches and scores for the given filename based on the score details. 
@@ -135,51 +136,34 @@ def update_matches_and_scores(filename, score_details, docstring=None): score_details : dict A dictionary containing the scores for the keywords and phrases found in the script. - This dictionary should have a 'total_score' key - indicating the cumulative score. - docstring : str, optional - The docstring of the script. Returns ------- None Just updates the global `matches` and `scores` lists/dictionaries. """ - if score_details['total_score'] > 0: - if filename not in matches: - matches.append(filename) - scores[filename] = score_details - if docstring: - docstrings[filename] = docstring + for key, value in score_details.items(): + if value == 0: + continue + + if filename not in scores: + scores[filename] = {key: value} + elif key not in scores[filename]: + scores[filename].update({key: value}) else: - for key, value in score_details.items(): - if key != 'total_score': - scores[filename][key] = scores[filename].get( - key, 0) + value - scores[filename]['total_score'] += score_details['total_score'] - if docstring: - docstrings[filename] = docstring - - for script in sorted(script_dir.glob(search_pattern.format(selected_object))): + scores[filename][key] += value + + return + + for script in sorted(hidden_dir.glob(f'scil_{selected_object}*.help')): filename = script.stem - if filename == '__init__' or filename == 'scil_search_keywords': - continue # Search through the docstring - search_text = _get_docstring_from_script_path(str(script)) + with open(script, 'r') as f: + search_text = f.read() + score_details = _calculate_score( stemmed_keywords, stemmed_phrases, search_text, filename=filename) - update_matches_and_scores(filename, score_details, - docstring=search_text) - - # Search in help files - help_file = hidden_dir / f"{filename}.py.help" - if help_file.exists(): - with open(help_file, 'r') as f: - search_text = f.read() - score_details = _calculate_score( - stemmed_keywords, stemmed_phrases, - search_text, filename=filename) - update_matches_and_scores(filename, score_details) + update_matches_and_scores(filename, score_details) # Search in keywords file with open(VOCAB_FILE_PATH, 'r') as f: @@ -187,8 +171,6 @@ def update_matches_and_scores(filename, score_details, docstring=None): for script in vocab_data['scripts']: script_name = script['name'] - if selected_object and not script_name.startswith(f'scil_{selected_object}_'): - continue script_keywords = script['keywords'] search_text = ' '.join(script_keywords) score_details = _calculate_score( @@ -197,58 +179,80 @@ def update_matches_and_scores(filename, score_details, docstring=None): # Search in synonyms file if not args.no_synonyms is not specified if not args.no_synonyms: - for keyword in keywords + phrases: + full_list_to_verify = set(stemmed_keywords + stemmed_phrases) + for keyword in full_list_to_verify: synonyms = _get_synonyms(keyword, vocab_data['synonyms']) - for script in sorted(script_dir.glob(search_pattern.format(selected_object))): + + for script in sorted(hidden_dir.glob(f'scil_{selected_object}*.help')): + score_details = {} filename = script.stem - if filename == '__init__' or filename == 'scil_search_keywords': - continue - search_text = _get_docstring_from_script_path(str(script)) - # Initialize or get existing score_details for the script - score_details = scores.get(filename, {'total_score': 0}) - for synonym in synonyms: - if synonym in search_text and synonym != keyword: - # Update score_details with count of each synonym found - score_details[keyword + ' synonyms'] = 
score_details.get( - keyword + ' synonyms', 0) + search_text.count(synonym) - score_details['total_score'] += search_text.count( - synonym) + with open(script, 'r') as f: + search_text = f.read() - # Directly update scores dictionary - scores[filename] = score_details + for synonym in synonyms: + if filename in scores and synonym in scores[filename]: + continue + + if ' ' in synonym: + stemmed_phrases = [synonym] + stemmed_keywords = [] + else: + stemmed_keywords = [synonym] + stemmed_phrases = [] + + score_details = _calculate_score(stemmed_keywords, + stemmed_phrases, + search_text, + script_name, + suffix=' (synonyms)') + + # Directly update scores dictionary + if filename in scores: + scores[filename].update(score_details) + else: + scores[filename] = score_details + + matches = list(scores.keys()) if not matches: logging.info(_make_title(' No results found! ')) + return + + total_scores = {match: sum(scores[match].values()) for match in matches} + sorted_matches = sorted(total_scores, key=total_scores.get) # Sort matches by score and display them - else: - sorted_matches = sorted( - matches, key=lambda x: scores[x]['total_score'], reverse=False) - - logging.info(_make_title(' Results Ordered by Score ')) - for match in sorted_matches: - if scores[match]['total_score'] > 0: - logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match}{Style.RESET_ALL}") - - for word, score in scores[match].items(): - if word != 'total_score': - logging.info(f"{Fore.GREEN}Occurrence of '{keyword_mapping.get(word, phrase_mapping.get(word, word))}': {score}{Style.RESET_ALL}") - - # Highlight keywords based on verbosity level - if match in docstrings: - highlighted_docstring = _highlight_keywords(docstrings[match], - stemmed_keywords) - if args.verbose == 'INFO': - first_sentence = _split_first_sentence(highlighted_docstring)[0] - logging.info(f"{first_sentence.strip()}") - elif args.verbose == 'DEBUG': - logging.debug(f"{highlighted_docstring.strip()}") - logging.info(f"{Fore.RED}Total Score: {scores[match]['total_score']}{Style.RESET_ALL}") - logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") - logging.info("\n") - logging.info(_make_title( - ' Results Ordered by Score (Best results at the bottom) ')) + logging.info(_make_title(' Results Ordered by Score ')) + for match in sorted_matches: + if total_scores[match] == 0: + continue + logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match}{Style.RESET_ALL}") + + for word, score in scores[match].items(): + original_word = keyword_mapping.get( + word, phrase_mapping.get(word, word)) + logging.info( + f"{Fore.GREEN}Occurrence of '{original_word}': {score}{Style.RESET_ALL}") + + # Highlight keywords based on verbosity level + with open(hidden_dir / f'{match}.help', 'r') as f: + docstrings = f.read() + highlighted_docstring = _highlight_keywords(docstrings, + stemmed_keywords) + if args.verbose == 'INFO': + first_sentence = _split_first_sentence( + highlighted_docstring)[0] + logging.info(f"{first_sentence.strip()}") + elif args.verbose == 'DEBUG': + logging.debug(f"{highlighted_docstring.strip()}") + logging.info( + f"{Fore.RED}Total Score: {total_scores[match]}{Style.RESET_ALL}") + logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}") + logging.info("\n") + + logging.info(_make_title( + ' Results Ordered by Score (Best results at the bottom) ')) if __name__ == '__main__': From 238dd9af92739f8a7d237b34d687d792ac12cdea Mon Sep 17 00:00:00 2001 From: frheault Date: Wed, 2 Oct 2024 14:10:04 -0400 Subject: [PATCH 67/69] New and improved version --- data/{vocabulary => }/vocabulary.json | 32 
+++-- scilpy/utils/scilpy_bot.py | 144 +++++++++++++++------- scilpy/utils/tests/test_scilpy_bot.py | 32 ++++- scripts/scil_search_keywords.py | 167 +++++++++++--------------- 4 files changed, 217 insertions(+), 158 deletions(-) rename data/{vocabulary => }/vocabulary.json (98%) diff --git a/data/vocabulary/vocabulary.json b/data/vocabulary.json similarity index 98% rename from data/vocabulary/vocabulary.json rename to data/vocabulary.json index 15d506182..738de6765 100644 --- a/data/vocabulary/vocabulary.json +++ b/data/vocabulary.json @@ -736,8 +736,13 @@ ] }, { - "name": "scil_tractogram_alter.py", - "keywords": [] + "name": "scil_bundle_alter_to_target_dice.py", + "keywords": [ + "cut", + "dice", + "transform", + "trim" + ] }, { "name": "scil_tractogram_apply_transform.py", @@ -894,19 +899,11 @@ "keywords": [] }, { - "name": "scil_tractogram_segment_and_score.py", + "name": "scil_tractogram_segment_with_ROI_and_score.py", "keywords": [] }, { - "name": "scil_tractogram_segment_bundles_for_connectivity.py", - "keywords": [] - }, - { - "name": "scil_tractogram_segment_bundles.py", - "keywords": [] - }, - { - "name": "scil_tractogram_segment_one_bundle.py", + "name": "scil_tractogram_segment_connections_from_labels.py", "keywords": [] }, { @@ -1059,6 +1056,11 @@ } ], "synonyms": [ + [ + "tractogram", + "streamline", + "whole brain" + ], [ "bundle", "tract", @@ -2031,10 +2033,6 @@ "isbi2013-full", "dataset from the hardi challenge at the conference isbi2013" ], - [ - "mgh-ucla hcp", - "(massachusetts general hospital - university of california, los angeles) human connectome project" - ], [ "nmse", "normalized mean square error" @@ -2060,4 +2058,4 @@ "q-ball imaging model" ] ] -} \ No newline at end of file +} diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py index 9cd036990..f567a2698 100644 --- a/scilpy/utils/scilpy_bot.py +++ b/scilpy/utils/scilpy_bot.py @@ -2,23 +2,20 @@ import nltk import pathlib import subprocess -from nltk.stem import WordNetLemmatizer +from nltk.stem import WordNetLemmatizer, PorterStemmer from colorama import Fore, Style import re from tqdm import tqdm -stemmer = WordNetLemmatizer() -nltk.download('wordnet', quiet=True) - -RED = '\033[31m' -BOLD = '\033[1m' -END_COLOR = '\033[0m' -SPACING_CHAR = '=' SPACING_LEN = 80 +stemmer_a = WordNetLemmatizer() +stemmer_b = PorterStemmer() +nltk.download('wordnet', quiet=True) + # Path to the JSON file containing script information and keywords VOCAB_FILE_PATH = pathlib.Path( - __file__).parent.parent.parent/'data' / 'vocabulary' / 'vocabulary.json' + __file__).parent.parent.parent/'data' / 'vocabulary.json' OBJECTS = [ @@ -55,7 +52,8 @@ def _make_title(text): """ Returns a formatted title string with centered text and spacing """ - return f'{Fore.BLUE}{Style.BRIGHT}{text.center(80, "=")}{Style.RESET_ALL}' + return f'{Fore.LIGHTBLUE_EX}{Style.BRIGHT}{text.center(SPACING_LEN, "=")}' \ + f'{Style.RESET_ALL}' def _get_docstring_from_script_path(script): @@ -111,6 +109,27 @@ def _split_first_sentence(text): return sentence, remaining +def _stem_word(word): + """ + Stem a word using two different stemmers and return the most appropriate + stem. + + Parameters + ---------- + word : str + Word to stem. + + Returns + ------- + str + Stemmed word. + """ + if len(word) <= 3: + return word + version_b = stemmer_b.stem(word) + return version_b + + def _stem_keywords(keywords): """ Stem a list of keywords using PorterStemmer. @@ -125,7 +144,7 @@ def _stem_keywords(keywords): list of str Stemmed keywords. 
""" - return [stemmer.lemmatize(keyword) for keyword in keywords] + return [_stem_word(keyword) for keyword in keywords] def _stem_text(text): @@ -143,7 +162,7 @@ def _stem_text(text): Stemmed text. """ words = nltk.word_tokenize(text) - return ' '.join([stemmer.lemmatize(word) for word in words]) + return ' '.join([_stem_word(word) for word in words]) def _stem_phrase(phrase): @@ -161,7 +180,7 @@ def _stem_phrase(phrase): Stemmed phrase. """ words = phrase.split() - return ' '.join([stemmer.lemmatize(word) for word in words]) + return ' '.join([_stem_word(word) for word in words]) def _generate_help_files(): @@ -216,7 +235,7 @@ def _generate_help_files(): f.write(result.stdout) -def _highlight_keywords(text, stemmed_keywords): +def _highlight_keywords(text, all_expressions): """ Highlight the stemmed keywords in the given text using colorama. @@ -224,24 +243,29 @@ def _highlight_keywords(text, stemmed_keywords): ---------- text : str Text to highlight keywords in. - stemmed_keywords : list of str - Stemmed keywords to highlight. + all_expressions : list of str + List of all things to highlight. Returns ------- str Text with highlighted keywords. """ - words = text.split() - highlighted_text = [] - for word in words: - stemmed_word = stemmer.lemmatize(word) - if stemmed_word in stemmed_keywords: - highlighted_text.append( - f'{Fore.RED}{Style.BRIGHT}{word}{Style.RESET_ALL}') - else: - highlighted_text.append(word) - return ' '.join(highlighted_text) + # Iterate over each keyword in the list + for kw in all_expressions: + # Create a regex pattern to match any word containing the keyword + pattern = re.compile( + r'\b(\w?' + re.escape(kw) + r's?\w?)\b', re.IGNORECASE) + + # Function to apply highlighting to the matched word + def apply_highlight(match): + return f'{Fore.LIGHTYELLOW_EX}{Style.BRIGHT}{match.group(0)}' \ + f'{Style.RESET_ALL}' + + # Replace the matched word with its highlighted version + text = pattern.sub(apply_highlight, text) + + return text def _get_synonyms(keyword, synonyms_data): @@ -264,19 +288,21 @@ def _get_synonyms(keyword, synonyms_data): complete_synonyms = [] for synonym_set in synonyms_data: synonym_set = [synonym.lower() for synonym in synonym_set] - if keyword in synonym_set: + stemmed_synonyms_set = [_stem_word(synonym) for synonym in synonym_set] + + if keyword in synonym_set or _stem_word(keyword) in stemmed_synonyms_set: complete_synonyms.extend(synonym_set) - return set(complete_synonyms) + return list(set(complete_synonyms)) -def _extract_keywords_and_phrases(keywords): +def _extract_keywords_and_phrases(expressions): """ Extract keywords and phrases from the provided list. Parameters ---------- - keywords : list of str + expressions : list of str List of keywords and phrases. Returns @@ -284,17 +310,17 @@ def _extract_keywords_and_phrases(keywords): list of str, list of str List of individual keywords and list of phrases. 
""" - keywords_list = [] - phrases_list = [] + keywords_set = set() + phrases_set = set() - for keyword in keywords: + for expression in expressions: # if keyword contain blank space (contains more that 1 word) - if ' ' in keyword: - phrases_list.append(keyword.lower()) + if ' ' in expression: + phrases_set.add(expression.lower()) else: - keywords_list.append(keyword.lower()) + keywords_set.add(expression.lower()) - return keywords_list, phrases_list + return list(keywords_set), list(phrases_set) def _calculate_score(keywords, phrases, text, filename, suffix=''): @@ -325,19 +351,22 @@ def _calculate_score(keywords, phrases, text, filename, suffix=''): def is_match(found_word, keyword): if len(keyword) <= 3: return found_word == keyword - return stemmer.lemmatize(found_word) == stemmer.lemmatize(keyword) + return _stem_word(found_word) == _stem_word(keyword) for keyword in keywords: keyword = keyword.lower() # Use regular expressions to match whole words only - keyword_pattern = re.compile(r'\b' + re.escape(keyword) + r'\b') - found_words = keyword_pattern.findall( - stemmed_text) + keyword_pattern.findall(stemmed_filename) + + keyword_pattern = re.compile( + r'\b(\w?' + re.escape(keyword) + r's?\w?)\b', re.IGNORECASE) + found_words = keyword_pattern.findall(stemmed_text) \ + + keyword_pattern.findall(stemmed_filename) keyword_score = 0 for found_word in found_words: if is_match(found_word, keyword): keyword_score += 1 + continue if keyword_score > 0: score_details[keyword + suffix] = keyword_score @@ -349,3 +378,36 @@ def is_match(found_word, keyword): score_details[phrase + suffix] = phrase_score return score_details + + +def update_matches_and_scores(scores, filename, score_details): + """ + Update the matches and scores for the given filename based + on the score details. + + Parameters + ---------- + scores : dict + A dictionary containing the scores for the keywords (to be updated). + filename : str + The name of the script file being analyzed. + score_details : dict + A dictionary containing the scores for the keywords + and phrases found in the script. + Returns + ------- + None + Just updates the global `matches` and `scores` lists/dictionaries. 
+    """
+    for key, value in score_details.items():
+        if value == 0:
+            continue
+
+        if filename not in scores:
+            scores[filename] = {key: value}
+        elif key not in scores[filename]:
+            scores[filename].update({key: value})
+        else:
+            scores[filename][key] += value
+
+    return scores
diff --git a/scilpy/utils/tests/test_scilpy_bot.py b/scilpy/utils/tests/test_scilpy_bot.py
index 5574a1ce4..9d3ee5303 100644
--- a/scilpy/utils/tests/test_scilpy_bot.py
+++ b/scilpy/utils/tests/test_scilpy_bot.py
@@ -1,8 +1,8 @@
 from scilpy.utils.scilpy_bot import (
     _make_title, _get_docstring_from_script_path,
-    _split_first_sentence, _stem_keywords, _stem_text, _stem_phrase,
-    _highlight_keywords, _get_synonyms,
+    _split_first_sentence, _stem_word, _stem_keywords, _stem_text,
+    _stem_phrase, _highlight_keywords, _get_synonyms,
     _extract_keywords_and_phrases, _calculate_score
 )
@@ -55,14 +55,35 @@ def test_highlight_keywords():
 def test_get_synonyms():
     synonyms_data = [["run", "sprint"], ["jump", "leap"]]
     result = _get_synonyms("run", synonyms_data)
-    assert result == ["run", "sprint"]
+    # The returned set includes the keyword itself and its synonym
+    assert len(result) == 2


 def test_extract_keywords_and_phrases():
     keywords = ["running", "jumps", "quick run"]
     result_keywords, result_phrases = _extract_keywords_and_phrases(keywords)
-    assert result_keywords == ["running", "jumps"]
-    assert result_phrases == ["quick run"]
+
+    # Verify all keywords and phrases are extracted
+    assert len(result_keywords) == 2
+    for expected in ["running", "jumps"]:
+        assert expected in result_keywords
+
+    assert len(result_phrases) == 1
+    assert "quick run" in result_phrases
+
+
+def test_stem_word_specific():
+    result = _stem_word("streamlines")
+    assert result == "streamlin"
+
+    result = _stem_word("tractograms")
+    assert result == "tractogram"
+
+    result = _stem_word("tractography")
+    assert result == "tractographi"
+
+    result = _stem_word("tractometry")
+    assert result == "tractometri"


 def test_calculate_score():
@@ -71,6 +92,5 @@ def test_calculate_score():
     text = "Running quickly is fun. A quick run is good."
filename = "run_script.py" result = _calculate_score(keywords, phrases, text, filename) - assert result["total_score"] == 3 assert result["run"] == 2 assert result["quick run"] == 1 diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 6bb599863..576e54eb7 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -34,6 +34,12 @@ - scil_search_keywords.py -v DEBUG sh """ +# TODO harmonize variable names +# TODO add more tests (generic even if code evolves) +# TODO add more comments about the stemming and synonyms +# TODO Order imports and functions alphabetically +# TODO remove useless code if any + import argparse import logging import pathlib @@ -50,7 +56,7 @@ import json from scilpy.utils.scilpy_bot import ( - _get_docstring_from_script_path, _stem_keywords, + _stem_keywords, update_matches_and_scores, _stem_phrase, _generate_help_files, _get_synonyms, _extract_keywords_and_phrases, _calculate_score, _make_title, prompt_user_for_object, @@ -66,8 +72,9 @@ def _build_arg_parser(): p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter) - p.add_argument('keywords', nargs='+', - help='Search the provided list of keywords.') + p.add_argument('expressions', nargs='+', + help='Search the provided list of expressions.\n' + 'Use quotes to search for phrases.') p.add_argument('--search_category', action='store_true', help='Search within a specific category of scripts.') @@ -96,10 +103,26 @@ def main(): else: selected_object = '' + args.expressions = [expression.lower() for expression in args.expressions] + # keywords are single words. Phrases are composed keywords - keywords, phrases = _extract_keywords_and_phrases(args.keywords) + keywords, phrases = _extract_keywords_and_phrases(args.expressions) + + with open(VOCAB_FILE_PATH, 'r') as f: + vocab_data = json.load(f) + + # If synonyms are enabled, extend the search to include synonyms + if not args.no_synonyms: + all_expressions = keywords + phrases + extended_expressions = set() + for expression in all_expressions: + synonyms = _get_synonyms(expression, vocab_data['synonyms']) + extended_expressions.update(synonyms) + extended_expressions.update(args.expressions) + keywords, phrases = _extract_keywords_and_phrases(extended_expressions) + stemmed_keywords = _stem_keywords(keywords) - stemmed_phrases = [_stem_phrase(phrase) for phrase in phrases] + stemmed_phrases = list(set([_stem_phrase(phrase) for phrase in phrases])) # Create a mapping of stemmed to original keywords # This will be needed to display the occurence of the keywords @@ -122,104 +145,46 @@ def main(): _generate_help_files() - scores = {} - - def update_matches_and_scores(filename, score_details): - """ - Update the matches and scores for the given filename based - on the score details. - - Parameters - ---------- - filename : str - The name of the script file being analyzed. - score_details : dict - A dictionary containing the scores for the keywords - and phrases found in the script. - Returns - ------- - None - Just updates the global `matches` and `scores` lists/dictionaries. 
- """ - for key, value in score_details.items(): - if value == 0: - continue - - if filename not in scores: - scores[filename] = {key: value} - elif key not in scores[filename]: - scores[filename].update({key: value}) - else: - scores[filename][key] += value - - return + scores_per_script = {} + # Search through the docstrings of all scripts for script in sorted(hidden_dir.glob(f'scil_{selected_object}*.help')): - filename = script.stem + script_name = script.stem - # Search through the docstring with open(script, 'r') as f: search_text = f.read() score_details = _calculate_score( - stemmed_keywords, stemmed_phrases, search_text, filename=filename) - update_matches_and_scores(filename, score_details) + stemmed_keywords, stemmed_phrases, search_text, + filename=script_name) - # Search in keywords file - with open(VOCAB_FILE_PATH, 'r') as f: - vocab_data = json.load(f) + scores_per_script = update_matches_and_scores(scores_per_script, + script_name, score_details) + # Search in additional keywords in the vocabulary file for script in vocab_data['scripts']: + if selected_object and selected_object not in script: + continue + script_name = script['name'] script_keywords = script['keywords'] search_text = ' '.join(script_keywords) score_details = _calculate_score( stemmed_keywords, stemmed_phrases, search_text, script_name) - update_matches_and_scores(script_name, score_details) + scores_per_script = update_matches_and_scores(scores_per_script, + script_name, score_details) - # Search in synonyms file if not args.no_synonyms is not specified - if not args.no_synonyms: - full_list_to_verify = set(stemmed_keywords + stemmed_phrases) - for keyword in full_list_to_verify: - synonyms = _get_synonyms(keyword, vocab_data['synonyms']) - - for script in sorted(hidden_dir.glob(f'scil_{selected_object}*.help')): - score_details = {} - filename = script.stem - - with open(script, 'r') as f: - search_text = f.read() - - for synonym in synonyms: - if filename in scores and synonym in scores[filename]: - continue - - if ' ' in synonym: - stemmed_phrases = [synonym] - stemmed_keywords = [] - else: - stemmed_keywords = [synonym] - stemmed_phrases = [] - - score_details = _calculate_score(stemmed_keywords, - stemmed_phrases, - search_text, - script_name, - suffix=' (synonyms)') - - # Directly update scores dictionary - if filename in scores: - scores[filename].update(score_details) - else: - scores[filename] = score_details - - matches = list(scores.keys()) - - if not matches: + # Remove scripts with no matches + scores_per_script = {script: score for script, + score in scores_per_script.items() if score} + matched_scripts = list(scores_per_script.keys()) + + if not matched_scripts: logging.info(_make_title(' No results found! 
'))
         return
 
-    total_scores = {match: sum(scores[match].values()) for match in matches}
+    total_scores = {match: sum(
+        scores_per_script[match].values()) for match in matched_scripts}
     sorted_matches = sorted(total_scores, key=total_scores.get)
 
     # Sort matches by score and display them
@@ -227,28 +192,42 @@ def update_matches_and_scores(filename, score_details):
     for match in sorted_matches:
         if total_scores[match] == 0:
             continue
-        logging.info(f"{Fore.BLUE}{Style.BRIGHT}{match}{Style.RESET_ALL}")
-
-        for word, score in scores[match].items():
-            original_word = keyword_mapping.get(
-                word, phrase_mapping.get(word, word))
-            logging.info(
-                f"{Fore.GREEN}Occurrence of '{original_word}': {score}{Style.RESET_ALL}")
 
         # Highlight keywords based on verbosity level
         with open(hidden_dir / f'{match}.help', 'r') as f:
             docstrings = f.read()
+
+        all_expressions = stemmed_keywords + keywords + phrases \
+            + stemmed_phrases
+        if not args.no_synonyms:
+            all_expressions += synonyms
+
+        all_expressions = set(all_expressions)
+
         highlighted_docstring = _highlight_keywords(docstrings,
-                                                    stemmed_keywords)
+                                                    all_expressions)
         if args.verbose == 'INFO':
             first_sentence = _split_first_sentence(
                 highlighted_docstring)[0]
             logging.info(f"{first_sentence.strip()}")
         elif args.verbose == 'DEBUG':
             logging.debug(f"{highlighted_docstring.strip()}")
+
+        # Print the basic information at the end
+        logging.info(
+            f"{Fore.LIGHTYELLOW_EX}Total Score: {total_scores[match]}"
+            f"{Style.RESET_ALL}")
+
         logging.info(
-            f"{Fore.RED}Total Score: {total_scores[match]}{Style.RESET_ALL}")
-        logging.info(f"{Fore.BLUE}{'=' * SPACING_LEN}")
+            f"{Fore.LIGHTBLUE_EX}{Style.BRIGHT}{match}{Style.RESET_ALL}")
+
+        for word, score in scores_per_script[match].items():
+            original_word = keyword_mapping.get(
+                word, phrase_mapping.get(word, word))
+            logging.info(
+                f"{Fore.LIGHTGREEN_EX}Occurrence of '{original_word}': "
+                f"{score}{Style.RESET_ALL}")
+        logging.info(f"{Fore.LIGHTBLUE_EX}{'=' * SPACING_LEN}")
         logging.info("\n")
 
     logging.info(_make_title(

From a9f43cf85fd3dac937f0e332ddf6c3bf2ece8fc6 Mon Sep 17 00:00:00 2001
From: frheault
Date: Tue, 8 Oct 2024 08:30:34 -0400
Subject: [PATCH 68/69] Update fury requirements

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 6d6701aad..23cb23241 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,7 +14,7 @@ docopt==0.6.*
 dvc==3.48.*
 dvc-http==2.32.*
 formulaic==0.3.*
-fury==0.10.*
+fury==0.11.*
 future==0.18.*
 GitPython==3.1.*
 h5py==3.10.*

From bf138ae173aad6d4a1255c5e3274899516a294ce Mon Sep 17 00:00:00 2001
From: frheault
Date: Fri, 11 Oct 2024 11:14:01 -0400
Subject: [PATCH 69/69] Fix import

---
 scilpy/utils/scilpy_bot.py      | 22 ++++++++++++++--------
 scripts/scil_search_keywords.py |  8 +++-----
 2 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/scilpy/utils/scilpy_bot.py b/scilpy/utils/scilpy_bot.py
index f567a2698..0bd3563e0 100644
--- a/scilpy/utils/scilpy_bot.py
+++ b/scilpy/utils/scilpy_bot.py
@@ -1,17 +1,23 @@
+# -*- coding: utf-8 -*-
 import ast
-import nltk
-import pathlib
-import subprocess
-from nltk.stem import WordNetLemmatizer, PorterStemmer
 from colorama import Fore, Style
+import pathlib
 import re
+import subprocess
+
+import nltk
+from nltk.stem import PorterStemmer
 from tqdm import tqdm
 
 SPACING_LEN = 80
 
-stemmer_a = WordNetLemmatizer()
-stemmer_b = PorterStemmer()
-nltk.download('wordnet', quiet=True)
+stemmer = PorterStemmer()
+try:
+    nltk.download('punkt', quiet=True)
+    nltk.download('wordnet', 
quiet=True) +except ImportError: + raise ImportError("You must install the 'nltk' package to use this script." + "Please run 'pip install nltk'.") # Path to the JSON file containing script information and keywords VOCAB_FILE_PATH = pathlib.Path( @@ -126,7 +132,7 @@ def _stem_word(word): """ if len(word) <= 3: return word - version_b = stemmer_b.stem(word) + version_b = stemmer.stem(word) return version_b diff --git a/scripts/scil_search_keywords.py b/scripts/scil_search_keywords.py index 576e54eb7..71ad5f543 100755 --- a/scripts/scil_search_keywords.py +++ b/scripts/scil_search_keywords.py @@ -41,6 +41,8 @@ # TODO remove useless code if any import argparse +from colorama import Fore, Style +import json import logging import pathlib import shutil @@ -48,13 +50,11 @@ try: import nltk nltk.download('punkt', quiet=True) + nltk.download('wordnet', quiet=True) except ImportError: raise ImportError("You must install the 'nltk' package to use this script." "Please run 'pip install nltk'.") -from colorama import Fore, Style -import json - from scilpy.utils.scilpy_bot import ( _stem_keywords, update_matches_and_scores, _stem_phrase, _generate_help_files, @@ -65,8 +65,6 @@ from scilpy.utils.scilpy_bot import SPACING_LEN, VOCAB_FILE_PATH from scilpy.io.utils import add_verbose_arg -nltk.download('punkt', quiet=True) - def _build_arg_parser(): p = argparse.ArgumentParser(description=__doc__,