From d66c567aa8ae67b57227353ccbab841caced4e04 Mon Sep 17 00:00:00 2001 From: lorenzocerrone Date: Thu, 13 Apr 2023 15:51:14 +0200 Subject: [PATCH 01/15] add import name callback --- plantseg/viewer/widget/io.py | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/plantseg/viewer/widget/io.py b/plantseg/viewer/widget/io.py index e2938402..242a7967 100644 --- a/plantseg/viewer/widget/io.py +++ b/plantseg/viewer/widget/io.py @@ -88,8 +88,8 @@ def unpack_load(loaded_dict, key): call_button='Open file', path={'label': 'Pick a file (tiff or h5, png, jpg)', 'tooltip': 'Select a file to be imported, the file can be a tiff or h5.'}, - name={'label': 'Layer Name', - 'tooltip': 'Define the name of the output layer, default is either image or label.'}, + new_layer_name={'label': 'Layer Name', + 'tooltip': 'Define the name of the output layer, default is either image or label.'}, layer_type={ 'label': 'Layer type', 'tooltip': 'Select if the image is a normal image or a segmentation', @@ -105,14 +105,14 @@ def unpack_load(loaded_dict, key): 'tooltip': 'Channel to select and channels layout'}) def open_file(path: Path = Path.home(), layer_type: str = 'image', - name: str = '', + new_layer_name: str = '', advanced_load: bool = False, key: str = 'raw', channel: Tuple[int, str] = (0, 'xcxx'), ) -> LayerDataTuple: """Open a file and return a napari layer.""" - name = layer_type if name == '' else name - loaded_dict_name = f'{name}_loaded_dict' + new_layer_name = layer_type if new_layer_name == '' else new_layer_name + loaded_dict_name = f'{new_layer_name}_loaded_dict' # wrap load routine and add it to the dag step_params = {'key': key, @@ -121,7 +121,7 @@ def open_file(path: Path = Path.home(), 'layer_type': layer_type} dag_manager.add_step(napari_image_load, - input_keys=(f'{name}_path',), + input_keys=(f'{new_layer_name}_path',), output_key=loaded_dict_name, step_name='Load stack', static_params=step_params) @@ -133,9 +133,9 @@ def open_file(path: Path = Path.home(), voxel_size_unit = load_dict['voxel_size_unit'] # add the key unwrapping to the dag - for key, out_name in [('data', name), - ('voxel_size', f'{name}_voxel_size'), - ('voxel_size_unit', f'{name}_voxel_size_unit')]: + for key, out_name in [('data', new_layer_name), + ('voxel_size', f'{new_layer_name}_voxel_size'), + ('voxel_size_unit', f'{new_layer_name}_voxel_size_unit')]: step_params = {'key': key} dag_manager.add_step(unpack_load, input_keys=(loaded_dict_name,), @@ -146,16 +146,22 @@ def open_file(path: Path = Path.home(), # return layer - napari_formatted_logging(f'{name} Correctly imported, voxel_size: {voxel_size} {voxel_size_unit}', + napari_formatted_logging(f'{new_layer_name} Correctly imported, voxel_size: {voxel_size} {voxel_size_unit}', thread='Open file') - layer_kwargs = layer_properties(name=name, + layer_kwargs = layer_properties(name=new_layer_name, scale=voxel_size, metadata={'original_voxel_size': voxel_size, 'voxel_size_unit': voxel_size_unit, - 'root_name': name}) + 'root_name': new_layer_name}) return data, layer_kwargs, layer_type +@open_file.path.changed.connect +def _on_path_changed(path: Path): + open_file.new_layer_name.value = path.stem + + + def export_stack_as_tiff(data, name, directory, From 039dcdb924090eeec0e8de4b43ac7252379228df Mon Sep 17 00:00:00 2001 From: lorenzocerrone Date: Thu, 13 Apr 2023 16:22:09 +0200 Subject: [PATCH 02/15] add rescaling callbacks to synchronize different fields --- plantseg/viewer/widget/dataprocessing.py | 54 +++++++++++++++--------- 1 
file changed, 33 insertions(+), 21 deletions(-) diff --git a/plantseg/viewer/widget/dataprocessing.py b/plantseg/viewer/widget/dataprocessing.py index bcbb207a..addff817 100644 --- a/plantseg/viewer/widget/dataprocessing.py +++ b/plantseg/viewer/widget/dataprocessing.py @@ -22,7 +22,7 @@ 'widget_type': 'FloatSlider', 'tooltip': 'Define the size of the gaussian smoothing kernel. ' 'The larger the more blurred will be the output image.', - 'max': 5., + 'max': 10., 'min': 0.}) def widget_gaussian_smoothing(image: Image, sigma: float = 1., @@ -48,14 +48,6 @@ def widget_gaussian_smoothing(image: Image, @magicgui(call_button='Run Image Rescaling', image={'label': 'Image or Label', 'tooltip': 'Layer to apply the rescaling.'}, - type_of_refactor={'label': 'Type of refactor', - 'tooltip': 'Select the mode of finding the right rescaling factor.', - 'widget_type': 'RadioButtons', - 'orientation': 'vertical', - 'choices': ['Rescaling factor', - 'Voxel size', - 'Same as Reference Layer', - 'Same as Reference Model']}, rescaling_factor={'label': 'Rescaling factor', 'tooltip': 'Define the scaling factor to use for resizing the input image.'}, out_voxel_size={'label': 'Out voxel size', @@ -67,10 +59,11 @@ def widget_gaussian_smoothing(image: Image, 'tooltip': 'Rescale to same voxel size as selected model.', 'choices': list_models()}, order={'label': 'Interpolation order', + 'widget_type': 'ComboBox', + 'choices': [0, 1, 2], 'tooltip': '0 for nearest neighbours (default for labels), 1 for linear, 2 for bilinear.', }) def widget_rescaling(image: Layer, - type_of_refactor: str = 'Rescaling factor', rescaling_factor: Tuple[float, float, float] = (1., 1., 1.), out_voxel_size: Tuple[float, float, float] = (1., 1., 1.), reference_layer: Union[None, Layer] = None, @@ -88,19 +81,12 @@ def widget_rescaling(image: Layer, raise ValueError(f'{type(image)} cannot be rescaled, please use Image layers or Labels layers') current_resolution = image.scale - if type_of_refactor == 'Voxel size (um)': - rescaling_factor = compute_scaling_factor(current_resolution, out_voxel_size) + rescaling_factor = [float(x) for x in rescaling_factor] - elif type_of_refactor == 'Same as Reference Layer': - out_voxel_size = reference_layer.scale - rescaling_factor = compute_scaling_factor(current_resolution, reference_layer.scale) + if image.data.ndim == 2: + rescaling_factor[0] = 1. 
- elif type_of_refactor == 'Same as Reference Model': - out_voxel_size = get_model_resolution(reference_model) - rescaling_factor = compute_scaling_factor(current_resolution, out_voxel_size) - - else: - out_voxel_size = compute_scaling_voxelsize(current_resolution, scaling_factor=rescaling_factor) + out_voxel_size = compute_scaling_voxelsize(current_resolution, scaling_factor=rescaling_factor) out_name = create_layer_name(image.name, 'Rescaled') inputs_kwarg = {'image': image.data} @@ -122,6 +108,32 @@ def widget_rescaling(image: Layer, ) +@widget_rescaling.image.changed.connect +def _on_image_changed(image: Layer): + widget_rescaling.out_voxel_size.value = image.scale + + +@widget_rescaling.out_voxel_size.changed.connect +def _on_voxel_size_changed(voxel_size: Tuple[float, float, float]): + rescaling_factor = compute_scaling_factor(widget_rescaling.image.value.scale, voxel_size) + widget_rescaling.rescaling_factor.value = rescaling_factor + + +@widget_rescaling.reference_layer.changed.connect +def _on_reference_layer_changed(reference_layer: Layer): + rescaling_factor = compute_scaling_factor(widget_rescaling.image.value.scale, reference_layer.scale) + widget_rescaling.rescaling_factor.value = rescaling_factor + widget_rescaling.out_voxel_size.value = reference_layer.scale + + +@widget_rescaling.reference_model.changed.connect +def _on_reference_model_changed(reference_model: str): + out_voxel_size = get_model_resolution(reference_model) + rescaling_factor = compute_scaling_factor(widget_rescaling.image.value.scale, out_voxel_size) + widget_rescaling.rescaling_factor.value = rescaling_factor + widget_rescaling.out_voxel_size.value = out_voxel_size + + def _compute_slices(rectangle, crop_z, shape): z_start = max(rectangle[0, 0] - crop_z // 2, 0) z_end = min(rectangle[0, 0] + math.ceil(crop_z / 2), shape[0]) From 33b19380d04473e819605253f0b29e9b71ba5450 Mon Sep 17 00:00:00 2001 From: lorenzocerrone Date: Thu, 13 Apr 2023 16:42:59 +0200 Subject: [PATCH 03/15] add cropping callback and improve usability using double sided sliders for the z cropping --- plantseg/viewer/widget/dataprocessing.py | 32 +++++++++++++++++------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/plantseg/viewer/widget/dataprocessing.py b/plantseg/viewer/widget/dataprocessing.py index addff817..25aed02f 100644 --- a/plantseg/viewer/widget/dataprocessing.py +++ b/plantseg/viewer/widget/dataprocessing.py @@ -1,4 +1,3 @@ -import math from concurrent.futures import Future from typing import Tuple, Union @@ -135,10 +134,13 @@ def _on_reference_model_changed(reference_model: str): def _compute_slices(rectangle, crop_z, shape): - z_start = max(rectangle[0, 0] - crop_z // 2, 0) - z_end = min(rectangle[0, 0] + math.ceil(crop_z / 2), shape[0]) + z_start = int(crop_z[0]) + z_end = int(crop_z[1]) z_slice = slice(z_start, z_end) + if rectangle is None: + return z_slice, slice(0, shape[1]), slice(0, shape[2]) + x_start = max(rectangle[0, 1], 0) x_end = min(rectangle[2, 1], shape[1]) x_slice = slice(x_start, x_end) @@ -146,7 +148,6 @@ def _compute_slices(rectangle, crop_z, shape): y_start = max(rectangle[0, 2], 0) y_end = min(rectangle[2, 2], shape[2]) y_slice = slice(y_start, y_end) - return z_slice, x_slice, y_slice @@ -160,14 +161,16 @@ def _cropping(data, crop_slices): crop_roi={'label': 'Crop ROI', 'tooltip': 'This must be a shape layer with a rectangle XY overlaying the area to crop.'}, crop_z={'label': 'Z slices', - 'tooltip': 'Numer of z slices to take next to the current selection.'}, + 'tooltip': 'Numer of z 
slices to take next to the current selection.', + 'widget_type': 'FloatRangeSlider', 'max': 100, 'min': 0, 'step': 1}, ) def widget_cropping(image: Layer, crop_roi: Union[Shapes, None] = None, - crop_z: int = 1, + crop_z: tuple[int, int] = (0, 100), ) -> Future[LayerDataTuple]: - assert len(crop_roi.shape_type) == 1, "Only one rectangle should be used for cropping" - assert crop_roi.shape_type[0] == 'rectangle', "Only a rectangle shape should be used for cropping" + if crop_roi is not None: + assert len(crop_roi.shape_type) == 1, "Only one rectangle should be used for cropping" + assert crop_roi.shape_type[0] == 'rectangle', "Only a rectangle shape should be used for cropping" if isinstance(image, Image): layer_type = 'image' @@ -184,7 +187,10 @@ def widget_cropping(image: Layer, scale=image.scale, metadata=image.metadata) - rectangle = crop_roi.data[0].astype('int64') + if crop_roi is not None: + rectangle = crop_roi.data[0].astype('int64') + else: + rectangle = None crop_slices = _compute_slices(rectangle, crop_z, image.data.shape) @@ -200,6 +206,14 @@ def widget_cropping(image: Layer, ) +@widget_cropping.image.changed.connect +def _on_image_changed(image: Layer): + widget_cropping.crop_z.max = int(image.data.shape[0]) + widget_cropping.crop_z.step = 1 + if widget_cropping.crop_z.value[1] > image.data.shape[0]: + widget_cropping.crop_z.value[1] = int(image.data.shape[0]) + + def _two_layers_operation(data1, data2, operation, weights: float = 0.5): if operation == 'Mean': return weights * data1 + (1. - weights) * data2 From 81d0e99a39fd2835f18ca846d17a76c88fa995aa Mon Sep 17 00:00:00 2001 From: lorenzocerrone Date: Thu, 13 Apr 2023 16:44:31 +0200 Subject: [PATCH 04/15] comment on sliders --- plantseg/viewer/widget/dataprocessing.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plantseg/viewer/widget/dataprocessing.py b/plantseg/viewer/widget/dataprocessing.py index 25aed02f..ac42d72a 100644 --- a/plantseg/viewer/widget/dataprocessing.py +++ b/plantseg/viewer/widget/dataprocessing.py @@ -160,6 +160,8 @@ def _cropping(data, crop_slices): 'tooltip': 'Layer to apply the rescaling.'}, crop_roi={'label': 'Crop ROI', 'tooltip': 'This must be a shape layer with a rectangle XY overlaying the area to crop.'}, + # FloatRangeSlider and RangeSlider are not working very nicely with napari, they are usable but not very + # nice. maybe we should use a custom widget for this. crop_z={'label': 'Z slices', 'tooltip': 'Numer of z slices to take next to the current selection.', 'widget_type': 'FloatRangeSlider', 'max': 100, 'min': 0, 'step': 1}, From c78315ec5afb9436a569261878b45bfc420b89a1 Mon Sep 17 00:00:00 2001 From: lorenzocerrone Date: Thu, 13 Apr 2023 16:51:34 +0200 Subject: [PATCH 05/15] change sliders defaults --- plantseg/viewer/widget/dataprocessing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plantseg/viewer/widget/dataprocessing.py b/plantseg/viewer/widget/dataprocessing.py index ac42d72a..e22c30f7 100644 --- a/plantseg/viewer/widget/dataprocessing.py +++ b/plantseg/viewer/widget/dataprocessing.py @@ -164,7 +164,9 @@ def _cropping(data, crop_slices): # nice. maybe we should use a custom widget for this. 
crop_z={'label': 'Z slices', 'tooltip': 'Numer of z slices to take next to the current selection.', - 'widget_type': 'FloatRangeSlider', 'max': 100, 'min': 0, 'step': 1}, + 'widget_type': 'FloatRangeSlider', 'max': 100, 'min': 0, 'step': 1, + 'readout': False, + 'tracking': False}, ) def widget_cropping(image: Layer, crop_roi: Union[Shapes, None] = None, From 4b560faa2a0ce9e2eee0c563448ef3f29b879fee Mon Sep 17 00:00:00 2001 From: lorenzocerrone Date: Thu, 13 Apr 2023 22:45:26 +0200 Subject: [PATCH 11/15] automatically setup good defaults for widgets --- plantseg/resources/models_zoo.yaml | 6 +- plantseg/viewer/widget/dataprocessing.py | 55 ++++++++++++++++--- plantseg/viewer/widget/predictions.py | 11 +++- .../widget/proofreading/proofreading.py | 4 +- plantseg/viewer/widget/segmentation.py | 18 +++--- plantseg/viewer/widget/utils.py | 22 +++++++- 6 files changed, 94 insertions(+), 22 deletions(-) diff --git a/plantseg/resources/models_zoo.yaml b/plantseg/resources/models_zoo.yaml index 0e666e9d..be55e997 100644 --- a/plantseg/resources/models_zoo.yaml +++ b/plantseg/resources/models_zoo.yaml @@ -55,7 +55,7 @@ confocal_2D_unet_ovules_ds2x: description: "2D Unet trained on z-slices of confocal images of Arabidopsis Ovules (1/2-resolution in XY) with BCEDiceLoss." dimensionality: "2D" modality: "confocal" - recommended_patch_size: [ 80, 170, 170 ] + recommended_patch_size: [ 1, 170, 170 ] output_type: "boundaries" confocal_3D_unet_ovules_nuclei_ds1x: @@ -101,7 +101,7 @@ lightsheet_2D_unet_root_ds1x: description: "2D Unet trained on z-slices of light-sheet images of Lateral Root Primordia on original resolution with BCEDiceLoss." dimensionality: "2D" modality: "light-sheet" - recommended_patch_size: [ 80, 170, 170 ] + recommended_patch_size: [ 1, 170, 170 ] output_type: "boundaries" lightsheet_3D_unet_root_nuclei_ds1x: @@ -119,7 +119,7 @@ lightsheet_2D_unet_root_nuclei_ds1x: description: "2D Unet trained on z-slices of light-sheet images of Lateral Root Primordia nuclei on original resolution with BCEDiceLoss. The network predicts 2 channels: nuclei mask in the 1st channel, nuclei boundaries in the 2nd channel" dimensionality: "2D" modality: "light-sheet" - recommended_patch_size: [ 80, 170, 170 ] + recommended_patch_size: [ 1, 170, 170 ] output_type: "nuclei" # PNAS diff --git a/plantseg/viewer/widget/dataprocessing.py b/plantseg/viewer/widget/dataprocessing.py index e22c30f7..5610edf9 100644 --- a/plantseg/viewer/widget/dataprocessing.py +++ b/plantseg/viewer/widget/dataprocessing.py @@ -1,8 +1,10 @@ from concurrent.futures import Future +from enum import Enum from typing import Tuple, Union import numpy as np from magicgui import magicgui +from napari import Viewer from napari.layers import Image, Labels, Shapes, Layer from napari.types import LayerDataTuple @@ -11,6 +13,8 @@ from plantseg.dataprocessing.functional.labelprocessing import relabel_segmentation as _relabel_segmentation from plantseg.dataprocessing.functional.labelprocessing import set_background_to_value from plantseg.utils import list_models, get_model_resolution +from plantseg.viewer.widget.predictions import widget_unet_predictions +from plantseg.viewer.widget.segmentation import widget_agglomeration, widget_lifted_multicut, widget_simple_dt_ws from plantseg.viewer.widget.utils import start_threading_process, create_layer_name, layer_properties @@ -22,8 +26,9 @@ 'tooltip': 'Define the size of the gaussian smoothing kernel. 
' 'The larger the more blurred will be the output image.', 'max': 10., - 'min': 0.}) -def widget_gaussian_smoothing(image: Image, + 'min': 0.1}) +def widget_gaussian_smoothing(viewer: Viewer, + image: Image, sigma: float = 1., ) -> Future[LayerDataTuple]: out_name = create_layer_name(image.name, 'GaussianSmoothing') @@ -41,9 +46,22 @@ def widget_gaussian_smoothing(image: Image, layer_kwarg=layer_kwargs, layer_type=layer_type, step_name='Gaussian Smoothing', + viewer=viewer, + widgets_to_update=[widget_unet_predictions.image, + widget_agglomeration.image, + widget_lifted_multicut.image, + widget_simple_dt_ws.image, + widget_rescaling.image, + widget_cropping.image] ) +class RescaleType(Enum): + nearest = 0 + linear = 1 + bilinear = 2 + + @magicgui(call_button='Run Image Rescaling', image={'label': 'Image or Label', 'tooltip': 'Layer to apply the rescaling.'}, @@ -59,18 +77,20 @@ def widget_gaussian_smoothing(image: Image, 'choices': list_models()}, order={'label': 'Interpolation order', 'widget_type': 'ComboBox', - 'choices': [0, 1, 2], + 'choices': RescaleType, 'tooltip': '0 for nearest neighbours (default for labels), 1 for linear, 2 for bilinear.', }) -def widget_rescaling(image: Layer, +def widget_rescaling(viewer: Viewer, + image: Layer, rescaling_factor: Tuple[float, float, float] = (1., 1., 1.), out_voxel_size: Tuple[float, float, float] = (1., 1., 1.), reference_layer: Union[None, Layer] = None, reference_model: str = list_models()[0], - order: int = 1, + order=RescaleType.linear, ) -> Future[LayerDataTuple]: if isinstance(image, Image): layer_type = 'image' + order = order.value elif isinstance(image, Labels): layer_type = 'labels' @@ -104,6 +124,13 @@ def widget_rescaling(image: Layer, layer_kwarg=layer_kwargs, step_name='Rescaling', layer_type=layer_type, + viewer=viewer, + widgets_to_update=[widget_unet_predictions.image, + widget_agglomeration.image, + widget_lifted_multicut.image, + widget_simple_dt_ws.image, + widget_cropping.image, + widget_gaussian_smoothing.image] ) @@ -168,7 +195,8 @@ def _cropping(data, crop_slices): 'readout': False, 'tracking': False}, ) -def widget_cropping(image: Layer, +def widget_cropping(viewer: Viewer, + image: Layer, crop_roi: Union[Shapes, None] = None, crop_z: tuple[int, int] = (0, 100), ) -> Future[LayerDataTuple]: @@ -207,6 +235,13 @@ def widget_cropping(image: Layer, layer_type=layer_type, step_name='Cropping', skip_dag=True, + viewer=viewer, + widgets_to_update=[widget_unet_predictions.image, + widget_agglomeration.image, + widget_lifted_multicut.image, + widget_simple_dt_ws.image, + widget_rescaling.image, + widget_gaussian_smoothing.image] ) @@ -240,7 +275,8 @@ def _two_layers_operation(data1, data2, operation, weights: float = 0.5): weights={'label': 'Mean weights', 'widget_type': 'FloatSlider', 'max': 1., 'min': 0.}, ) -def widget_add_layers(image1: Image, +def widget_add_layers(viewer: Viewer, + image1: Image, image2: Image, operation: str = 'Maximum', weights: float = 0.5, @@ -262,6 +298,11 @@ def widget_add_layers(image1: Image, layer_kwarg=layer_kwargs, layer_type=layer_type, step_name='Merge Layers', + viewer=viewer, + widgets_to_update=[widget_unet_predictions.image, + widget_agglomeration.image, + widget_lifted_multicut.image, + widget_simple_dt_ws.image] ) diff --git a/plantseg/viewer/widget/predictions.py b/plantseg/viewer/widget/predictions.py index 4f181e24..5716d53f 100644 --- a/plantseg/viewer/widget/predictions.py +++ b/plantseg/viewer/widget/predictions.py @@ -5,6 +5,7 @@ import torch.cuda from magicgui import magicgui 
+from napari import Viewer from napari.layers import Image from napari.qt.threading import thread_worker from napari.types import LayerDataTuple @@ -14,6 +15,8 @@ from plantseg.utils import list_all_modality, list_all_dimensionality, list_all_output_type from plantseg.utils import list_models, add_custom_model, get_train_config, get_model_zoo from plantseg.viewer.logging import napari_formatted_logging +from plantseg.viewer.widget.segmentation import widget_agglomeration, widget_lifted_multicut, widget_simple_dt_ws +from plantseg.viewer.widget.proofreading.proofreading import widget_split_and_merge_from_scribbles from plantseg.viewer.widget.utils import start_threading_process, create_layer_name, layer_properties ALL_CUDA_DEVICES = [f'cuda:{i}' for i in range(torch.cuda.device_count())] @@ -61,7 +64,8 @@ def unet_predictions_wrapper(raw, device, **kwargs): device={'label': 'Device', 'choices': ALL_DEVICES} ) -def widget_unet_predictions(image: Image, +def widget_unet_predictions(viewer: Viewer, + image: Image, model_name: str, dimensionality: str = 'All', modality: str = 'All', @@ -89,6 +93,11 @@ def widget_unet_predictions(image: Image, layer_kwarg=layer_kwargs, layer_type=layer_type, step_name='UNet Predictions', + viewer=viewer, + widgets_to_update=[widget_agglomeration.image, + widget_lifted_multicut.image, + widget_simple_dt_ws.image, + widget_split_and_merge_from_scribbles.image] ) diff --git a/plantseg/viewer/widget/proofreading/proofreading.py b/plantseg/viewer/widget/proofreading/proofreading.py index cb241f70..f58bedba 100644 --- a/plantseg/viewer/widget/proofreading/proofreading.py +++ b/plantseg/viewer/widget/proofreading/proofreading.py @@ -285,10 +285,12 @@ def func(): @magicgui(call_button=f'Extract correct labels') -def widget_filter_segmentation() -> Future[LayerDataTuple]: +def widget_filter_segmentation() -> Union[Future[LayerDataTuple], None]: + print(segmentation_handler.status) if not segmentation_handler.status: napari_formatted_logging('Proofreading widget not initialized. 
Run the proofreading widget tool once first', thread='Export correct labels') + return None future = Future() diff --git a/plantseg/viewer/widget/segmentation.py b/plantseg/viewer/widget/segmentation.py index 75322bab..85695d02 100644 --- a/plantseg/viewer/widget/segmentation.py +++ b/plantseg/viewer/widget/segmentation.py @@ -6,11 +6,13 @@ from napari.layers import Labels, Image, Layer from napari.types import LayerDataTuple +from napari import Viewer from plantseg.dataprocessing.functional.advanced_dataprocessing import fix_over_under_segmentation_from_nuclei -from plantseg.viewer.logging import napari_formatted_logging from plantseg.dataprocessing.functional.dataprocessing import normalize_01 from plantseg.segmentation.functional import gasp, multicut, dt_watershed, mutex_ws from plantseg.segmentation.functional import lifted_multicut_from_nuclei_segmentation, lifted_multicut_from_nuclei_pmaps +from plantseg.viewer.widget.proofreading.proofreading import widget_split_and_merge_from_scribbles +from plantseg.viewer.logging import napari_formatted_logging from plantseg.viewer.widget.utils import start_threading_process, create_layer_name, layer_properties @@ -31,8 +33,8 @@ def _generic_clustering(image: Image, labels: Labels, beta: float = 0.5, minsize: int = 100, name: str = 'GASP', - agg_func: Callable = gasp) -> Future[LayerDataTuple]: - + agg_func: Callable = gasp, + viewer: Viewer = None) -> Future[LayerDataTuple]: if 'pmap' not in image.metadata: _pmap_warn(f'{name} Clustering Widget') @@ -53,6 +55,8 @@ def _generic_clustering(image: Image, labels: Labels, layer_kwarg=layer_kwargs, layer_type=layer_type, step_name=f'{name} Clustering', + viewer=viewer, + widgets_to_update=[widget_split_and_merge_from_scribbles.segmentation] ) @@ -71,7 +75,8 @@ def _generic_clustering(image: Image, labels: Labels, 'widget_type': 'FloatSlider', 'max': 1., 'min': 0.}, minsize={'label': 'Min-size', 'tooltip': 'Minimum segment size allowed in voxels.'}) -def widget_agglomeration(image: Image, _labels: Labels, +def widget_agglomeration(viewer: Viewer, + image: Image, _labels: Labels, mode: str = "GASP", beta: float = 0.6, minsize: int = 100) -> Future[LayerDataTuple]: @@ -84,7 +89,7 @@ def widget_agglomeration(image: Image, _labels: Labels, else: func = multicut - return _generic_clustering(image, _labels, beta=beta, minsize=minsize, name=mode, agg_func=func) + return _generic_clustering(image, _labels, beta=beta, minsize=minsize, name=mode, agg_func=func, viewer=viewer) @magicgui(call_button='Run Lifted MultiCut', @@ -105,7 +110,6 @@ def widget_lifted_multicut(image: Image, _labels: Labels, beta: float = 0.5, minsize: int = 100) -> Future[LayerDataTuple]: - if 'pmap' not in image.metadata: _pmap_warn('Lifted MultiCut Widget') @@ -203,7 +207,6 @@ def widget_dt_ws(image: Image, pixel_pitch: Tuple[int, int, int] = (1, 1, 1), apply_nonmax_suppression: bool = False, nuclei: bool = False) -> Future[LayerDataTuple]: - if 'pmap' not in image.metadata: _pmap_warn("Watershed Widget") @@ -257,7 +260,6 @@ def widget_simple_dt_ws(image: Image, stacked: str = '2D', threshold: float = 0.5, min_size: int = 100) -> Future[LayerDataTuple]: - if 'pmap' not in image.metadata: _pmap_warn("Watershed Widget") diff --git a/plantseg/viewer/widget/utils.py b/plantseg/viewer/widget/utils.py index 6814a4c1..f0d7d64b 100644 --- a/plantseg/viewer/widget/utils.py +++ b/plantseg/viewer/widget/utils.py @@ -6,6 +6,8 @@ from plantseg.viewer.dag_handler import dag_manager from plantseg.viewer.logging import napari_formatted_logging +import 
timeit +from napari import Viewer def identity(*args, **kwargs): @@ -21,6 +23,15 @@ def identity(*args, **kwargs): raise ValueError('identity should have at least one positional argument') +def setup_layers_suggestions(viewer: Viewer, out_name: str, widgets: list): + if out_name not in viewer.layers: + return None + + out_layer = viewer.layers[out_name] + for widget in widgets: + widget.value = out_layer + + def start_threading_process(func: Callable, runtime_kwargs: dict, statics_kwargs: dict, @@ -29,13 +40,17 @@ def start_threading_process(func: Callable, layer_kwarg: dict, layer_type: str = 'image', step_name: str = '', - skip_dag: bool = False) -> Future: + skip_dag: bool = False, + viewer: Viewer = None, + widgets_to_update: list = None) -> Future: runtime_kwargs.update(statics_kwargs) thread_func = thread_worker(partial(func, **runtime_kwargs)) future = Future() + timer_start = timeit.default_timer() def on_done(result): - napari_formatted_logging(f'Widget {step_name} computation complete', thread=step_name) + timer = timeit.default_timer() - timer_start + napari_formatted_logging(f'Widget {step_name} computation complete in {timer:.2f}s', thread=step_name) _func = func if not skip_dag else identity dag_manager.add_step(_func, input_keys=input_keys, output_key=out_name, @@ -44,6 +59,9 @@ def on_done(result): result = result, layer_kwarg, layer_type future.set_result(result) + if viewer is not None and widgets_to_update is not None: + setup_layers_suggestions(viewer, out_name, widgets_to_update) + worker = thread_func() worker.returned.connect(on_done) worker.start() From 866519d68112815329a61e903f9fd8195e8aaf00 Mon Sep 17 00:00:00 2001 From: lorenzocerrone Date: Thu, 13 Apr 2023 23:07:09 +0200 Subject: [PATCH 12/15] add naive version check --- plantseg/run_plantseg.py | 2 ++ plantseg/utils.py | 12 ++++++++++++ 2 files changed, 14 insertions(+) diff --git a/plantseg/run_plantseg.py b/plantseg/run_plantseg.py index 993c52e2..d20807e5 100644 --- a/plantseg/run_plantseg.py +++ b/plantseg/run_plantseg.py @@ -15,6 +15,8 @@ def parser(): def main(): + from plantseg.utils import check_version + check_version() args = parser() if args.gui: diff --git a/plantseg/utils.py b/plantseg/utils.py index a7ecf3a8..9095b4ae 100644 --- a/plantseg/utils.py +++ b/plantseg/utils.py @@ -8,6 +8,7 @@ import requests import yaml +from plantseg.__version__ import __version__ as current_version from plantseg import model_zoo_path, custom_zoo, home_path, PLANTSEG_MODELS_DIR, plantseg_global_path from plantseg.pipeline import gui_logger @@ -276,3 +277,14 @@ def clean_models(): else: print("Invalid input, please type 'y' or 'n'.") + + +def check_version(): + plantseg_url = ' https://api.github.com/repos/hci-unihd/plant-seg/releases/latest' + response = requests.get(plantseg_url).json() + latest_version = response['tag_name'] + latest_version_numeric = int(latest_version.replace('.', '')) + plantseg_version_numeric = int(current_version.replace('.', '')) + if latest_version_numeric > plantseg_version_numeric: + print(f"New version of PlantSeg available: {latest_version}.\n" + f"Please update your version to the latest one!") From af5b879b3aeb44ec272e48529ac1f105917179c4 Mon Sep 17 00:00:00 2001 From: lorenzocerrone Date: Thu, 13 Apr 2023 23:38:57 +0200 Subject: [PATCH 13/15] fix a minor bug in the headless predictions --- plantseg/viewer/headless.py | 8 +++++--- plantseg/viewer/widget/io.py | 8 +++++--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/plantseg/viewer/headless.py 
b/plantseg/viewer/headless.py index 80286093..83bc2133 100644 --- a/plantseg/viewer/headless.py +++ b/plantseg/viewer/headless.py @@ -10,10 +10,12 @@ from plantseg.viewer.dag_handler import DagHandler from plantseg.viewer.widget.predictions import ALL_DEVICES, ALL_CUDA_DEVICES -all_gpus_str = f'all {len(ALL_CUDA_DEVICES)} gpus' +all_gpus_str = f'all gpus: {len(ALL_CUDA_DEVICES)}' ALL_GPUS = [all_gpus_str] if len(ALL_CUDA_DEVICES) > 0 else [] ALL_DEVICES_HEADLESS = ALL_DEVICES + ALL_GPUS +MAX_WORKERS = len(ALL_CUDA_DEVICES) if len(ALL_CUDA_DEVICES) > 0 else multiprocessing.cpu_count() + def _parse_input_paths(inputs, path_suffix='_path'): list_input_paths = [_input for _input in inputs if _input[-len(path_suffix):] == path_suffix] @@ -39,7 +41,7 @@ def run_workflow_headless(path): 'widget_type': 'IntSlider', 'tooltip': 'Define the size of the gaussian smoothing kernel. ' 'The larger the more blurred will be the output image.', - 'max': multiprocessing.cpu_count(), 'min': 1}, + 'max': MAX_WORKERS, 'min': 1}, scheduler={'label': 'Scheduler', 'choices': ['multiprocessing', 'threaded'] }, @@ -48,7 +50,7 @@ def run_workflow_headless(path): def run(list_inputs: input_hints, out_directory: Path = Path.home(), device: str = ALL_DEVICES_HEADLESS[0], - num_workers: int = 1, + num_workers: int = MAX_WORKERS, scheduler: str = 'multiprocessing'): dict_of_jobs = {} cluster = distributed.LocalCluster(n_workers=num_workers, threads_per_worker=1) diff --git a/plantseg/viewer/widget/io.py b/plantseg/viewer/widget/io.py index 242a7967..dab57cfa 100644 --- a/plantseg/viewer/widget/io.py +++ b/plantseg/viewer/widget/io.py @@ -176,7 +176,7 @@ def export_stack_as_tiff(data, if scaling_factor is not None: data = image_rescale(data, factor=scaling_factor, order=order) - stack_name = f'{name}{standard_suffix}' if custom_name is None else custom_name + stack_name = f'{name}_{standard_suffix}' if custom_name is None else f'{name}_{custom_name}' directory = Path(directory) directory.mkdir(parents=True, exist_ok=True) @@ -244,7 +244,8 @@ def checkout(*args): call_button='Export stack', images={'label': 'Layers to export', 'layout': 'vertical', - 'tooltip': 'Select all layer to be exported, and (optional) set a custom file name.'}, + 'tooltip': 'Select all layer to be exported, and (optional) set a custom file name suffix that will be ' + 'appended at end of the layer name.'}, data_type={'label': 'Data Type', 'choices': ['float32', 'uint8', 'uint16'], 'tooltip': 'Export datatype (uint16 for segmentation) and all others for images.'}, @@ -276,6 +277,7 @@ def export_stacks(images: List[Tuple[Layer, str]], elif isinstance(image, Labels): order = 0 + stack_type = 'labels' dtype = 'uint16' if data_type != 'uint16': @@ -304,7 +306,7 @@ def export_stacks(images: List[Tuple[Layer, str]], root_name = image.metadata.get('root_name', 'unknown') image_custom_name = None if image_custom_name == '' else image_custom_name - standard_suffix = f'_{i}' if image_custom_name is None else '' + standard_suffix = f'{i}' if image_custom_name is None else '' # run step for the current export step_params = {'scaling_factor': scaling_factor, From dbc7876c8feaba9142b3fa24d50aeecf0a8c63f9 Mon Sep 17 00:00:00 2001 From: lorenzocerrone Date: Fri, 14 Apr 2023 15:02:48 +0200 Subject: [PATCH 14/15] improve check version robustness --- plantseg/utils.py | 43 +++++++++++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/plantseg/utils.py b/plantseg/utils.py index 9095b4ae..9b91c1ae 100644 --- a/plantseg/utils.py 
+++ b/plantseg/utils.py @@ -4,12 +4,13 @@ from pathlib import Path from shutil import copy2 from typing import Tuple, Optional +from warnings import warn import requests import yaml -from plantseg.__version__ import __version__ as current_version from plantseg import model_zoo_path, custom_zoo, home_path, PLANTSEG_MODELS_DIR, plantseg_global_path +from plantseg.__version__ import __version__ as current_version from plantseg.pipeline import gui_logger CONFIG_TRAIN_YAML = "config_train.yml" @@ -279,12 +280,34 @@ def clean_models(): print("Invalid input, please type 'y' or 'n'.") -def check_version(): - plantseg_url = ' https://api.github.com/repos/hci-unihd/plant-seg/releases/latest' - response = requests.get(plantseg_url).json() - latest_version = response['tag_name'] - latest_version_numeric = int(latest_version.replace('.', '')) - plantseg_version_numeric = int(current_version.replace('.', '')) - if latest_version_numeric > plantseg_version_numeric: - print(f"New version of PlantSeg available: {latest_version}.\n" - f"Please update your version to the latest one!") +def check_version(plantseg_url=' https://api.github.com/repos/hci-unihd/plant-seg/releases/latest'): + try: + response = requests.get(plantseg_url).json() + latest_version = response['tag_name'] + + except requests.exceptions.ConnectionError: + warn("Connection error, could not check for new version.") + return None + except requests.exceptions.Timeout: + warn("Connection timeout, could not check for new version.") + return None + except requests.exceptions.TooManyRedirects: + warn("Too many redirects, could not check for new version.") + return None + except Exception as e: + warn(f"Unknown error, could not check for new version. Error: {e}") + return None + + latest_version_numeric = [int(x) for x in latest_version.split(".")] + plantseg_version_numeric = [int(x) for x in current_version.split(".")] + + if len(latest_version_numeric) != len(plantseg_version_numeric): + warn(f"Could not check for new version, version number not in the correct format.\n" + f"Current version: {current_version}, latest version: {latest_version}") + return None + + for l_v, p_v in zip(latest_version_numeric, plantseg_version_numeric): + if l_v > p_v: + print(f"New version of PlantSeg available: {latest_version}.\n" + f"Please update your version to the latest one!") + return None From 5f381f9948c4fc39fd87e3aa896070d8e0e57a00 Mon Sep 17 00:00:00 2001 From: lorenzocerrone Date: Fri, 14 Apr 2023 15:05:42 +0200 Subject: [PATCH 15/15] nicer default for model patch_size --- plantseg/resources/models_zoo.yaml | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/plantseg/resources/models_zoo.yaml b/plantseg/resources/models_zoo.yaml index be55e997..fc37515d 100644 --- a/plantseg/resources/models_zoo.yaml +++ b/plantseg/resources/models_zoo.yaml @@ -8,7 +8,7 @@ generic_confocal_3D_unet: description: "Unet trained on confocal images of Arabidopsis Ovules on 1/2-resolution in XY with BCEDiceLoss." dimensionality: "3D" modality: "confocal" - recommended_patch_size: [80, 170, 170] + recommended_patch_size: [ 80, 160, 160 ] output_type: "boundaries" @@ -18,7 +18,7 @@ generic_light_sheet_3D_unet: description: "Unet trained on light-sheet images of Arabidopsis Lateral Root Primordia on original resolution with BCEDiceLoss." 
dimensionality: "3D" modality: "light-sheet" - recommended_patch_size: [ 80, 170, 170 ] + recommended_patch_size: [ 80, 160, 160 ] output_type: "boundaries" ## Ovules @@ -28,7 +28,7 @@ confocal_3D_unet_ovules_ds1x: description: "Unet trained on confocal images of Arabidopsis Ovules on original resolution with BCEDiceLoss." dimensionality: "3D" modality: "confocal" - recommended_patch_size: [ 80, 170, 170 ] + recommended_patch_size: [ 80, 160, 160 ] output_type: "boundaries" confocal_3D_unet_ovules_ds2x: @@ -37,7 +37,7 @@ confocal_3D_unet_ovules_ds2x: description: "Unet trained on confocal images of Arabidopsis Ovules on 1/2-resolution in XY with BCEDiceLoss." dimensionality: "3D" modality: "confocal" - recommended_patch_size: [ 80, 170, 170 ] + recommended_patch_size: [ 80, 160, 160 ] output_type: "boundaries" confocal_3D_unet_ovules_ds3x: @@ -46,7 +46,7 @@ confocal_3D_unet_ovules_ds3x: description: "Unet trained on confocal images of Arabidopsis Ovules on 1/3-resolution in XY with BCEDiceLoss." dimensionality: "3D" modality: "confocal" - recommended_patch_size: [ 80, 170, 170 ] + recommended_patch_size: [ 80, 160, 160 ] output_type: "boundaries" confocal_2D_unet_ovules_ds2x: @@ -55,7 +55,7 @@ confocal_2D_unet_ovules_ds2x: description: "2D Unet trained on z-slices of confocal images of Arabidopsis Ovules (1/2-resolution in XY) with BCEDiceLoss." dimensionality: "2D" modality: "confocal" - recommended_patch_size: [ 1, 170, 170 ] + recommended_patch_size: [ 1, 256, 256 ] output_type: "boundaries" confocal_3D_unet_ovules_nuclei_ds1x: @@ -64,7 +64,7 @@ confocal_3D_unet_ovules_nuclei_ds1x: description: "Unet trained on confocal images of Arabidopsis Ovules nuclei stain on original resolution with BCEDiceLoss. The network predicts 1 channel: nuclei probability maps" dimensionality: "3D" modality: "confocal" - recommended_patch_size: [ 80, 170, 170 ] + recommended_patch_size: [ 80, 160, 160 ] output_type: "nuclei" ## Root @@ -74,7 +74,7 @@ lightsheet_3D_unet_root_ds1x: description: "Unet trained on light-sheet images of Lateral Root Primordia on original resolution with BCEDiceLoss." dimensionality: "3D" modality: "light-sheet" - recommended_patch_size: [ 80, 170, 170 ] + recommended_patch_size: [ 80, 160, 160 ] output_type: "boundaries" lightsheet_3D_unet_root_ds2x: @@ -83,7 +83,7 @@ lightsheet_3D_unet_root_ds2x: description: "Unet trained on light-sheet images of Lateral Root Primordia on 1/2-resolution in XY with BCEDiceLoss." dimensionality: "3D" modality: "light-sheet" - recommended_patch_size: [ 80, 170, 170 ] + recommended_patch_size: [ 80, 160, 160 ] output_type: "boundaries" lightsheet_3D_unet_root_ds3x: @@ -92,7 +92,7 @@ lightsheet_3D_unet_root_ds3x: description: "Unet trained on light-sheet images of Lateral Root Primordia on 1/3-resolution in XY with BCEDiceLoss." dimensionality: "3D" modality: "light-sheet" - recommended_patch_size: [ 80, 170, 170 ] + recommended_patch_size: [ 80, 160, 160 ] output_type: "boundaries" lightsheet_2D_unet_root_ds1x: @@ -101,7 +101,7 @@ lightsheet_2D_unet_root_ds1x: description: "2D Unet trained on z-slices of light-sheet images of Lateral Root Primordia on original resolution with BCEDiceLoss." 
dimensionality: "2D" modality: "light-sheet" - recommended_patch_size: [ 1, 170, 170 ] + recommended_patch_size: [ 1, 256, 256 ] output_type: "boundaries" lightsheet_3D_unet_root_nuclei_ds1x: @@ -110,7 +110,7 @@ lightsheet_3D_unet_root_nuclei_ds1x: description: "Unet trained on light-sheet images of Lateral Root Primordia nuclei on original resolution with BCEDiceLoss. The network predicts 2 channels: nuclei mask in the 1st channel, nuclei boundaries in the 2nd channel" dimensionality: "3D" modality: "light-sheet" - recommended_patch_size: [ 80, 170, 170 ] + recommended_patch_size: [ 80, 160, 160 ] output_type: "nuclei" lightsheet_2D_unet_root_nuclei_ds1x: @@ -119,7 +119,7 @@ lightsheet_2D_unet_root_nuclei_ds1x: description: "2D Unet trained on z-slices of light-sheet images of Lateral Root Primordia nuclei on original resolution with BCEDiceLoss. The network predicts 2 channels: nuclei mask in the 1st channel, nuclei boundaries in the 2nd channel" dimensionality: "2D" modality: "light-sheet" - recommended_patch_size: [ 1, 170, 170 ] + recommended_patch_size: [ 1, 256, 256 ] output_type: "nuclei" # PNAS @@ -138,7 +138,7 @@ confocal_3D_unet_sa_meristem_cells: description: "3D Unet trained on confocal images of Arabidopsis thaliana apical stem cell: https://www.repository.cam.ac.uk/handle/1810/262530" dimensionality: "3D" modality: "confocal" - recommended_patch_size: [ 80, 170, 170 ] + recommended_patch_size: [ 80, 160, 160 ] output_type: "boundaries" # Mouse embryo ex vivo @@ -148,7 +148,7 @@ lightsheet_3D_unet_mouse_embryo_cells: description: "A a variant of 3D U-Net trained to predict the cell boundaries in live light-sheet images of ex-vivo developing mouse embryo. Voxel size: (0.2×0.2×1 µm^3) (XYZ)" dimensionality: "3D" modality: "light-sheet" - recommended_patch_size: [ 80, 170, 170 ] + recommended_patch_size: [ 80, 160, 160 ] output_type: "boundaries" confocal_3D_unet_mouse_embryo_nuclei:
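A note on the version check added in PATCH 12 and reworked in PATCH 14: stripping the dots and comparing integers misorders tags with multi-digit components, and the component-wise loop in PATCH 14 still reports an update for, e.g., latest 1.4.9 against a local 1.5.0, because it only returns early when a latest component is larger. The sketch below is illustrative only and not part of the patch series; it assumes purely numeric, dot-separated release tags and shows a plain tuple comparison that avoids both pitfalls.

# Illustrative sketch, not part of the patch series: comparing release tags
# as tuples instead of dot-stripped integers (PATCH 12) or an early-return
# component loop (PATCH 14). Assumes purely numeric, dot-separated tags.

def as_tuple(version: str) -> tuple:
    """'1.4.10' -> (1, 4, 10)"""
    return tuple(int(part) for part in version.split('.'))

# PATCH 12 turns '1.4.10' into 1410 and '1.5.0' into 150, so the older release
# compares as "newer"; tuple comparison keeps the components separate.
assert int('1.4.10'.replace('.', '')) > int('1.5.0'.replace('.', ''))  # misordered
assert as_tuple('1.4.10') < as_tuple('1.5.0')                          # correct order

# The loop in PATCH 14 returns early only when a latest-version component is
# larger, so latest '1.4.9' vs current '1.5.0' would still print the update
# message; comparing whole tuples avoids that edge case as well.
assert as_tuple('1.4.9') < as_tuple('1.5.0')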