diff --git a/.github/workflows/build-master.yml b/.github/workflows/build-master.yml
index e347256..7400330 100644
--- a/.github/workflows/build-master.yml
+++ b/.github/workflows/build-master.yml
@@ -15,7 +15,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        python-version: [3.6, 3.7, 3.8]
+        python-version: [3.7, 3.8]
         os: [ubuntu-latest, windows-latest, macOS-latest]

     steps:
diff --git a/.github/workflows/test-and-lint.yml b/.github/workflows/test-and-lint.yml
index ad2f5f7..6b23208 100644
--- a/.github/workflows/test-and-lint.yml
+++ b/.github/workflows/test-and-lint.yml
@@ -7,7 +7,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        python-version: [3.6, 3.7, 3.8]
+        python-version: [3.7, 3.8]
         os: [ubuntu-latest, windows-latest, macOS-latest]

     steps:
diff --git a/README.md b/README.md
index 62d47e9..8018db8 100644
--- a/README.md
+++ b/README.md
@@ -12,8 +12,8 @@ AICSImageIO bindings for napari
   * `CZI`
   * `OME-TIFF`
   * `TIFF`
-  * Any formats supported by [`aicsimageio`](https://github.com/AllenCellModeling/aicsimageio)
-  * Any additional format supported by [`imageio`](https://github.com/imageio/imageio)
+  * Any formats supported by [aicsimageio](https://github.com/AllenCellModeling/aicsimageio)
+  * Any additional format supported by [imageio](https://github.com/imageio/imageio)
 * Two variants of the AICSImageIO bindings:
   * `aicsimageio`, which reads the image fully into memory
   * `aicsimageio-delayed`, which delays reading YX planes until requested for large file support
diff --git a/napari_aicsimageio/core.py b/napari_aicsimageio/core.py
index 9acbeb1..898469e 100644
--- a/napari_aicsimageio/core.py
+++ b/napari_aicsimageio/core.py
@@ -2,13 +2,12 @@
 # -*- coding: utf-8 -*-

 from functools import partial
-from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union

 import dask.array as da
 import numpy as np
-from aicsimageio import AICSImage, dask_utils, exceptions
+from aicsimageio import AICSImage, exceptions
 from aicsimageio.constants import Dimensions
-from aicsimageio.readers.reader import Reader


 ###############################################################################
@@ -19,61 +18,12 @@
 ###############################################################################


-class LoadResult(NamedTuple):
-    data: np.ndarray
-    index: int
-    channel_axis: Optional[int]
-    channel_names: Optional[List[str]]
-
-
-###############################################################################
-
-
-def _load_image(
-    path: str, ReaderClass: Reader, index: int, compute: bool
-) -> LoadResult:
-    # napari global viewer state can't be adjusted in a plugin and thus `ndisplay`
-    # will default be set to two (2). Because of this, set the chunk dims to be
-    # simply YX planes in the case where we are delayed loading to ensure we aren't
-    # requesting more data than necessary.
-
-    # Initialize reader
-    # If in memory, no need to change the default chunk_by_dims
-    if compute:
-        reader = ReaderClass(path)
-    else:
-        reader = ReaderClass(
-            path, chunk_by_dims=[Dimensions.SpatialY, Dimensions.SpatialX]
-        )
-
-    # Set channel_axis
-    dims = [dim for dim in reader.dims if reader.size(dim)[0] > 1]
-    if Dimensions.Channel in dims:
-        channel_axis = dims.index(Dimensions.Channel)
-    else:
-        channel_axis = None
-
-    # Set channel names
-    if channel_axis is not None:
-        channel_names = reader.get_channel_names()
-    else:
-        channel_names = None
-
-    # Finalize data and metadata to send to napari viewer
-    return LoadResult(
-        data=np.squeeze(reader.dask_data),
-        index=index,
-        channel_axis=channel_axis,
-        channel_names=channel_names,
-    )
-
-
-def reader_function(path: PathLike, compute: bool, processes: bool) -> List[LayerData]:
+def reader_function(path: PathLike, in_memory: bool) -> List[LayerData]:
     """
     Given a single path return a list of LayerData tuples.
     """
     # Alert console of how we are loading the image
-    print(f"Reader will load image in-memory: {compute}")
+    print(f"Reader will load image in-memory: {in_memory}")

     # Standardize path to list of paths
     paths = [path] if isinstance(path, str) else path
@@ -81,64 +31,40 @@ def reader_function(path: PathLike, compute: bool, processes: bool) -> List[Laye
     # Determine reader for all
     ReaderClass = AICSImage.determine_reader(paths[0])

-    # Spawn dask cluster for parallel read
-    with dask_utils.cluster_and_client(processes=processes) as (cluster, client):
-        # Map each file read
-        futures = client.map(
-            _load_image,
-            paths,
-            [ReaderClass for i in range(len(paths))],
-            [i for i in range(len(paths))],
-            [compute for compute in range(len(paths))],
-        )
-
-        # Block until done
-        results = client.gather(futures)
-
-    # Sort results by index
-    results = sorted(results, key=lambda result: result.index)
-
-    # Stack all arrays and configure metadata
-    data = da.stack([result.data for result in results])
-    data = da.squeeze(data)
-
-    # Determine whether or not to read in full first
-    if compute:
-        data = data.compute()
-
-    # Construct metadata using any of the returns
-    # as there is an assumption it is all the same
-    channel_names = results[0].channel_names
-
-    # Construct visible array if channel names are present
-    if channel_names is not None:
-        # Only display first channel
-        visible = [True if i == 0 else False for i, c in enumerate(channel_names)]
-    else:
-        # No channels, always display
-        visible = True
+    # Create readers for each path
+    readers = [ReaderClass(path) for path in paths]

-    # Construct basic metadata
-    meta = {
-        "name": channel_names,
-        "visible": visible,
-    }
+    # Read every file or create delayed arrays
+    if in_memory:
+        data = [reader.data for reader in readers]
+        data = np.stack(data)

-    # If multiple files were read we need to increment channel axis due to stack
-    channel_axis = results[0].channel_axis
-    if len(paths) > 1 and channel_axis is not None:
-        channel_axis += 1
+    else:
+        data = [reader.dask_data for reader in readers]
+        data = da.stack(data)

-    # Only add channel axis if it's not None
-    if channel_axis is not None:
-        meta["channel_axis"] = channel_axis
+    # Construct empty metadata to pass through
+    meta = {}
+
+    # If multiple files were read we need to increment channel axis due to stack
+    # But we only do this is the channel axis isn't single to begin with
+    img_contains_channels = Dimensions.Channel in readers[0].dims
+    if img_contains_channels:
+        # Get channel names for display
+        channel_names = readers[0].get_channel_names()
+
+        # Fix channel axis in the case of squeezed or many image stack
+        channel_axis = readers[0].dims.index(Dimensions.Channel)
+        channel_axis += 1
+
+        # Construct basic metadata
+        meta["name"] = channel_names
+        meta["channel_axis"] = channel_axis

-    return [(data, meta)]
+    return [(data, meta, "image")]


-def get_reader(
-    path: PathLike, compute: bool, processes=True
-) -> Optional[ReaderFunction]:
+def get_reader(path: PathLike, in_memory: bool) -> Optional[ReaderFunction]:
     """
     Given a single path or list of paths, return the appropriate aicsimageio reader.
     """
@@ -153,8 +79,8 @@ def get_reader(
         AICSImage.determine_reader(paths[0])

         # The above line didn't error so we know we have a supported reader
-        # Return a partial function with compute determined
-        return partial(reader_function, compute=compute, processes=processes)
+        # Return a partial function with in_memory determined
+        return partial(reader_function, in_memory=in_memory)

     # No supported reader, return None
     except exceptions.UnsupportedFileFormatError:
diff --git a/napari_aicsimageio/delayed.py b/napari_aicsimageio/delayed.py
index a16ca47..b76efe9 100644
--- a/napari_aicsimageio/delayed.py
+++ b/napari_aicsimageio/delayed.py
@@ -12,4 +12,4 @@

 @napari_hook_implementation
 def napari_get_reader(path: core.PathLike) -> Optional[core.ReaderFunction]:
-    return core.get_reader(path, compute=False)
+    return core.get_reader(path, in_memory=False)
diff --git a/napari_aicsimageio/in_memory.py b/napari_aicsimageio/in_memory.py
index ed0c33d..e6331bd 100644
--- a/napari_aicsimageio/in_memory.py
+++ b/napari_aicsimageio/in_memory.py
@@ -12,4 +12,4 @@

 @napari_hook_implementation
 def napari_get_reader(path: core.PathLike) -> Optional[core.ReaderFunction]:
-    return core.get_reader(path, compute=True)
+    return core.get_reader(path, in_memory=True)
diff --git a/napari_aicsimageio/tests/test_core.py b/napari_aicsimageio/tests/test_core.py
index 630746e..e175265 100644
--- a/napari_aicsimageio/tests/test_core.py
+++ b/napari_aicsimageio/tests/test_core.py
@@ -19,25 +19,25 @@


 @pytest.mark.parametrize(
-    "filename, compute, expected_dtype, expected_shape, expected_channel_axis",
+    "filename, in_memory, expected_dtype, expected_shape, expected_channel_axis",
     [
-        (PNG_FILE, True, np.ndarray, (800, 537, 4), None),
-        (GIF_FILE, True, np.ndarray, (72, 268, 268, 4), None),
-        (CZI_FILE, True, np.ndarray, (325, 475), None),
-        (CZI_FILE, False, da.core.Array, (325, 475), None),
-        (OME_FILE, True, np.ndarray, (325, 475), None),
-        (OME_FILE, False, da.core.Array, (325, 475), None),
-        (TIF_FILE, True, np.ndarray, (325, 475), None),
-        (TIF_FILE, False, da.core.Array, (325, 475), None),
-        ([CZI_FILE, CZI_FILE], True, np.ndarray, (2, 325, 475), None),
-        ([CZI_FILE, CZI_FILE], False, da.core.Array, (2, 325, 475), None),
-        (MED_TIF_FILE, False, da.core.Array, (10, 3, 325, 475), None),
-        (BIG_CZI_FILE, False, da.core.Array, (3, 3, 5, 325, 475), None),
-        (BIG_OME_FILE, False, da.core.Array, (3, 5, 3, 325, 475), None),
+        (PNG_FILE, True, np.ndarray, (1, 800, 537, 4), None),
+        (GIF_FILE, True, np.ndarray, (1, 72, 268, 268, 4), None),
+        (CZI_FILE, True, np.ndarray, (1, 1, 1, 325, 475), None),
+        (CZI_FILE, False, da.core.Array, (1, 1, 1, 325, 475), None),
+        (OME_FILE, True, np.ndarray, (1, 325, 475), None),
+        (OME_FILE, False, da.core.Array, (1, 325, 475), None),
+        (TIF_FILE, True, np.ndarray, (1, 325, 475), None),
+        (TIF_FILE, False, da.core.Array, (1, 325, 475), None),
+        ([CZI_FILE, CZI_FILE], True, np.ndarray, (2, 1, 1, 325, 475), None),
+        ([CZI_FILE, CZI_FILE], False, da.core.Array, (2, 1, 1, 325, 475), None),
+        (MED_TIF_FILE, False, da.core.Array, (1, 10, 3, 325, 475), None),
+        (BIG_CZI_FILE, False, da.core.Array, (1, 1, 3, 3, 5, 325, 475), None),
+        (BIG_OME_FILE, False, da.core.Array, (1, 3, 5, 3, 325, 475), None),
     ],
 )
 def test_reader(
-    data_dir, filename, compute, expected_dtype, expected_shape, expected_channel_axis
+    data_dir, filename, in_memory, expected_dtype, expected_shape, expected_channel_axis
 ):
     # Append filename(s) to resources dir
     if isinstance(filename, str):
@@ -46,7 +46,7 @@ def test_reader(
         path = [str(data_dir / _path) for _path in filename]

     # Get reader
-    reader = core.get_reader(path, compute, processes=False)
+    reader = core.get_reader(path, in_memory)

     # Check callable
     assert callable(reader)
@@ -55,7 +55,7 @@
     layer_data = reader(path)

     # We only return one layer
-    data, _ = layer_data[0]
+    data, _, _ = layer_data[0]

     # Check layer data
     assert isinstance(data, expected_dtype)
diff --git a/setup.py b/setup.py
index aebb0b0..50603ef 100644
--- a/setup.py
+++ b/setup.py
@@ -58,13 +58,17 @@

 setup(
     author="Jackson Maxfield Brown",
-    author_email="jacksonb@alleninstitute.org",
+    author_email="jmaxfieldbrown@gmail.com",
     classifiers=[
         "Development Status :: 5 - Production/Stable",
-        "Intended Audience :: Developers",
+        "Intended Audience :: Science/Research",
+        "Framework :: napari",
+        "Topic :: Scientific/Engineering",
+        "Topic :: Scientific/Engineering :: Visualization",
+        "Topic :: Scientific/Engineering :: Information Analysis",
+        "Topic :: Scientific/Engineering :: Bio-Informatics",
         "License :: OSI Approved :: BSD License",
         "Natural Language :: English",
-        "Programming Language :: Python :: 3.6",
         "Programming Language :: Python :: 3.7",
         "Programming Language :: Python :: 3.8",
     ],
@@ -83,7 +87,7 @@
     keywords="napari, aicsimageio, imaging",
     name="napari-aicsimageio",
     packages=find_packages(exclude=["tests", "*.tests", "*.tests.*"]),
-    python_requires=">=3.6",
+    python_requires=">=3.7",
     setup_requires=setup_requirements,
     test_suite="napari_aicsimageio/tests",
     tests_require=test_requirements,
diff --git a/tox.ini b/tox.ini
index fcf8893..aa33c5c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
 [tox]
 skipsdist = True
-envlist = py36, py37, py38, lint
+envlist = py37, py38, lint

 [testenv:lint]
 deps =
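For reference, a minimal usage sketch of the API refactored by this patch. The file path and the standalone call outside napari are illustrative assumptions, not part of the diff.

# Illustrative sketch (not part of the patch): exercising the refactored
# core.get_reader / reader_function API from this diff.
from napari_aicsimageio import core

path = "example.ome.tiff"  # hypothetical file path

# get_reader binds in_memory via functools.partial and returns the reader,
# or None when aicsimageio has no reader for the given path.
reader = core.get_reader(path, in_memory=False)
if reader is not None:
    layer_data = reader(path)

    # reader_function now returns a single (data, meta, "image") LayerData tuple.
    data, meta, layer_type = layer_data[0]
    print(type(data), meta.get("channel_axis"), layer_type)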