Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

dask: Data memory/disk #387

Merged
merged 14 commits into from
Apr 28, 2022
156 changes: 30 additions & 126 deletions cf/data/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,6 @@
abspath,
)
from ..functions import atol as cf_atol
from ..functions import chunksize as cf_chunksize
from ..functions import default_netCDF_fillvals
from ..functions import fm_threshold as cf_fm_threshold
from ..functions import free_memory
Expand Down Expand Up @@ -255,7 +254,7 @@ def __init__(
copy=True,
dtype=None,
mask=None,
persist=False,
to_memory=False,
init_options=None,
_use_array=True,
):
Expand Down Expand Up @@ -376,18 +375,26 @@ def __init__(

.. versionadded:: TODODASK

persist: `bool`, optional
If True then persist the underlying array into memory,
equivalent to calling `persist` on the data
immediately after initialisation.
to_memory: `bool`, optional
If True then ensure that the original data are in
memory, rather than on disk.

If the original data are on disk then reading data
If the original data are on disk, then reading data
into memory during initialisation will slow down the
initialisation process, but can considerably improve
downstream performance by avoiding the need for
independent reads for every dask chunk, each time the
data are computed.

In general, setting *to_memory* to True is not the same
davidhassell marked this conversation as resolved.
Show resolved Hide resolved
as calling the `persist` of the newly created `Data`
object, which also decompresses data compressed by
convention and computes any data type, mask and
date-time modifications.

If the input *array* is a `dask.array.Array` object
then *to_memory* is ignored.

.. versionadded:: TODODASK

init_options: `dict`, optional
Expand Down Expand Up @@ -549,11 +556,20 @@ def __init__(
"for compressed input arrays"
)

# Bring the compressed data into memory without
# decompressing it
if to_memory:
try:
array = array.to_memory()
except AttributeError:
pass

# Save the input compressed array, as this will contain
# extra information, such as a count or index variable.
self._set_Array(array)

array = compressed_to_dask(array, chunks)

elif not is_dask_collection(array):
# Turn the data into a dask array
kwargs = init_options.get("from_array", {})
Expand All @@ -564,6 +580,13 @@ def __init__(
"Use the 'chunks' parameter instead."
)

# Bring the data into memory
if to_memory:
try:
array = array.to_memory()
except AttributeError:
pass

array = to_dask(array, chunks, **kwargs)

elif chunks != _DEFAULT_CHUNKS:
Expand Down Expand Up @@ -607,21 +630,6 @@ def __init__(
if mask is not None:
self.where(mask, cf_masked, inplace=True)

# Bring the data into memory
if persist:
self.persist(inplace=True)

# @property#
# def dask_array(s#elf):
# """TODODASK.##
#
# :Returns:
#
# `dask.array.Array`##
#
# """
# return self.get_dask(copy=True)

@property
def dask_compressed_array(self):
"""TODODASK.
Expand Down Expand Up @@ -9744,99 +9752,6 @@ def to_dask_array(self):
"""
return self._get_dask()

def to_disk(self):
    """Store the data array on disk.

    Partitions whose sub-arrays are already on disk are left
    unchanged.

    :Returns:

        `None`

    **Examples**

    >>> d.to_disk()

    """
    print("TODODASK - ???")
    # Open each in-memory partition read-only, touch its array so
    # that the data move to disk, then close it again.
    cfg = self.partition_configuration(readonly=True, to_disk=True)

    for part in self.partitions.matrix.flat:
        if not part.in_memory:
            continue

        part.open(cfg)
        part.array
        part.close()

def to_memory(self, regardless=False, parallelise=False):
    """Store each partition's data in memory in place if the master
    array is smaller than the chunk size.

    Partitions with data that are already in memory are left
    unchanged.

    :Parameters:

        regardless: `bool`, optional
            If True then store all partitions' data in memory
            regardless of the size of the master array. By default
            only store all partitions' data in memory if the
            master array is smaller than the chunk size.

        parallelise: `bool`, optional
            If True then only move those partitions to memory that
            are flagged for processing on this rank.

    :Returns:

        `None`

    **Examples**

    >>> d.to_memory()
    >>> d.to_memory(regardless=True)

    """
    print("TODODASK - ???")
    cfg = self.partition_configuration(readonly=True)
    threshold = cf_fm_threshold()

    # Flag which partitions this rank should process. When
    # parallelise is False every partition gets flagged.
    self._flag_partitions_for_processing(parallelise)

    for part in self.partitions.matrix.flat:
        if not part._process_partition:
            # Skip partitions that are not flagged for processing
            continue

        part.open(cfg)
        if part.on_disk and part.nbytes <= free_memory() - threshold:
            # Touching the array brings the data into memory
            part.array

        part.close()

@property
def in_memory(self):
    """True if every partition's data is retained in memory.

    :Returns:

    **Examples**

    >>> d.in_memory

    """
    print("TODODASK - ???")
    # Vacuously True when there are no partitions, matching the
    # behaviour of an explicit loop that never finds a partition
    # that is out of memory.
    return all(p.in_memory for p in self.partitions.matrix.flat)

@daskified(_DASKIFIED_VERBOSE)
def datum(self, *index):
"""Return an element of the data array as a standard Python
Expand Down Expand Up @@ -10875,17 +10790,6 @@ def swapaxes(self, axis0, axis1, inplace=False, i=False):
d._set_dask(dx, reset_mask_hardness=False)
return d

def save_to_disk(self, itemsize=None):
    """cf.Data.save_to_disk is dead.

    Use not cf.Data.fits_in_memory instead.

    """
    # Removed API: unconditionally raise, pointing callers at the
    # replacement idiom.
    msg = (
        "cf.Data.save_to_disk is dead. Use not "
        "cf.Data.fits_in_memory instead."
    )
    raise NotImplementedError(msg)

def fits_in_memory(self, itemsize):
"""Return True if the master array is small enough to be
retained in memory.
Expand Down
95 changes: 94 additions & 1 deletion cf/data/mixin/deprecations.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,8 +106,29 @@ def dtvarray(self):
"""Deprecated at version 3.0.0."""
_DEPRECATION_ERROR_ATTRIBUTE(self, "dtvarray") # pragma: no cover

@property
def in_memory(self):
    """True if the array is retained in memory.

    Deprecated at version TODODASK.

    :Returns:

    **Examples**

    >>> d.in_memory

    """
    # Deprecated attribute: always raises the deprecation error.
    _DEPRECATION_ERROR_ATTRIBUTE(
        self, "in_memory", version="TODODASK", removed_at="5.0.0"
    )  # pragma: no cover

def files(self):
"""Deprecated at version 3.4.0, use method `get_` instead."""
"""Deprecated at version 3.4.0, use method `get_filenames`
instead."""
_DEPRECATION_ERROR_METHOD(
self,
"files",
Expand Down Expand Up @@ -386,3 +407,75 @@ def partition_boundaries(self):
_DEPRECATION_ERROR_METHOD(
"TODODASK - consider using 'chunks' instead"
) # pragma: no cover

def save_to_disk(self, itemsize=None):
    """Deprecated."""
    # Deprecated method: always raises the deprecation error.
    _DEPRECATION_ERROR_METHOD(
        self, "save_to_disk", removed_at="4.0.0"
    )  # pragma: no cover

def to_disk(self):
    """Store the data array on disk.

    Deprecated at version TODODASK.

    Partitions whose sub-arrays are already on disk are left
    unchanged.

    :Returns:

        `None`

    **Examples**

    >>> d.to_disk()

    """
    # Deprecated method: always raises the deprecation error.
    _DEPRECATION_ERROR_METHOD(
        self, "to_disk", version="TODODASK", removed_at="5.0.0"
    )  # pragma: no cover

def to_memory(self, regardless=False, parallelise=False):
    """Store each partition's data in memory in place if the master
    array is smaller than the chunk size.

    Deprecated at version TODODASK. Consider using `persist`
    instead.

    Partitions with data that are already in memory are left
    unchanged.

    :Parameters:

        regardless: `bool`, optional
            If True then store all partitions' data in memory
            regardless of the size of the master array. By default
            only store all partitions' data in memory if the
            master array is smaller than the chunk size.

        parallelise: `bool`, optional
            If True then only move those partitions to memory that
            are flagged for processing on this rank.

    :Returns:

        `None`

    **Examples**

    >>> d.to_memory()
    >>> d.to_memory(regardless=True)

    """
    # Deprecated method: always raises the deprecation error,
    # steering callers towards `persist`.
    _DEPRECATION_ERROR_METHOD(
        self,
        "to_memory",
        message="Consider using 'persist' instead.",
        version="TODODASK",
        removed_at="5.0.0",
    )  # pragma: no cover