[deploy_alpha] docs formatting
wilsonbb committed Apr 3, 2024
1 parent b31f0d9 commit 123ccb6
Showing 1 changed file with 19 additions and 18 deletions.
37 changes: 19 additions & 18 deletions src/kbmod/region_search.py
@@ -43,9 +43,9 @@ def __init__(
----------
repo_path : `str`
The path to the LSST butler repository.
- collections : `list(str)`
+ collections : `list[str]`
The list of desired collection names within the Butler repository.
- dataset_types : `list(str)`
+ dataset_types : `list[str]`
The list of desired dataset types within the Butler repository.
butler : `lsst.daf.butler.Butler`, optional
The Butler object to use for data access. If None, a new Butler object will be created from `repo_path`.
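Read together, the constructor parameters above imply a call pattern like the following sketch. The class name `RegionSearch` and every path and collection name here are illustrative assumptions, not values taken from this diff:

from kbmod.region_search import RegionSearch  # assumed import path for this module

rs = RegionSearch(
    repo_path="/repo/main",                  # placeholder Butler repository path
    collections=["HSC/runs/RC2/w_2024_10"],  # placeholder collection names (list[str])
    dataset_types=["calexp"],                # placeholder dataset types (list[str])
)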
@@ -84,8 +84,8 @@ def get_collection_names(butler=None, repo_path=None):
The Butler object or a path to the LSST butler repository from which to create a butler.
Returns
-------
- `list(str)`
- The list of available collections in the butler repository.
+ collections : `list[str]`
+ The list of the names of available collections in the butler repository.
"""
if butler is None:
if repo_path is None:
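Since the helper accepts either a ready-made Butler or a repository path, a hedged call sketch (placeholder path; `RegionSearch` as assumed in the earlier sketch) looks like:

# Let the helper construct its own Butler from a placeholder repo path.
names = RegionSearch.get_collection_names(repo_path="/repo/main")
print(names)  # a list[str] of available collection names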
@@ -102,7 +102,7 @@ def get_dataset_type_freq(butler=None, repo_path=None, collections=None):
----------
butler | repo_path : `lsst.daf.butler.Butler` | str
The Butler object or a path to the LSST butler repository from which to create a butler.
- collections : `list(str)`, optional
+ collections : `list[str]`, optional
The names of collections from which we can query the dataset type frequencies. If None, use all collections.
Returns
-------
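The Returns section is collapsed in this view; based on the `ref_freq` value returned further down, the result is assumed to be a mapping from dataset type name to its frequency, so a hedged usage sketch is:

freq = RegionSearch.get_dataset_type_freq(
    repo_path="/repo/main",                  # placeholder repository path
    collections=["HSC/runs/RC2/w_2024_10"],  # placeholder collections
)
for dataset_type, count in freq.items():     # dict-like return is an assumption
    print(dataset_type, count)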
@@ -130,6 +130,7 @@ def get_dataset_type_freq(butler=None, repo_path=None, collections=None):
return ref_freq

def is_parallel(self):
"""Returns True if parallel processing was requested."""
return self.max_workers is not None

def new_butler(self):
@@ -142,7 +143,7 @@ def set_collections(self, collections):
Parameters
----------
- collections : `list(str)`
+ collections : `list[str]`
The list of desired collections to use for the region search.
"""
self.collections = collections
@@ -166,9 +167,9 @@ def fetch_vdr_data(self, collections=None, dataset_types=None):
Parameters
----------
- collections : `list(str)`
+ collections : `list[str]`
The names of the collections to get the dataset type stats for. If None, use self.collections.
- dataset_types : `list(str)`
+ dataset_types : `list[str]`
The names of the dataset types to get the dataset type stats for. If None, use self.dataset_types.
Returns
@@ -217,7 +218,7 @@ def get_instruments(self, data_ids=None, first_instrument_only=False):
Returns
-------
- instruments : `list(lsst.afw.instrument.Instrument)`
+ instruments : `list`
A list of instrument objects for the given data IDs.
"""
if data_ids is None:
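A hedged call sketch for this accessor (the `rs` object is the one assumed in the earlier sketch; the exact instrument type is not pinned down by this diff, which is why the docstring now says plain `list`):

instruments = rs.get_instruments(first_instrument_only=True)
print(instruments)  # assumed: a list holding just the first instrument object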
@@ -231,23 +232,23 @@ def get_instruments(self, data_ids=None, first_instrument_only=False):
instruments.append(instrument)
return instruments

- def get_uris_serial(self, data_ids, dataset_types=None, collections=None, butler=None):
+ def _get_uris_serial(self, data_ids, dataset_types=None, collections=None, butler=None):
"""Fetch URIs for a list of dataIds in serial fashion.
Parameters
----------
data_ids : `iterable(dict)`
A collection of data Ids to fetch URIs for.
- dataset_types : `list(str)`
+ dataset_types : `list[str]`
The dataset types to use when fetching URIs. If None, use self.dataset_types.
- collections : `list(str)`
+ collections : `list[str]`
The collections to use when fetching URIs. If None, use self.collections.
butler : `lsst.daf.butler.Butler`, optional
The Butler object to use for data access. If None, use self.butler.
Returns
-------
- uris : `list(str)`
+ uris : `list[str]`
The list of URIs for the given data Ids.
"""
if butler is None:
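The rest of the serial implementation is collapsed here. As an illustration only (not the method's actual body), a serial URI fetch over data IDs with a Gen3 Butler typically looks like this; the `Butler.getURI` usage and the `LookupError` handling are assumptions:

def fetch_uris_serially(butler, data_ids, dataset_types, collections):
    """Illustrative serial loop: one URI lookup per (data ID, dataset type) pair."""
    uris = []
    for data_id in data_ids:
        for dataset_type in dataset_types:
            try:
                uri = butler.getURI(dataset_type, dataId=data_id, collections=collections)
                uris.append(str(uri))
            except LookupError:
                continue  # no matching dataset in these collections; skip it
    return uris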
@@ -279,14 +280,14 @@ def get_uris(self, data_ids, dataset_types=None, collections=None):
----------
data_ids : `iterable(dict)`
A collection of data Ids to fetch URIs for.
- dataset_types : `list(str)`
+ dataset_types : `list[str]`
The dataset types to use when fetching URIs. If None, use self.dataset_types.
- collections : `list(str)`
+ collections : `list[str]`
The collections to use when fetching URIs. If None, use self.collections.
Returns
-------
- uris : `list(str)`
+ uris : `list[str]`
The list of URIs for the given data Ids.
"""
if dataset_types is None:
@@ -299,7 +300,7 @@ def get_uris(self, data_ids, dataset_types=None, collections=None):
collections = self.collections

if not self.is_parallel():
- return self.get_uris_serial(data_ids, dataset_types, collections)
+ return self._get_uris_serial(data_ids, dataset_types, collections)

# Divide the data_ids into chunks to be processed in parallel
data_id_chunks = list(_chunked_data_ids(data_ids))
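`_chunked_data_ids` itself is outside this diff; a plausible sketch of such a chunking helper (the chunk size is an assumed parameter) is:

from itertools import islice

def _chunked_data_ids(data_ids, chunk_size=200):
    """Yield successive fixed-size chunks of data IDs for the worker pool."""
    iterator = iter(data_ids)
    while chunk := list(islice(iterator, chunk_size)):
        yield chunk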
@@ -309,7 +310,7 @@ def get_uris(self, data_ids, dataset_types=None, collections=None):
with ProcessPoolExecutor(max_workers=self.max_workers) as executor:
futures = [
executor.submit(
- self.get_uris_serial,
+ self._get_uris_serial,
chunk,
dataset_types=dataset_types,
collections=collections,
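Putting the pieces together, a hedged end-to-end sketch: the `max_workers` constructor argument is inferred from `self.max_workers` above and may be spelled differently, and the data ID keys are placeholders.

rs = RegionSearch(
    repo_path="/repo/main",
    collections=["HSC/runs/RC2/w_2024_10"],
    dataset_types=["calexp"],
    max_workers=4,  # assumed knob behind self.max_workers; None would take the serial path
)
data_ids = [{"instrument": "HSC", "visit": 1234, "detector": 42}]  # placeholder data IDs
uris = rs.get_uris(data_ids)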
