doc(zjow): polish the notation of classes and functions in torch_utils and utils #763

Merged · 7 commits · Jan 23, 2024
44 changes: 29 additions & 15 deletions ding/torch_utils/checkpoint_helper.py
@@ -11,7 +11,7 @@


def build_checkpoint_helper(cfg):
r"""
"""
Overview:
Use config to build checkpoint helper.
Arguments:
@@ -23,18 +23,18 @@ def build_checkpoint_helper(cfg):


class CheckpointHelper:
r"""
"""
Overview:
Help to save or load checkpoint with the given args.
Interface:
save, load
Interfaces:
``__init__``, ``save``, ``load``, ``_remove_prefix``, ``_add_prefix``, ``_load_matched_model_state_dict``
"""

def __init__(self):
pass

def _remove_prefix(self, state_dict: dict, prefix: str = 'module.') -> dict:
r"""
"""
Overview:
Remove prefix in state_dict
Arguments:
@@ -53,7 +53,7 @@ def _remove_prefix(self, state_dict: dict, prefix: str = 'module.') -> dict:
return new_state_dict
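To make the behaviour concrete, here is a minimal sketch of the prefix stripping that ``_remove_prefix`` performs; the state_dict keys below are invented for illustration:

```python
# Keys saved from an nn.DataParallel model carry a leading "module." prefix.
state_dict = {'module.fc.weight': 'w', 'module.fc.bias': 'b'}
prefix = 'module.'
# Strip the prefix from every matching key, as the method above does.
stripped = {k[len(prefix):] if k.startswith(prefix) else k: v for k, v in state_dict.items()}
assert stripped == {'fc.weight': 'w', 'fc.bias': 'b'}
```

``_add_prefix`` below is the inverse operation, prepending the prefix again.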

def _add_prefix(self, state_dict: dict, prefix: str = 'module.') -> dict:
r"""
"""
Overview:
Add prefix in state_dict
Arguments:
@@ -77,7 +77,7 @@ def save(
prefix_op: str = None,
prefix: str = None,
) -> None:
r"""
"""
Overview:
Save checkpoint with the given args
Arguments:
@@ -119,7 +119,7 @@ def save(
logger.info('save checkpoint in {}'.format(path))

def _load_matched_model_state_dict(self, model: torch.nn.Module, ckpt_state_dict: dict) -> None:
r"""
"""
Overview:
Load the matched part of the model state_dict, and show mismatched keys between the model's state_dict and the checkpoint's state_dict
Arguments:
@@ -169,7 +169,7 @@ def load(
logger_prefix: str = '',
state_dict_mask: list = [],
):
r"""
"""
Overview:
Load checkpoint from the given path
Arguments:
@@ -254,22 +254,36 @@ def load(


class CountVar(object):
r"""
"""
Overview:
Number counter
Interface:
val, update, add
Interfaces:
``__init__``, ``update``, ``add``
Properties:
- val (:obj:`int`): the value of the counter
"""

def __init__(self, init_val: int) -> None:
"""
Overview:
Init the var counter
Arguments:
- init_val (:obj:`int`): the init value of the counter
"""

self._val = init_val

@property
def val(self) -> int:
"""
Overview:
Get the current value of the counter
"""

return self._val
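A minimal usage sketch of ``CountVar``, assuming it is imported from ``ding.torch_utils.checkpoint_helper``:

```python
from ding.torch_utils.checkpoint_helper import CountVar

counter = CountVar(init_val=0)
counter.update(5)      # overwrite the stored value
counter.add(2)         # increment the stored value
assert counter.val == 7
```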

def update(self, val: int) -> None:
r"""
"""
Overview:
Update the var counter
Arguments:
@@ -278,7 +292,7 @@ def update(self, val: int) -> None:
self._val = val

def add(self, add_num: int):
r"""
"""
Overview:
Add the number to counter
Arguments:
@@ -288,7 +302,7 @@ def add(self, add_num: int):


def auto_checkpoint(func: Callable) -> Callable:
r"""
"""
Overview:
Create a wrapper for the given function; the wrapper calls the save_checkpoint method
whenever an exception is raised.
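A hedged usage sketch of the helper; the full ``save``/``load`` signatures are partly elided in this diff, so the arguments below are assumptions for illustration only:

```python
import torch.nn as nn
from ding.torch_utils.checkpoint_helper import build_checkpoint_helper

model = nn.Linear(4, 2)
helper = build_checkpoint_helper(cfg={})   # cfg contents are assumed
helper.save('ckpt.pth.tar', model)         # write the model state_dict to the path
helper.load('ckpt.pth.tar', model)         # restore the matched parameters
```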
9 changes: 7 additions & 2 deletions ding/torch_utils/data_helper.py
@@ -465,7 +465,7 @@ class LogDict(dict):
Overview:
Derived from ``dict``. Converts ``torch.Tensor`` values to ``list`` for convenient logging.
Interfaces:
__setitem__, update.
``_transform``, ``__setitem__``, ``update``.
"""

def _transform(self, data: Any) -> None:
@@ -525,7 +525,7 @@ class CudaFetcher(object):
Overview:
Fetch data from source, and transfer it to a specified device.
Interfaces:
__init__, run, close, __next__.
``__init__``, ``__next__``, ``run``, ``close``.
"""

def __init__(self, data_source: Iterable, device: str, queue_size: int = 4, sleep: float = 0.1) -> None:
@@ -577,6 +577,11 @@ def close(self) -> None:
self._end_flag = True

def _producer(self) -> None:
"""
Overview:
Keep fetching data from the source, move it to the specified device, and put it into ``queue`` for requests.
"""

with torch.cuda.stream(self._stream):
while not self._end_flag:
if self._queue.full():
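A hedged usage sketch of ``CudaFetcher``; the toy data source is invented, and a CUDA device is assumed to be available:

```python
import itertools
import torch
from ding.torch_utils.data_helper import CudaFetcher

# An endless generator of CPU batches stands in for a real data source.
data_source = (torch.randn(8, 4) for _ in itertools.count())
fetcher = CudaFetcher(data_source, device='cuda', queue_size=4, sleep=0.1)
fetcher.run()            # start the background producer
batch = next(fetcher)    # a batch already resident on the GPU
fetcher.close()
```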
23 changes: 23 additions & 0 deletions ding/torch_utils/dataparallel.py
@@ -3,10 +3,33 @@


class DataParallel(nn.DataParallel):
"""
Overview:
A wrapper class for nn.DataParallel.
Interfaces:
``__init__``, ``parameters``
"""

def __init__(self, module, device_ids=None, output_device=None, dim=0):
"""
Overview:
Initialize the DataParallel object.
Arguments:
- module (:obj:`nn.Module`): The module to be parallelized.
- device_ids (:obj:`list`): The list of GPU ids.
- output_device (:obj:`int`): The output GPU id.
- dim (:obj:`int`): The dimension to be parallelized.
"""
super().__init__(module, device_ids=device_ids, output_device=output_device, dim=dim)
self.module = module

def parameters(self, recurse: bool = True):
"""
Overview:
Return the parameters of the module.
Arguments:
- recurse (:obj:`bool`): Whether to return the parameters of the submodules.
Returns:
- params (:obj:`generator`): The generator of the parameters.
"""
return self.module.parameters(recurse=recurse)
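A short usage sketch of the wrapper (multi-GPU availability is assumed); delegating ``parameters`` to the inner module means an optimizer sees the original parameters without the data-parallel indirection:

```python
import torch.nn as nn
from ding.torch_utils.dataparallel import DataParallel

net = DataParallel(nn.Linear(4, 2), device_ids=[0, 1])
params = list(net.parameters())   # parameters of the wrapped nn.Linear
```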
52 changes: 29 additions & 23 deletions ding/torch_utils/distribution.py
@@ -9,19 +9,19 @@


class Pd(object):
r"""
"""
Overview:
Abstract class for parameterizable probability distributions and sampling functions.
Interface:
neglogp, entropy, noise_mode, mode, sample
Interfaces:
``neglogp``, ``entropy``, ``noise_mode``, ``mode``, ``sample``

.. tip::

In derived classes, ``logits`` should be an attribute member stored in the class.
"""

def neglogp(self, x: torch.Tensor) -> torch.Tensor:
r"""
"""
Overview:
Calculate cross_entropy between input x and logits
Arguments:
@@ -32,7 +32,7 @@ def neglogp(self, x: torch.Tensor) -> torch.Tensor:
raise NotImplementedError

def entropy(self) -> torch.Tensor:
r"""
"""
Overview:
Calculate the softmax entropy of logits
Arguments:
@@ -43,37 +43,37 @@ def entropy(self) -> torch.Tensor:
raise NotImplementedError

def noise_mode(self):
r"""
"""
Overview:
Add noise to logits. This method is designed for randomness.
"""
raise NotImplementedError

def mode(self):
r"""
"""
Overview:
Return the argmax of logits. This method is designed for deterministic behavior.
"""
raise NotImplementedError

def sample(self):
r"""
"""
Overview:
Sample from the logits' distribution using softmax. This method is designed for multinomial sampling.
"""
raise NotImplementedError


class CategoricalPd(Pd):
r"""
"""
Overview:
Categorical probability distribution sampler
Interface:
update_logits, neglogp, entropy, noise_mode, mode, sample
Interfaces:
``__init__``, ``neglogp``, ``entropy``, ``noise_mode``, ``mode``, ``sample``
"""

def __init__(self, logits: torch.Tensor = None) -> None:
r"""
"""
Overview:
Init the Pd with logits
Arguments:
@@ -82,7 +82,7 @@ def __init__(self, logits: torch.Tensor = None) -> None:
self.update_logits(logits)

def update_logits(self, logits: torch.Tensor) -> None:
r"""
"""
Overview:
Update logits
Arguments:
@@ -91,7 +91,7 @@ def update_logits(self, logits: torch.Tensor) -> None:
self.logits = logits

def neglogp(self, x, reduction: str = 'mean') -> torch.Tensor:
r"""
"""
Overview:
Calculate cross_entropy between input x and logits
Arguments:
@@ -103,7 +103,7 @@ def neglogp(self, x, reduction: str = 'mean') -> torch.Tensor:
return F.cross_entropy(self.logits, x, reduction=reduction)
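A minimal sketch of ``CategoricalPd`` in use; the shapes (a batch of 2 samples over 3 actions) are chosen for illustration:

```python
import torch
from ding.torch_utils.distribution import CategoricalPd

logits = torch.randn(2, 3)        # batch of 2, 3 candidate actions
pd = CategoricalPd(logits)
actions = torch.tensor([0, 2])
loss = pd.neglogp(actions, reduction='mean')  # scalar cross-entropy
```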

def entropy(self, reduction: str = 'mean') -> torch.Tensor:
r"""
"""
Overview:
Calculate the softmax entropy of logits
Arguments:
@@ -191,16 +191,22 @@ class CategoricalPdPytorch(torch.distributions.Categorical):
Overview:
Wrapped ``torch.distributions.Categorical``

Interface:
update_logits, update_probs, sample, neglogp, mode, entropy
Interfaces:
``__init__``, ``update_logits``, ``update_probs``, ``sample``, ``neglogp``, ``mode``, ``entropy``
"""

def __init__(self, probs: torch.Tensor = None) -> None:
"""
Overview:
Initialize the CategoricalPdPytorch object.
Arguments:
- probs (:obj:`torch.Tensor`): The tensor of probabilities.
"""
if probs is not None:
self.update_probs(probs)

def update_logits(self, logits: torch.Tensor) -> None:
r"""
"""
Overview:
Update logits
Arguments:
@@ -209,7 +215,7 @@ def update_logits(self, logits: torch.Tensor) -> None:
super().__init__(logits=logits)

def update_probs(self, probs: torch.Tensor) -> None:
r"""
"""
Overview:
Update probs
Arguments:
@@ -218,7 +224,7 @@ def update_probs(self, probs: torch.Tensor) -> None:
super().__init__(probs=probs)

def sample(self) -> torch.Tensor:
r"""
"""
Overview:
Sample from the logits' distribution using softmax
Return:
@@ -227,7 +233,7 @@ def sample(self) -> torch.Tensor:
return super().sample()

def neglogp(self, actions: torch.Tensor, reduction: str = 'mean') -> torch.Tensor:
r"""
"""
Overview:
Calculate cross_entropy between input actions and logits
Arguments:
@@ -244,7 +250,7 @@ def neglogp(self, actions: torch.Tensor, reduction: str = 'mean') -> torch.Tensor:
return neglogp.mean(dim=0)

def mode(self) -> torch.Tensor:
r"""
"""
Overview:
Return logits argmax result
Return:
@@ -253,7 +259,7 @@ def mode(self) -> torch.Tensor:
return self.probs.argmax(dim=-1)

def entropy(self, reduction: str = None) -> torch.Tensor:
r"""
"""
Overview:
Calculate the softmax entropy of logits
Arguments:
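Finally, a hedged sketch of the wrapped distribution; ``update_probs`` re-initialises the parent ``torch.distributions.Categorical`` in place:

```python
import torch
from ding.torch_utils.distribution import CategoricalPdPytorch

pd = CategoricalPdPytorch(probs=torch.tensor([0.1, 0.6, 0.3]))
sample = pd.sample()   # stochastic draw from the distribution
greedy = pd.mode()     # argmax over probs, the deterministic choice
```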