Skip to content

Commit

Permalink
added the l1_loss (ivy-llc#22287)
Browse files Browse the repository at this point in the history
Co-authored-by: nathzi1505 <[email protected]>
  • Loading branch information
2 people authored and arshPratap committed Sep 11, 2023
1 parent 26b693d commit f7aaf50
Show file tree
Hide file tree
Showing 8 changed files with 370 additions and 1 deletion.
46 changes: 45 additions & 1 deletion ivy/data_classes/array/experimental/losses.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,50 @@
# global
import abc
from typing import Optional, Union

# local
import ivy


class _ArrayWithLossesExperimental(abc.ABC):
    # NOTE: the redundant `pass` placeholder was removed — the class now has a body.
    def l1_loss(
        self: Union[ivy.Array, ivy.NativeArray],
        target: Union[ivy.Array, ivy.NativeArray],
        /,
        *,
        reduction: Optional[str] = "mean",
        out: Optional[ivy.Array] = None,
    ) -> ivy.Array:
        """
        ivy.Array instance method variant of ivy.l1_loss. This method simply
        wraps the function, and so the docstring for ivy.l1_loss also applies
        to this method with minimal changes.

        Parameters
        ----------
        self
            input array.
        target
            input array containing the target values.
        reduction
            ``'mean'``: The output will be averaged.
            ``'sum'``: The output will be summed.
            ``'none'``: No reduction will be applied to the output.
            Default: ``'mean'``.
        out
            optional output array, for writing the result to. It must have a
            shape that the inputs broadcast to.

        Returns
        -------
        ret
            The L1 loss between the input array and the target values.

        Examples
        --------
        >>> x = ivy.array([1.0, 2.0, 3.0])
        >>> y = ivy.array([0.7, 1.8, 2.9])
        >>> z = x.l1_loss(y)
        >>> print(z)
        ivy.array(0.20000000000000004)
        """
        return ivy.l1_loss(self._data, target, reduction=reduction, out=out)
2 changes: 2 additions & 0 deletions ivy/data_classes/container/container.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@
_ContainerWithSortingExperimental,
_ContainerWithStatisticalExperimental,
_ContainerWithUtilityExperimental,
_ContainerWithLossesExperimental,
)


Expand Down Expand Up @@ -87,6 +88,7 @@ class Container(
_ContainerWithSortingExperimental,
_ContainerWithStatisticalExperimental,
_ContainerWithUtilityExperimental,
_ContainerWithLossesExperimental,
):
def __init__(
self,
Expand Down
1 change: 1 addition & 0 deletions ivy/data_classes/container/experimental/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,3 +17,4 @@
from .sorting import _ContainerWithSortingExperimental
from .statistical import _ContainerWithStatisticalExperimental
from .utility import _ContainerWithUtilityExperimental
from .losses import _ContainerWithLossesExperimental
161 changes: 161 additions & 0 deletions ivy/data_classes/container/experimental/losses.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,161 @@
# global
from typing import Optional, Union, List, Dict

# local
import ivy
from ivy.data_classes.container.base import ContainerBase


class _ContainerWithLossesExperimental(ContainerBase):
    @staticmethod
    def _static_l1_loss(
        input: Union[ivy.Container, ivy.Array, ivy.NativeArray],
        target: Union[ivy.Container, ivy.Array, ivy.NativeArray],
        /,
        *,
        reduction: Optional[Union[str, ivy.Container]] = "mean",
        key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
        to_apply: Union[bool, ivy.Container] = True,
        prune_unapplied: Union[bool, ivy.Container] = False,
        map_sequences: Union[bool, ivy.Container] = False,
        out: Optional[ivy.Container] = None,
    ) -> ivy.Container:
        """
        ivy.Container static method variant of ivy.l1_loss. This method simply
        wraps the function, and so the docstring for ivy.l1_loss also applies
        to this method with minimal changes.

        Parameters
        ----------
        input
            input array or container.
        target
            input array or container containing the target values.
        reduction
            ``'mean'``: The output will be averaged.
            ``'sum'``: The output will be summed.
            ``'none'``: No reduction will be applied to the output.
            Default: ``'mean'``.
        key_chains
            The key-chains to apply or not apply the method to.
            Default is ``None``.
        to_apply
            If True, the method will be applied to key_chains, otherwise
            key_chains will be skipped. Default is ``True``.
        prune_unapplied
            Whether to prune key_chains for which the function was not applied.
            Default is ``False``.
        map_sequences
            Whether to also map method to sequences (lists, tuples).
            Default is ``False``.
        out
            optional output container, for writing the result to. It must have
            a shape that the inputs broadcast to.

        Returns
        -------
        ret
            The L1 loss between the input array and the target values.

        Examples
        --------
        With :class:`ivy.Container` inputs:

        >>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([4, 5, 6]))
        >>> y = ivy.Container(a=ivy.array([2, 2, 2]), b=ivy.array([5, 5, 5]))
        >>> z = ivy.Container.static_l1_loss(x, y)
        >>> print(z)
        {
            a: ivy.array(1.),
            b: ivy.array(0.)
        }

        With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:

        >>> x = ivy.array([1, 2, 3])
        >>> y = ivy.Container(a=ivy.array([2, 2, 2]), b=ivy.array([5, 5, 5]))
        >>> z = ivy.Container.static_l1_loss(x, y)
        >>> print(z)
        {
            a: ivy.array(1.),
            b: ivy.array(4.)
        }
        """
        return ContainerBase.cont_multi_map_in_function(
            "l1_loss",
            input,
            target,
            reduction=reduction,
            key_chains=key_chains,
            to_apply=to_apply,
            prune_unapplied=prune_unapplied,
            map_sequences=map_sequences,
            out=out,
        )

    def l1_loss(
        self: ivy.Container,
        target: Union[ivy.Container, ivy.Array, ivy.NativeArray],
        /,
        *,
        reduction: Optional[Union[str, ivy.Container]] = "mean",
        key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
        to_apply: Union[bool, ivy.Container] = True,
        prune_unapplied: Union[bool, ivy.Container] = False,
        map_sequences: Union[bool, ivy.Container] = False,
        out: Optional[ivy.Container] = None,
    ) -> ivy.Container:
        """
        ivy.Container instance method variant of ivy.l1_loss. This method
        simply wraps the function, and so the docstring for ivy.l1_loss also
        applies to this method with minimal changes.

        Parameters
        ----------
        self
            input container.
        target
            input array or container containing the target values.
        reduction
            ``'mean'``: The output will be averaged.
            ``'sum'``: The output will be summed.
            ``'none'``: No reduction will be applied to the output.
            Default: ``'mean'``.
        key_chains
            The key-chains to apply or not apply the method to.
            Default is ``None``.
        to_apply
            If True, the method will be applied to key_chains, otherwise
            key_chains will be skipped. Default is ``True``.
        prune_unapplied
            Whether to prune key_chains for which the function was not applied.
            Default is ``False``.
        map_sequences
            Whether to also map method to sequences (lists, tuples).
            Default is ``False``.
        out
            optional output container, for writing the result to. It must have
            a shape that the inputs broadcast to.

        Returns
        -------
        ret
            The L1 loss between the input array and the target values.

        Examples
        --------
        >>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([4, 5, 6]))
        >>> y = ivy.Container(a=ivy.array([2, 2, 2]), b=ivy.array([5, 5, 5]))
        >>> z = x.l1_loss(y)
        >>> print(z)
        {
            a: ivy.array(1.),
            b: ivy.array(0.)
        }
        """
        return self._static_l1_loss(
            self,
            target,
            reduction=reduction,
            key_chains=key_chains,
            to_apply=to_apply,
            prune_unapplied=prune_unapplied,
            map_sequences=map_sequences,
            out=out,
        )
36 changes: 36 additions & 0 deletions ivy/functional/backends/paddle/experimental/losses.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# global
from typing import Optional
import paddle
import paddle.nn.functional as F

# local
from ivy.func_wrapper import with_unsupported_device_and_dtypes
from . import backend_version


@with_unsupported_device_and_dtypes(
    {
        "2.5.1 and below": {
            "cpu": (
                "float16",
                "int8",
                "int16",
                "int32",
                "int64",
                "uint8",
                "complex64",
                "complex128",
                "bool",
            )
        }
    },
    backend_version,
)
def l1_loss(
    input: paddle.Tensor,
    target: paddle.Tensor,
    /,
    *,
    reduction: Optional[str] = "mean",
) -> paddle.Tensor:
    """Compute the L1 loss between ``input`` and ``target``.

    Thin wrapper delegating to ``paddle.nn.functional.l1_loss``; ``reduction``
    is one of ``'mean'``, ``'sum'`` or ``'none'`` (default ``'mean'``).
    """
    result = F.l1_loss(input, target, reduction=reduction)
    return result
24 changes: 24 additions & 0 deletions ivy/functional/backends/torch/experimental/losses.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
from typing import Optional
import torch
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version

# Assuming ivy and backend_version are imported and defined properly


@with_unsupported_dtypes(
    # BUG FIX: "unit8" was a typo for "uint8" — the misspelling meant the
    # uint8 dtype was never actually marked unsupported by the decorator.
    {"2.0.1 and below": ("uint8", "int8", "int16", "int32", "int64", "bool")},
    backend_version,
)
def l1_loss(
    input: torch.Tensor,
    target: torch.Tensor,
    /,
    *,
    reduction: Optional[str] = "mean",
) -> torch.Tensor:
    """Compute the L1 loss (mean absolute error) between ``input`` and
    ``target``.

    Thin wrapper delegating to ``torch.nn.functional.l1_loss``; ``reduction``
    is one of ``'mean'``, ``'sum'`` or ``'none'`` (default ``'mean'``).
    """
    return torch.nn.functional.l1_loss(
        input,
        target,
        reduction=reduction,
    )
59 changes: 59 additions & 0 deletions ivy/functional/ivy/experimental/losses.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from ivy.func_wrapper import (
handle_nestable,
inputs_to_ivy_arrays,
handle_array_like_without_promotion,
handle_array_function,
)
from ivy.utils.exceptions import handle_exceptions
Expand Down Expand Up @@ -93,3 +94,61 @@ def log_poisson_loss(
return ivy.mean(loss, axis=axis, out=out)
else:
return ivy.inplace_update(out, loss) if out is not None else loss


@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def l1_loss(
    input: Union[ivy.Array, ivy.NativeArray],
    target: Union[ivy.Array, ivy.NativeArray],
    /,
    *,
    reduction: Optional[str] = "mean",
    out: Optional[ivy.Array] = None,
) -> ivy.Array:
    """
    Compute the L1 loss (Mean Absolute Error - MAE) between target and input
    values.

    Parameters
    ----------
    input : Union[ivy.Array, ivy.NativeArray]
        Input array containing input values.
    target : Union[ivy.Array, ivy.NativeArray]
        Input array containing target values.
    reduction : str, optional
        Reduction method for the output loss. Options:
        "none" (no reduction), "mean" (mean of losses),
        "sum" (sum of losses). Default: "mean".
    out : Optional[ivy.Array], optional
        Optional output array for writing the result to.
        It must have a shape that the inputs broadcast to.

    Returns
    -------
    ivy.Array
        The L1 loss (MAE) between the given input and target values.

    Examples
    --------
    >>> x = ivy.array([1.0, 2.0, 3.0])
    >>> y = ivy.array([0.5, 2.5, 2.0])
    >>> print(ivy.l1_loss(x, y))
    ivy.array(0.66666669)
    >>> a = ivy.array([[1.0, 2.0], [3.0, 4.0]])
    >>> b = ivy.array([[0.5, 1.5], [2.5, 3.5]])
    >>> print(ivy.l1_loss(a, b))
    ivy.array(0.5)
    """
    # Element-wise absolute error; reduction is applied below.
    loss = ivy.abs(target - input)

    if reduction == "sum":
        return ivy.sum(loss, out=out)
    elif reduction == "mean":
        return ivy.mean(loss, out=out)
    else:
        # "none": return the per-element losses, honoring `out` if given.
        return ivy.inplace_update(out, loss) if out is not None else loss
Loading

0 comments on commit f7aaf50

Please sign in to comment.