From 0722d31795f4ef6db87fa9680bcafd3b745a2205 Mon Sep 17 00:00:00 2001 From: marccgrau Date: Tue, 4 Jul 2023 13:51:30 +0000 Subject: [PATCH 01/12] added lgamma to tensorflow frontend, backend implementation missing --- ivy/functional/frontends/tensorflow/math.py | 5 ++++ .../frontends/tensorflow/raw_ops.py | 2 ++ .../test_tensorflow/test_math.py | 29 +++++++++++++++++++ 3 files changed, 36 insertions(+) diff --git a/ivy/functional/frontends/tensorflow/math.py b/ivy/functional/frontends/tensorflow/math.py index f3f33f90a6e4e..005c1dd05aeee 100644 --- a/ivy/functional/frontends/tensorflow/math.py +++ b/ivy/functional/frontends/tensorflow/math.py @@ -194,6 +194,11 @@ def is_strictly_increasing(x, name="is_strictly_increasing"): return ivy.all(ivy.less(x, ivy.roll(x, -1))) +@to_ivy_arrays_and_back +def lgamma(x, name=None): + return ivy.lgamma(x) + + @to_ivy_arrays_and_back def log_sigmoid(x, name=None): return -ivy.softplus(-x) diff --git a/ivy/functional/frontends/tensorflow/raw_ops.py b/ivy/functional/frontends/tensorflow/raw_ops.py index bb75a07e6b3cb..17f6be7efa750 100644 --- a/ivy/functional/frontends/tensorflow/raw_ops.py +++ b/ivy/functional/frontends/tensorflow/raw_ops.py @@ -289,6 +289,8 @@ def Log(*, x, name="Log"): return ivy.log(x) +Lgamma = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.lgamma)) + Log1p = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.log1p)) diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py index 779de2bb125d6..5c6fee28dd8db 100644 --- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py +++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py @@ -465,6 +465,35 @@ def test_tensorflow_negative( ) +# lgamma +@handle_frontend_test( + fn_tree="tensorflow.math.lgamma", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + small_abs_safety_factor=3, + safety_factor_scale="log", + ), + test_with_out=st.just(False), +) +def test_tensorflow_lgamma( + *, + dtype_and_x, + frontend, + test_flags, + fn_tree, + on_device, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + frontend=frontend, + test_flags=test_flags, + fn_tree=fn_tree, + on_device=on_device, + x=x[0], + ) + + # logical_and @handle_frontend_test( fn_tree="tensorflow.math.logical_and", From f2834c45f1b8e409ed6cf2b1e7808b5d0c1beeb3 Mon Sep 17 00:00:00 2001 From: marccgrau <46031196+marccgrau@users.noreply.github.com> Date: Sat, 8 Jul 2023 13:51:48 +0200 Subject: [PATCH 02/12] add ivy.lgamma to functional API experimental --- .../backends/jax/experimental/elementwise.py | 5 +++ .../mxnet/experimental/elementwise.py | 9 +++++ .../numpy/experimental/elementwise.py | 5 +++ .../paddle/experimental/elementwise.py | 6 ++++ .../tensorflow/experimental/elementwise.py | 9 +++++ .../torch/experimental/elementwise.py | 4 +++ .../ivy/experimental/elementwise.py | 34 +++++++++++++++++++ .../test_tensorflow/test_math.py | 2 +- .../test_core/test_elementwise.py | 29 ++++++++++++++++ 9 files changed, 102 insertions(+), 1 deletion(-) diff --git a/ivy/functional/backends/jax/experimental/elementwise.py b/ivy/functional/backends/jax/experimental/elementwise.py index 6bda9e89180c8..65a53e6d2acbd 100644 --- a/ivy/functional/backends/jax/experimental/elementwise.py +++ b/ivy/functional/backends/jax/experimental/elementwise.py @@ -10,6 +10,7 @@ from ivy.functional.backends.jax import JaxArray import jax.numpy as jnp 
import jax.scipy as js +import jax.lax as jlax jax_ArrayLike = Union[JaxArray, Number] @@ -18,6 +19,10 @@ def sinc(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray: return jnp.sinc(x) +def lgamma(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray: + return jlax.lgamma(x) + + def fmax( x1: JaxArray, x2: JaxArray, diff --git a/ivy/functional/backends/mxnet/experimental/elementwise.py b/ivy/functional/backends/mxnet/experimental/elementwise.py index c52db13c00796..c5465c0aee1df 100644 --- a/ivy/functional/backends/mxnet/experimental/elementwise.py +++ b/ivy/functional/backends/mxnet/experimental/elementwise.py @@ -5,6 +5,15 @@ from ivy.utils.exceptions import IvyNotImplementedException +def lgamma( + x: Union[(None, mx.ndarray.NDArray)], + /, + *, + out: Optional[Union[(None, mx.ndarray.NDArray)]] = None, +) -> Union[(None, mx.ndarray.NDArray)]: + return mx.log(mx.npx.gamma(x)) + + def sinc( x: Union[(None, mx.ndarray.NDArray)], /, diff --git a/ivy/functional/backends/numpy/experimental/elementwise.py b/ivy/functional/backends/numpy/experimental/elementwise.py index ab4f7280857dd..cc4c12fc45616 100644 --- a/ivy/functional/backends/numpy/experimental/elementwise.py +++ b/ivy/functional/backends/numpy/experimental/elementwise.py @@ -7,6 +7,11 @@ from ivy.functional.backends.numpy.helpers import _scalar_output_to_0d_array from ivy.func_wrapper import with_unsupported_dtypes from . import backend_version +from ivy.utils.exceptions import IvyNotImplementedException + + +def lgamma(x: np.ndarray, /, *, out: Optional[np.ndarray] = None) -> np.ndarray: + raise IvyNotImplementedException() @_scalar_output_to_0d_array diff --git a/ivy/functional/backends/paddle/experimental/elementwise.py b/ivy/functional/backends/paddle/experimental/elementwise.py index 5f62e6ec3abf7..bfb6e9507699e 100644 --- a/ivy/functional/backends/paddle/experimental/elementwise.py +++ b/ivy/functional/backends/paddle/experimental/elementwise.py @@ -17,6 +17,12 @@ from .. import backend_version +def lgamma( + x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None +) -> paddle.Tensor: + return paddle.lgamma(x) + + @with_supported_dtypes( {"2.5.0 and below": ("float64", "float32", "int32", "int64")}, backend_version, diff --git a/ivy/functional/backends/tensorflow/experimental/elementwise.py b/ivy/functional/backends/tensorflow/experimental/elementwise.py index 1e5b3a97436ff..4613b09ddcd2a 100644 --- a/ivy/functional/backends/tensorflow/experimental/elementwise.py +++ b/ivy/functional/backends/tensorflow/experimental/elementwise.py @@ -11,6 +11,15 @@ from .. import backend_version +def lgamma( + x: Union[tf.Tensor, tf.Variable], + /, + *, + out: Optional[Union[tf.Tensor, tf.Variable]] = None, +) -> Union[tf.Tensor, tf.Variable]: + return tf.math.lgamma(x) + + def sinc( x: Union[tf.Tensor, tf.Variable], /, diff --git a/ivy/functional/backends/torch/experimental/elementwise.py b/ivy/functional/backends/torch/experimental/elementwise.py index ebe2c95b09305..d541c61121f02 100644 --- a/ivy/functional/backends/torch/experimental/elementwise.py +++ b/ivy/functional/backends/torch/experimental/elementwise.py @@ -14,6 +14,10 @@ from .. 
import backend_version +def lgamma(x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None) -> torch.Tensor: + return torch.lgamma(x, out=out) + + @with_unsupported_dtypes({"2.0.1 and below": ("complex",)}, backend_version) def fmax( x1: torch.Tensor, diff --git a/ivy/functional/ivy/experimental/elementwise.py b/ivy/functional/ivy/experimental/elementwise.py index dbc6afc72e77f..a38fda0ac64c5 100644 --- a/ivy/functional/ivy/experimental/elementwise.py +++ b/ivy/functional/ivy/experimental/elementwise.py @@ -16,6 +16,40 @@ from ivy.utils.exceptions import handle_exceptions +@handle_exceptions +@handle_nestable +@handle_array_like_without_promotion +@handle_out_argument +@to_native_arrays_and_back +@integer_arrays_to_float +@handle_array_function +def lgamma( + x: Union[ivy.Array, ivy.NativeArray], + /, + *, + out: Optional[ivy.Array] = None, +) -> ivy.Array: + """ + Compute the natural logarithm of the absolute value of the gamma function on x. + + Parameters + ---------- + x + input array. Should have a floating-point data type. + out + optional output array, for writing the result to. It must have a shape that the + inputs broadcast to. + + Returns + ------- + ret + an array containing the natural log of Gamma(x) of each element in x. + The returned array must have a floating-point data type determined + by :ref:`type-promotion`. + """ + return ivy.current_backend(x).lgamma(x, out=out) + + @handle_exceptions @handle_nestable @handle_array_like_without_promotion diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py index 5c6fee28dd8db..25702e2e5d9a6 100644 --- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py +++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py @@ -469,7 +469,7 @@ def test_tensorflow_negative( @handle_frontend_test( fn_tree="tensorflow.math.lgamma", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), + available_dtypes=helpers.get_dtypes("valid"), small_abs_safety_factor=3, safety_factor_scale="log", ), diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py index 3097bcb7575df..c3b7dd621c868 100644 --- a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py @@ -9,6 +9,35 @@ # Helpers # # ------- # +# sinc +@handle_test( + fn_tree="functional.ivy.experimental.lgamma", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + small_abs_safety_factor=3, + safety_factor_scale="log", + ), + test_gradients=st.just(False), +) +def test_lgamma( + *, + dtype_and_x, + test_flags, + backend_fw, + fn_name, + on_device, + ground_truth_backend, +): + input_dtype, x = dtype_and_x + helpers.test_function( + input_dtypes=input_dtype, + test_flags=test_flags, + ground_truth_backend=ground_truth_backend, + on_device=on_device, + fw=backend_fw, + fn_name=fn_name, + x=x[0], + ) # sinc From 58f2c8eb59a257fed1974fc98407c36428891702 Mon Sep 17 00:00:00 2001 From: marccgrau Date: Mon, 10 Jul 2023 12:37:28 +0000 Subject: [PATCH 03/12] added method lgamma to ivy.array and ivy.container --- .../array/experimental/statistical.py | 35 +++++ .../container/experimental/statistical.py | 131 ++++++++++++++++++ 2 files changed, 166 insertions(+) diff --git 
a/ivy/data_classes/array/experimental/statistical.py b/ivy/data_classes/array/experimental/statistical.py index b304b237162b0..1cd4c193c33a8 100644 --- a/ivy/data_classes/array/experimental/statistical.py +++ b/ivy/data_classes/array/experimental/statistical.py @@ -7,6 +7,41 @@ class _ArrayWithStatisticalExperimental(abc.ABC): + def lgamma(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array: + """ + ivy.Array instance method variant of ivy.lgamma. This method simply wraps the + function, and so the docstring for ivy.lgamma also applies to this method with + minimal changes. + + Parameters + ---------- + self + input array. Should have a real-valued floating-point data type. + out + optional output array, for writing the result to. It must have a shape that + the inputs broadcast to. + + Returns + ------- + ret + an array containing the evaluated result for each element in ``self``. + The returned array must have a real-valued floating-point data + type determined by :ref:`type-promotion`. + + Examples + -------- + >>> x = ivy.array([1 , 2 , 3 ]) + >>> y = x.lgamma() + >>> print(y) + ivy.array([0., 0., 0.69314718]) + + >>> x = ivy.array([4.5, -4, -5.6]) + >>> x.lgamma(out = x) + >>> print(x) + ivy.array([2.45373654, inf, -4.6477685 ]) + """ + return ivy.lgamma(self._data, out=out) + def histogram( self: ivy.Array, /, diff --git a/ivy/data_classes/container/experimental/statistical.py b/ivy/data_classes/container/experimental/statistical.py index dc9e2807210b4..a1957cd7e36b3 100644 --- a/ivy/data_classes/container/experimental/statistical.py +++ b/ivy/data_classes/container/experimental/statistical.py @@ -7,6 +7,137 @@ class _ContainerWithStatisticalExperimental(ContainerBase): + @staticmethod + def static_lgamma( + x: Union[ivy.Container, ivy.Array, ivy.NativeArray], + /, + *, + key_chains: Optional[Union[List[str], Dict[str, str]]] = None, + to_apply: bool = True, + prune_unapplied: bool = False, + map_sequences: bool = False, + out: Optional[ivy.Container] = None, + ) -> ivy.Container: + """ + ivy.Container static method variant of ivy.lgamma. This method simply wraps the + function, and so the docstring for ivy.lgamma also applies to this method with + minimal changes. + + Parameters + ---------- + x + input container. Should have a real-valued floating-point data type. + key_chains + The key-chains to apply or not apply the method to. Default is ``None``. + to_apply + If True, the method will be applied to key_chains, otherwise key_chains + will be skipped. Default is ``True``. + prune_unapplied + Whether to prune key_chains for which the function was not applied. + Default is ``False``. + map_sequences + Whether to also map method to sequences (lists, tuples). + Default is ``False``. + out + optional output container, for writing the result to. It must have a shape + that the inputs broadcast to. + + Returns + ------- + ret + a container containing the evaluated result for each element in ``x``. + The returned array must have a real-valued floating-point data type + determined by :ref:`type-promotion`. 
+ + Examples + -------- + >>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.1])) + >>> y = ivy.Container.static_lgamma(x) + >>> print(y) + { + a: ivy.array([inf, 0., 0.]), + b: ivy.array([0.69314718, 1.79175949, 3.32976389]) + } + + + >>> x = ivy.Container(a=ivy.array([0., 2.]), b=ivy.array([ 4., 5.1])) + >>> ivy.Container.static_lgamma(x, out = x) + >>> print(y) + { + a: ivy.array([inf, 0.]), + b: ivy.array([1.79175949, 3.32976389]) + } + """ + return ContainerBase.cont_multi_map_in_function( + "lgamma", + x, + key_chains=key_chains, + to_apply=to_apply, + prune_unapplied=prune_unapplied, + map_sequences=map_sequences, + out=out, + ) + + def lgamma( + self: ivy.Container, + *, + key_chains: Optional[Union[List[str], Dict[str, str]]] = None, + to_apply: bool = True, + prune_unapplied: bool = False, + map_sequences: bool = False, + out: Optional[ivy.Container] = None, + ) -> ivy.Container: + """ + ivy.Container instance method variant of ivy.lgamma. This method simply wraps + the function, and so the docstring for ivy.lgamma also applies to this method + with minimal changes. + + Parameters + ---------- + self + input container. Should have a real-valued floating-point data type. + key_chains + The key-chains to apply or not apply the method to. Default is ``None``. + to_apply + If True, the method will be applied to key_chains, otherwise key_chains + will be skipped. Default is ``True``. + prune_unapplied + Whether to prune key_chains for which the function was not applied. + Default is ``False``. + map_sequences + Whether to also map method to sequences (lists, tuples). + Default is ``False``. + out + optional output container, for writing the result to. It must have a shape + that the inputs broadcast to. + + Returns + ------- + ret + a container containing the evaluated result for each element in ``self``. + The returned array must have a real-valued floating-point data type + determined by :ref:`type-promotion`. + + Examples + -------- + >>> x = ivy.Container(a=ivy.array([1.6, 2.6, 3.5]), + ... b=ivy.array([4.5, 5.3, 2.3])) + >>> y = x.lgamma() + >>> print(y) + { + a: ivy.array([-0.11259222, 0.3574121, 1.20097375]), + b: ivy.array([2.45373821, 3.63963795, 0.15418935]) + } + """ + return self.static_lgamma( + self, + key_chains=key_chains, + to_apply=to_apply, + prune_unapplied=prune_unapplied, + map_sequences=map_sequences, + out=out, + ) + @staticmethod def static_histogram( a: Union[ivy.Array, ivy.NativeArray, ivy.Container], From d92f80a10527023183e31e9e81d3801cdcf19129 Mon Sep 17 00:00:00 2001 From: marccgrau Date: Mon, 10 Jul 2023 12:52:18 +0000 Subject: [PATCH 04/12] added examples to docstring --- .../ivy/experimental/elementwise.py | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/ivy/functional/ivy/experimental/elementwise.py b/ivy/functional/ivy/experimental/elementwise.py index a38fda0ac64c5..e37e35ec84f4e 100644 --- a/ivy/functional/ivy/experimental/elementwise.py +++ b/ivy/functional/ivy/experimental/elementwise.py @@ -46,6 +46,27 @@ def lgamma( an array containing the natural log of Gamma(x) of each element in x. The returned array must have a floating-point data type determined by :ref:`type-promotion`. + + Examples + -------- + >>> x = ivy.Container(a=ivy.array([1.6, 2.6, 3.5]), + ... 
b=ivy.array([4.5, 5.3, 2.3])) + >>> y = x.lgamma() + >>> print(y) + { + a: ivy.array([-0.11259222, 0.3574121, 1.20097375]), + b: ivy.array([2.45373821, 3.63963795, 0.15418935]) + } + + >>> x = ivy.array([1 , 2 , 3 ]) + >>> y = x.lgamma() + >>> print(y) + ivy.array([0., 0., 0.69314718]) + + >>> x = ivy.array([4.5, -4, -5.6]) + >>> x.lgamma(out = x) + >>> print(x) + ivy.array([2.45373654, inf, -4.6477685 ]) """ return ivy.current_backend(x).lgamma(x, out=out) From ccaa68ce709df8ccdf0ae969585b5ed5eef89e62 Mon Sep 17 00:00:00 2001 From: marccgrau Date: Mon, 10 Jul 2023 12:53:19 +0000 Subject: [PATCH 05/12] dtypes valid for testing lgamma --- .../test_experimental/test_core/test_elementwise.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py index c3b7dd621c868..b52fa46da3d48 100644 --- a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py @@ -9,11 +9,11 @@ # Helpers # # ------- # -# sinc +# lgamma @handle_test( fn_tree="functional.ivy.experimental.lgamma", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("float"), + available_dtypes=helpers.get_dtypes("valid"), small_abs_safety_factor=3, safety_factor_scale="log", ), From bb3b172ec550264cbee43641b864a99a2c226ebb Mon Sep 17 00:00:00 2001 From: marccgrau Date: Mon, 10 Jul 2023 12:57:18 +0000 Subject: [PATCH 06/12] all methods in elementwise.py as the implementations in the backend --- .../array/experimental/elementwise.py | 35 +++++ .../array/experimental/statistical.py | 35 ----- .../container/experimental/elementwise.py | 131 ++++++++++++++++++ .../container/experimental/statistical.py | 131 ------------------ 4 files changed, 166 insertions(+), 166 deletions(-) diff --git a/ivy/data_classes/array/experimental/elementwise.py b/ivy/data_classes/array/experimental/elementwise.py index 484302e838fba..5fb91597b9004 100644 --- a/ivy/data_classes/array/experimental/elementwise.py +++ b/ivy/data_classes/array/experimental/elementwise.py @@ -8,6 +8,41 @@ class _ArrayWithElementWiseExperimental(abc.ABC): + def lgamma(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array: + """ + ivy.Array instance method variant of ivy.lgamma. This method simply wraps the + function, and so the docstring for ivy.lgamma also applies to this method with + minimal changes. + + Parameters + ---------- + self + input array. Should have a real-valued floating-point data type. + out + optional output array, for writing the result to. It must have a shape that + the inputs broadcast to. + + Returns + ------- + ret + an array containing the evaluated result for each element in ``self``. + The returned array must have a real-valued floating-point data + type determined by :ref:`type-promotion`. + + Examples + -------- + >>> x = ivy.array([1 , 2 , 3 ]) + >>> y = x.lgamma() + >>> print(y) + ivy.array([0., 0., 0.69314718]) + + >>> x = ivy.array([4.5, -4, -5.6]) + >>> x.lgamma(out = x) + >>> print(x) + ivy.array([2.45373654, inf, -4.6477685 ]) + """ + return ivy.lgamma(self._data, out=out) + def sinc(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array: """ ivy.Array instance method variant of ivy.sinc. 
This method simply wraps the diff --git a/ivy/data_classes/array/experimental/statistical.py b/ivy/data_classes/array/experimental/statistical.py index 1cd4c193c33a8..b304b237162b0 100644 --- a/ivy/data_classes/array/experimental/statistical.py +++ b/ivy/data_classes/array/experimental/statistical.py @@ -7,41 +7,6 @@ class _ArrayWithStatisticalExperimental(abc.ABC): - def lgamma(self: ivy.Array, *, out: Optional[ivy.Array] = None) -> ivy.Array: - """ - ivy.Array instance method variant of ivy.lgamma. This method simply wraps the - function, and so the docstring for ivy.lgamma also applies to this method with - minimal changes. - - Parameters - ---------- - self - input array. Should have a real-valued floating-point data type. - out - optional output array, for writing the result to. It must have a shape that - the inputs broadcast to. - - Returns - ------- - ret - an array containing the evaluated result for each element in ``self``. - The returned array must have a real-valued floating-point data - type determined by :ref:`type-promotion`. - - Examples - -------- - >>> x = ivy.array([1 , 2 , 3 ]) - >>> y = x.lgamma() - >>> print(y) - ivy.array([0., 0., 0.69314718]) - - >>> x = ivy.array([4.5, -4, -5.6]) - >>> x.lgamma(out = x) - >>> print(x) - ivy.array([2.45373654, inf, -4.6477685 ]) - """ - return ivy.lgamma(self._data, out=out) - def histogram( self: ivy.Array, /, diff --git a/ivy/data_classes/container/experimental/elementwise.py b/ivy/data_classes/container/experimental/elementwise.py index 5d3f384a309bb..05cea7e03a898 100644 --- a/ivy/data_classes/container/experimental/elementwise.py +++ b/ivy/data_classes/container/experimental/elementwise.py @@ -8,6 +8,137 @@ class _ContainerWithElementWiseExperimental(ContainerBase): + @staticmethod + def static_lgamma( + x: Union[ivy.Container, ivy.Array, ivy.NativeArray], + /, + *, + key_chains: Optional[Union[List[str], Dict[str, str]]] = None, + to_apply: bool = True, + prune_unapplied: bool = False, + map_sequences: bool = False, + out: Optional[ivy.Container] = None, + ) -> ivy.Container: + """ + ivy.Container static method variant of ivy.lgamma. This method simply wraps the + function, and so the docstring for ivy.lgamma also applies to this method with + minimal changes. + + Parameters + ---------- + x + input container. Should have a real-valued floating-point data type. + key_chains + The key-chains to apply or not apply the method to. Default is ``None``. + to_apply + If True, the method will be applied to key_chains, otherwise key_chains + will be skipped. Default is ``True``. + prune_unapplied + Whether to prune key_chains for which the function was not applied. + Default is ``False``. + map_sequences + Whether to also map method to sequences (lists, tuples). + Default is ``False``. + out + optional output container, for writing the result to. It must have a shape + that the inputs broadcast to. + + Returns + ------- + ret + a container containing the evaluated result for each element in ``x``. + The returned array must have a real-valued floating-point data type + determined by :ref:`type-promotion`. 
+ + Examples + -------- + >>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.1])) + >>> y = ivy.Container.static_lgamma(x) + >>> print(y) + { + a: ivy.array([inf, 0., 0.]), + b: ivy.array([0.69314718, 1.79175949, 3.32976389]) + } + + + >>> x = ivy.Container(a=ivy.array([0., 2.]), b=ivy.array([ 4., 5.1])) + >>> ivy.Container.static_lgamma(x, out = x) + >>> print(y) + { + a: ivy.array([inf, 0.]), + b: ivy.array([1.79175949, 3.32976389]) + } + """ + return ContainerBase.cont_multi_map_in_function( + "lgamma", + x, + key_chains=key_chains, + to_apply=to_apply, + prune_unapplied=prune_unapplied, + map_sequences=map_sequences, + out=out, + ) + + def lgamma( + self: ivy.Container, + *, + key_chains: Optional[Union[List[str], Dict[str, str]]] = None, + to_apply: bool = True, + prune_unapplied: bool = False, + map_sequences: bool = False, + out: Optional[ivy.Container] = None, + ) -> ivy.Container: + """ + ivy.Container instance method variant of ivy.lgamma. This method simply wraps + the function, and so the docstring for ivy.lgamma also applies to this method + with minimal changes. + + Parameters + ---------- + self + input container. Should have a real-valued floating-point data type. + key_chains + The key-chains to apply or not apply the method to. Default is ``None``. + to_apply + If True, the method will be applied to key_chains, otherwise key_chains + will be skipped. Default is ``True``. + prune_unapplied + Whether to prune key_chains for which the function was not applied. + Default is ``False``. + map_sequences + Whether to also map method to sequences (lists, tuples). + Default is ``False``. + out + optional output container, for writing the result to. It must have a shape + that the inputs broadcast to. + + Returns + ------- + ret + a container containing the evaluated result for each element in ``self``. + The returned array must have a real-valued floating-point data type + determined by :ref:`type-promotion`. + + Examples + -------- + >>> x = ivy.Container(a=ivy.array([1.6, 2.6, 3.5]), + ... b=ivy.array([4.5, 5.3, 2.3])) + >>> y = x.lgamma() + >>> print(y) + { + a: ivy.array([-0.11259222, 0.3574121, 1.20097375]), + b: ivy.array([2.45373821, 3.63963795, 0.15418935]) + } + """ + return self.static_lgamma( + self, + key_chains=key_chains, + to_apply=to_apply, + prune_unapplied=prune_unapplied, + map_sequences=map_sequences, + out=out, + ) + @staticmethod def static_sinc( x: ivy.Container, diff --git a/ivy/data_classes/container/experimental/statistical.py b/ivy/data_classes/container/experimental/statistical.py index a1957cd7e36b3..dc9e2807210b4 100644 --- a/ivy/data_classes/container/experimental/statistical.py +++ b/ivy/data_classes/container/experimental/statistical.py @@ -7,137 +7,6 @@ class _ContainerWithStatisticalExperimental(ContainerBase): - @staticmethod - def static_lgamma( - x: Union[ivy.Container, ivy.Array, ivy.NativeArray], - /, - *, - key_chains: Optional[Union[List[str], Dict[str, str]]] = None, - to_apply: bool = True, - prune_unapplied: bool = False, - map_sequences: bool = False, - out: Optional[ivy.Container] = None, - ) -> ivy.Container: - """ - ivy.Container static method variant of ivy.lgamma. This method simply wraps the - function, and so the docstring for ivy.lgamma also applies to this method with - minimal changes. - - Parameters - ---------- - x - input container. Should have a real-valued floating-point data type. - key_chains - The key-chains to apply or not apply the method to. Default is ``None``. 
- to_apply - If True, the method will be applied to key_chains, otherwise key_chains - will be skipped. Default is ``True``. - prune_unapplied - Whether to prune key_chains for which the function was not applied. - Default is ``False``. - map_sequences - Whether to also map method to sequences (lists, tuples). - Default is ``False``. - out - optional output container, for writing the result to. It must have a shape - that the inputs broadcast to. - - Returns - ------- - ret - a container containing the evaluated result for each element in ``x``. - The returned array must have a real-valued floating-point data type - determined by :ref:`type-promotion`. - - Examples - -------- - >>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.1])) - >>> y = ivy.Container.static_lgamma(x) - >>> print(y) - { - a: ivy.array([inf, 0., 0.]), - b: ivy.array([0.69314718, 1.79175949, 3.32976389]) - } - - - >>> x = ivy.Container(a=ivy.array([0., 2.]), b=ivy.array([ 4., 5.1])) - >>> ivy.Container.static_lgamma(x, out = x) - >>> print(y) - { - a: ivy.array([inf, 0.]), - b: ivy.array([1.79175949, 3.32976389]) - } - """ - return ContainerBase.cont_multi_map_in_function( - "lgamma", - x, - key_chains=key_chains, - to_apply=to_apply, - prune_unapplied=prune_unapplied, - map_sequences=map_sequences, - out=out, - ) - - def lgamma( - self: ivy.Container, - *, - key_chains: Optional[Union[List[str], Dict[str, str]]] = None, - to_apply: bool = True, - prune_unapplied: bool = False, - map_sequences: bool = False, - out: Optional[ivy.Container] = None, - ) -> ivy.Container: - """ - ivy.Container instance method variant of ivy.lgamma. This method simply wraps - the function, and so the docstring for ivy.lgamma also applies to this method - with minimal changes. - - Parameters - ---------- - self - input container. Should have a real-valued floating-point data type. - key_chains - The key-chains to apply or not apply the method to. Default is ``None``. - to_apply - If True, the method will be applied to key_chains, otherwise key_chains - will be skipped. Default is ``True``. - prune_unapplied - Whether to prune key_chains for which the function was not applied. - Default is ``False``. - map_sequences - Whether to also map method to sequences (lists, tuples). - Default is ``False``. - out - optional output container, for writing the result to. It must have a shape - that the inputs broadcast to. - - Returns - ------- - ret - a container containing the evaluated result for each element in ``self``. - The returned array must have a real-valued floating-point data type - determined by :ref:`type-promotion`. - - Examples - -------- - >>> x = ivy.Container(a=ivy.array([1.6, 2.6, 3.5]), - ... 
b=ivy.array([4.5, 5.3, 2.3])) - >>> y = x.lgamma() - >>> print(y) - { - a: ivy.array([-0.11259222, 0.3574121, 1.20097375]), - b: ivy.array([2.45373821, 3.63963795, 0.15418935]) - } - """ - return self.static_lgamma( - self, - key_chains=key_chains, - to_apply=to_apply, - prune_unapplied=prune_unapplied, - map_sequences=map_sequences, - out=out, - ) - @staticmethod def static_histogram( a: Union[ivy.Array, ivy.NativeArray, ivy.Container], From 9352660c15d6b091c30085aaee77c4a1d153ed9a Mon Sep 17 00:00:00 2001 From: marccgrau <46031196+marccgrau@users.noreply.github.com> Date: Thu, 13 Jul 2023 13:59:56 +0200 Subject: [PATCH 07/12] remove frontend parts in tf --- ivy/functional/frontends/tensorflow/math.py | 5 ----- ivy/functional/frontends/tensorflow/raw_ops.py | 2 -- 2 files changed, 7 deletions(-) diff --git a/ivy/functional/frontends/tensorflow/math.py b/ivy/functional/frontends/tensorflow/math.py index 005c1dd05aeee..f3f33f90a6e4e 100644 --- a/ivy/functional/frontends/tensorflow/math.py +++ b/ivy/functional/frontends/tensorflow/math.py @@ -194,11 +194,6 @@ def is_strictly_increasing(x, name="is_strictly_increasing"): return ivy.all(ivy.less(x, ivy.roll(x, -1))) -@to_ivy_arrays_and_back -def lgamma(x, name=None): - return ivy.lgamma(x) - - @to_ivy_arrays_and_back def log_sigmoid(x, name=None): return -ivy.softplus(-x) diff --git a/ivy/functional/frontends/tensorflow/raw_ops.py b/ivy/functional/frontends/tensorflow/raw_ops.py index 17f6be7efa750..bb75a07e6b3cb 100644 --- a/ivy/functional/frontends/tensorflow/raw_ops.py +++ b/ivy/functional/frontends/tensorflow/raw_ops.py @@ -289,8 +289,6 @@ def Log(*, x, name="Log"): return ivy.log(x) -Lgamma = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.lgamma)) - Log1p = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.log1p)) From 762bafc8319cdf4f54cbef41c8843c46befc2226 Mon Sep 17 00:00:00 2001 From: marccgrau <46031196+marccgrau@users.noreply.github.com> Date: Thu, 13 Jul 2023 14:01:22 +0200 Subject: [PATCH 08/12] remove tf frontend test --- .../test_tensorflow/test_math.py | 29 ------------------- 1 file changed, 29 deletions(-) diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py index 25702e2e5d9a6..779de2bb125d6 100644 --- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py +++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_math.py @@ -465,35 +465,6 @@ def test_tensorflow_negative( ) -# lgamma -@handle_frontend_test( - fn_tree="tensorflow.math.lgamma", - dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - small_abs_safety_factor=3, - safety_factor_scale="log", - ), - test_with_out=st.just(False), -) -def test_tensorflow_lgamma( - *, - dtype_and_x, - frontend, - test_flags, - fn_tree, - on_device, -): - input_dtype, x = dtype_and_x - helpers.test_frontend_function( - input_dtypes=input_dtype, - frontend=frontend, - test_flags=test_flags, - fn_tree=fn_tree, - on_device=on_device, - x=x[0], - ) - - # logical_and @handle_frontend_test( fn_tree="tensorflow.math.logical_and", From 97fb72928a35fa1d2b483fc4c5796ce7f975d92a Mon Sep 17 00:00:00 2001 From: marccgrau <46031196+marccgrau@users.noreply.github.com> Date: Mon, 17 Jul 2023 21:19:13 +0200 Subject: [PATCH 09/12] remove decorator not yet in main --- ivy/functional/ivy/experimental/elementwise.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ivy/functional/ivy/experimental/elementwise.py 
b/ivy/functional/ivy/experimental/elementwise.py index 8829a76849121..46938c2a53a72 100644 --- a/ivy/functional/ivy/experimental/elementwise.py +++ b/ivy/functional/ivy/experimental/elementwise.py @@ -21,7 +21,6 @@ @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back -@integer_arrays_to_float @handle_array_function def lgamma( x: Union[ivy.Array, ivy.NativeArray], From 04a2b58eb4616550d47b879b3a0ed17bd25050e2 Mon Sep 17 00:00:00 2001 From: marccgrau <46031196+marccgrau@users.noreply.github.com> Date: Mon, 17 Jul 2023 21:26:59 +0200 Subject: [PATCH 10/12] remove decorator from sinc too --- ivy/functional/ivy/experimental/elementwise.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ivy/functional/ivy/experimental/elementwise.py b/ivy/functional/ivy/experimental/elementwise.py index 46938c2a53a72..b334957b3a320 100644 --- a/ivy/functional/ivy/experimental/elementwise.py +++ b/ivy/functional/ivy/experimental/elementwise.py @@ -75,7 +75,6 @@ def lgamma( @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back -@integer_arrays_to_float @handle_device_shifting def sinc( x: Union[ivy.Array, ivy.NativeArray], From 656b45bd7a076aa5a751ac4791f6814348ce7e74 Mon Sep 17 00:00:00 2001 From: marccgrau Date: Tue, 18 Jul 2023 08:51:29 +0000 Subject: [PATCH 11/12] add supported dtypes for lgamma in backends --- ivy/functional/backends/jax/experimental/elementwise.py | 5 +++++ ivy/functional/backends/mxnet/experimental/elementwise.py | 8 +++++++- .../backends/paddle/experimental/elementwise.py | 4 ++++ .../backends/tensorflow/experimental/elementwise.py | 4 ++++ ivy/functional/backends/torch/experimental/elementwise.py | 1 + .../test_experimental/test_core/test_elementwise.py | 6 ++---- 6 files changed, 23 insertions(+), 5 deletions(-) diff --git a/ivy/functional/backends/jax/experimental/elementwise.py b/ivy/functional/backends/jax/experimental/elementwise.py index 64cac76bf6044..fad47349749b0 100644 --- a/ivy/functional/backends/jax/experimental/elementwise.py +++ b/ivy/functional/backends/jax/experimental/elementwise.py @@ -7,10 +7,14 @@ default_float_dtype, is_float_dtype, ) +from ivy.func_wrapper import ( + with_supported_dtypes, +) from ivy.functional.backends.jax import JaxArray import jax.numpy as jnp import jax.scipy as js import jax.lax as jlax +from .. import backend_version jax_ArrayLike = Union[JaxArray, Number] @@ -19,6 +23,7 @@ def sinc(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray: return jnp.sinc(x) +@with_supported_dtypes({"0.4.13 and below": ("float",)}, backend_version) def lgamma(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray: return jlax.lgamma(x) diff --git a/ivy/functional/backends/mxnet/experimental/elementwise.py b/ivy/functional/backends/mxnet/experimental/elementwise.py index f431883b2d10b..ce69fe9e91ed3 100644 --- a/ivy/functional/backends/mxnet/experimental/elementwise.py +++ b/ivy/functional/backends/mxnet/experimental/elementwise.py @@ -3,15 +3,21 @@ import mxnet as mx from ivy.utils.exceptions import IvyNotImplementedException +from ivy.func_wrapper import with_supported_dtypes +from .. 
import backend_version +@with_supported_dtypes( + {"1.9.1 and below": ("float16", "float32", "float64")}, + backend_version, +) def lgamma( x: Union[(None, mx.ndarray.NDArray)], /, *, out: Optional[Union[(None, mx.ndarray.NDArray)]] = None, ) -> Union[(None, mx.ndarray.NDArray)]: - return mx.log(mx.npx.gamma(x)) + return mx.np.log(mx.npx.gamma(x)) def sinc( diff --git a/ivy/functional/backends/paddle/experimental/elementwise.py b/ivy/functional/backends/paddle/experimental/elementwise.py index 87e60c245aaf8..1dba17b9cf54b 100644 --- a/ivy/functional/backends/paddle/experimental/elementwise.py +++ b/ivy/functional/backends/paddle/experimental/elementwise.py @@ -17,6 +17,10 @@ from .. import backend_version +@with_supported_dtypes( + {"2.5.0 and below": ("float32", "float64")}, + backend_version, +) def lgamma( x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None ) -> paddle.Tensor: diff --git a/ivy/functional/backends/tensorflow/experimental/elementwise.py b/ivy/functional/backends/tensorflow/experimental/elementwise.py index 5dc55907036dd..3bd70c83ff1ea 100644 --- a/ivy/functional/backends/tensorflow/experimental/elementwise.py +++ b/ivy/functional/backends/tensorflow/experimental/elementwise.py @@ -11,6 +11,10 @@ from .. import backend_version +@with_supported_dtypes( + {"2.13.0 and below": ("float16", "float32", "float64")}, + backend_version, +) def lgamma( x: Union[tf.Tensor, tf.Variable], /, diff --git a/ivy/functional/backends/torch/experimental/elementwise.py b/ivy/functional/backends/torch/experimental/elementwise.py index d147584aaa158..2d45e817077e1 100644 --- a/ivy/functional/backends/torch/experimental/elementwise.py +++ b/ivy/functional/backends/torch/experimental/elementwise.py @@ -14,6 +14,7 @@ from .. import backend_version +@with_unsupported_dtypes({"2.0.1 and below": ("bfloat16", "complex")}, backend_version) def lgamma(x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None) -> torch.Tensor: return torch.lgamma(x, out=out) diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py index d083e8ec25642..182de5072396f 100644 --- a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py @@ -13,7 +13,7 @@ @handle_test( fn_tree="functional.ivy.experimental.lgamma", dtype_and_x=helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), + available_dtypes=helpers.get_dtypes("float"), small_abs_safety_factor=3, safety_factor_scale="log", ), @@ -26,13 +26,11 @@ def test_lgamma( backend_fw, fn_name, on_device, - ground_truth_backend, ): input_dtype, x = dtype_and_x helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, - ground_truth_backend=ground_truth_backend, on_device=on_device, fw=backend_fw, fn_name=fn_name, @@ -429,7 +427,7 @@ def test_diff( test_flags, backend_fw, fn_name, - on_device + on_device, ): input_dtype, x, axis = dtype_n_x_n_axis _, prepend = dtype_prepend From 6962ccb61e043cc394eafe0d63ce7912364ddd00 Mon Sep 17 00:00:00 2001 From: marccgrau Date: Tue, 18 Jul 2023 11:58:31 +0000 Subject: [PATCH 12/12] tested dtypes instead of documented dtypes --- ivy/functional/backends/jax/experimental/elementwise.py | 4 +++- ivy/functional/backends/torch/experimental/elementwise.py | 2 +- .../test_experimental/test_core/test_elementwise.py | 1 - 3 files changed, 4 insertions(+), 3 deletions(-) diff 
--git a/ivy/functional/backends/jax/experimental/elementwise.py b/ivy/functional/backends/jax/experimental/elementwise.py index fad47349749b0..3d75ad0268a04 100644 --- a/ivy/functional/backends/jax/experimental/elementwise.py +++ b/ivy/functional/backends/jax/experimental/elementwise.py @@ -23,7 +23,9 @@ def sinc(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray: return jnp.sinc(x) -@with_supported_dtypes({"0.4.13 and below": ("float",)}, backend_version) +@with_supported_dtypes( + {"0.4.13 and below": ("float16", "float32", "float64")}, backend_version +) def lgamma(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray: return jlax.lgamma(x) diff --git a/ivy/functional/backends/torch/experimental/elementwise.py b/ivy/functional/backends/torch/experimental/elementwise.py index 2d45e817077e1..427843cc2d9f1 100644 --- a/ivy/functional/backends/torch/experimental/elementwise.py +++ b/ivy/functional/backends/torch/experimental/elementwise.py @@ -14,7 +14,7 @@ from .. import backend_version -@with_unsupported_dtypes({"2.0.1 and below": ("bfloat16", "complex")}, backend_version) +@with_supported_dtypes({"2.0.1 and below": ("float32", "float64")}, backend_version) def lgamma(x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None) -> torch.Tensor: return torch.lgamma(x, out=out) diff --git a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py index 182de5072396f..e472c881067ce 100644 --- a/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py +++ b/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_elementwise.py @@ -14,7 +14,6 @@ fn_tree="functional.ivy.experimental.lgamma", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), - small_abs_safety_factor=3, safety_factor_scale="log", ), test_gradients=st.just(False),
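
A minimal usage sketch of the lgamma API introduced by this series, assuming a backend whose kernel is implemented in these patches (for example torch or tensorflow; the NumPy backend here still raises IvyNotImplementedException). The expected values simply mirror the docstring examples added in the diffs above.

    import ivy

    ivy.set_backend("torch")  # any backend with an lgamma kernel in this series

    # functional API (ivy/functional/ivy/experimental/elementwise.py)
    x = ivy.array([1.0, 2.0, 3.0])
    print(ivy.lgamma(x))   # ivy.array([0., 0., 0.69314718])

    # ivy.Array instance method (ivy/data_classes/array/experimental/elementwise.py)
    print(x.lgamma())      # same result as the functional call

    # ivy.Container static and instance methods
    # (ivy/data_classes/container/experimental/elementwise.py)
    c = ivy.Container(a=ivy.array([1.6, 2.6, 3.5]),
                      b=ivy.array([4.5, 5.3, 2.3]))
    print(ivy.Container.static_lgamma(c))
    print(c.lgamma())      # a: [-0.11259222, 0.3574121, 1.20097375]
                           # b: [2.45373821, 3.63963795, 0.15418935]

The array, container, and functional entry points all dispatch to the same backend kernel, so results are identical across the three call styles.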