From b50266f7d765e94a93365c94ec9956cfe3b66d93 Mon Sep 17 00:00:00 2001
From: mcw-anasuya
Date: Wed, 23 Oct 2024 16:40:29 +0000
Subject: [PATCH] #13674: Update documentation for backward ops

---
 .../sweeps/eltwise/unary_backward/acosh_bw.py |  18 +--
 .../unary_backward/unary_backward_pybind.hpp  | 118 ++++++++++++++++--
 2 files changed, 116 insertions(+), 20 deletions(-)

diff --git a/tests/sweep_framework/sweeps/eltwise/unary_backward/acosh_bw.py b/tests/sweep_framework/sweeps/eltwise/unary_backward/acosh_bw.py
index 47e1ce4162ed..c27f0bae6cd4 100644
--- a/tests/sweep_framework/sweeps/eltwise/unary_backward/acosh_bw.py
+++ b/tests/sweep_framework/sweeps/eltwise/unary_backward/acosh_bw.py
@@ -6,7 +6,6 @@
 from functools import partial
 
 import torch
-import random
 import ttnn
 from tests.sweep_framework.sweep_utils.utils import gen_shapes
 from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt
@@ -14,11 +13,6 @@ from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time
 from models.utility_functions import torch_random
 
-# Override the default timeout in seconds for hang detection.
-TIMEOUT = 30
-
-random.seed(0)
-
 # Parameters provided to the test vector generator are defined here.
 # They are defined as dict-type suites that contain the arguments to the run function as keys, and lists of possible inputs as values.
@@ -40,6 +34,15 @@
 }
 
 
+# Invalidate vector is called during the generation phase, where each vector will be passed in.
+# If invalidated, the vector will still be stored but will be skipped.
+# Returns False, None if the vector is valid, and True, str with a reason for invalidation if it is invalid.
+def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
+    if test_vector["input_a_dtype"] == ttnn.bfloat8_b or test_vector["grad_dtype"] == ttnn.bfloat8_b:
+        return True, "ttnn.bfloat8_b is not supported"
+    return False, None
+
+
 # This is the run instructions for the test, defined by the developer.
 # The run function must take the above-defined parameters as inputs.
 # The runner will call this run function with each test vector, and the returned results from this function will be stored.
@@ -56,8 +59,7 @@ def run(
     *,
     device,
 ) -> list:
-    data_seed = random.randint(0, 20000000)
-    torch.manual_seed(data_seed)
+    torch.manual_seed(0)
 
     torch_grad_tensor = gen_func_with_cast_tt(partial(torch_random, low=-10, high=10, dtype=torch.float32), grad_dtype)(
         input_shape
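The invalidate_vector hook added above is the screening step the sweep framework applies during vector generation: every generated vector is passed in, stored either way, and skipped at run time when the hook flags it. Below is a minimal, hypothetical harness that illustrates that contract outside the framework; the sample vectors and the driver loop are illustrative only, and just the ttnn import is assumed.

    from typing import Optional, Tuple

    import ttnn

    # The same check the patch adds: vectors using ttnn.bfloat8_b are kept in
    # the vector store but never executed.
    def invalidate_vector(test_vector) -> Tuple[bool, Optional[str]]:
        if test_vector["input_a_dtype"] == ttnn.bfloat8_b or test_vector["grad_dtype"] == ttnn.bfloat8_b:
            return True, "ttnn.bfloat8_b is not supported"
        return False, None

    # Illustrative vectors; a real sweep derives these from the `parameters` suites.
    vectors = [
        {"input_a_dtype": ttnn.bfloat16, "grad_dtype": ttnn.bfloat16},
        {"input_a_dtype": ttnn.bfloat8_b, "grad_dtype": ttnn.bfloat16},
    ]
    for vector in vectors:
        invalid, reason = invalidate_vector(vector)
        print("skip" if invalid else "run", vector, reason or "")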
diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
index f8025bb997b0..e6e96b578ba5 100644
--- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
+++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp
@@ -957,7 +957,7 @@ void bind_unary_backward_opt(py::module& module, const unary_backward_operation_
 template <typename unary_backward_operation_t>
 void bind_unary_backward(
-    py::module& module, const unary_backward_operation_t& operation, const std::string& description) {
+    py::module& module, const unary_backward_operation_t& operation, const std::string& description, const std::string& note = "") {
     auto doc = fmt::format(
         R"doc(
         {2}
@@ -972,6 +972,9 @@ void bind_unary_backward(
         Returns:
             List of ttnn.Tensor: the output tensor.
 
+        Note:
+            {3}
+
         Example:
 
             >>> grad_tensor = ttnn.to_device(ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16)), device=device)
 
         )doc",
         operation.base_name(),
         operation.python_fully_qualified_name(),
-        description);
+        description,
+        note);
 
     bind_registered_operation(
         module,
@@ -1266,14 +1270,32 @@ void py_module(py::module& module) {
         module,
         ttnn::multigammaln_bw,
         R"doc(Performs backward operations for multigammaln on :attr:`input_tensor` with given :attr:`grad_tensor` and value of P is taken as 4. mvlgamma is referred to as multigammaln.
-        Input value must be greater than 2.5f)doc");
+        Input value must be greater than 2.5f)doc",
+        R"doc(Supported dtypes, layouts, and ranks:
+
+           +----------------------------+---------------------------------+-------------------+
+           |          Dtypes            |             Layouts             |       Ranks       |
+           +----------------------------+---------------------------------+-------------------+
+           |          BFLOAT16          |              TILE               |      2, 3, 4      |
+           +----------------------------+---------------------------------+-------------------+
+
+        )doc");
 
     detail::bind_unary_backward_prod_bw(module, ttnn::prod_bw);
 
     detail::bind_unary_backward(
         module,
         ttnn::lgamma_bw,
-        R"doc(Performs backward operations for lgamma on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc");
+        R"doc(Performs backward operations for lgamma on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc",
+        R"doc(Supported dtypes, layouts, and ranks:
+
+           +----------------------------+---------------------------------+-------------------+
+           |          Dtypes            |             Layouts             |       Ranks       |
+           +----------------------------+---------------------------------+-------------------+
+           |          BFLOAT16          |              TILE               |      2, 3, 4      |
+           +----------------------------+---------------------------------+-------------------+
+
+        )doc");
 
     detail::bind_unary_backward_optional(
         module,
@@ -1284,17 +1306,44 @@ void py_module(py::module& module) {
     detail::bind_unary_backward(
         module,
         ttnn::hardsigmoid_bw,
-        R"doc(Performs backward operations for hardsigmoid on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc");
+        R"doc(Performs backward operations for hardsigmoid on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc",
+        R"doc(Supported dtypes, layouts, and ranks:
+
+           +----------------------------+---------------------------------+-------------------+
+           |          Dtypes            |             Layouts             |       Ranks       |
+           +----------------------------+---------------------------------+-------------------+
+           |          BFLOAT16          |              TILE               |      2, 3, 4      |
+           +----------------------------+---------------------------------+-------------------+
+
+        )doc");
 
     detail::bind_unary_backward(
         module,
         ttnn::cos_bw,
-        R"doc(Performs backward operations for cosine on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc");
+        R"doc(Performs backward operations for cosine on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc",
+        R"doc(Supported dtypes, layouts, and ranks:
+
+           +----------------------------+---------------------------------+-------------------+
+           |          Dtypes            |             Layouts             |       Ranks       |
+           +----------------------------+---------------------------------+-------------------+
+           |    BFLOAT16, BFLOAT8_B     |              TILE               |      2, 3, 4      |
+           +----------------------------+---------------------------------+-------------------+
+
+        )doc");
 
     detail::bind_unary_backward(
         module,
         ttnn::acosh_bw,
-        R"doc(Performs backward operations for inverse cosine (acos) on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc");
+        R"doc(Performs backward operations for inverse hyperbolic cosine (acosh) on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc",
R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_op( module, @@ -1467,17 +1516,44 @@ void py_module(py::module& module) { detail::bind_unary_backward( module, ttnn::relu_bw, - R"doc(Performs backward operations for relu on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + R"doc(Performs backward operations for relu on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward( module, ttnn::logit_bw, - R"doc(Performs backward operations for logit on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + R"doc(Performs backward operations for logit on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward( module, ttnn::floor_bw, - R"doc(Performs backward operations for floor on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + R"doc(Performs backward operations for floor on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE, ROW MAJOR | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward_float( module, @@ -1488,12 +1564,30 @@ void py_module(py::module& module) { detail::bind_unary_backward( module, ttnn::round_bw, - R"doc(Performs backward operations for round on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); + R"doc(Performs backward operations for round on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", + R"doc(Supported dtypes, layouts, and ranks: + + +----------------------------+---------------------------------+-------------------+ + | Dtypes | Layouts | Ranks | + +----------------------------+---------------------------------+-------------------+ + | BFLOAT16 | TILE, ROW MAJOR | 2, 3, 4 | + +----------------------------+---------------------------------+-------------------+ + + )doc"); detail::bind_unary_backward( module, ttnn::log_bw, - R"doc(Performs backward operations for logarithm on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); + R"doc(Performs backward operations for logarithm on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", + R"doc(Supported dtypes, layouts, and 
+
+           +----------------------------+---------------------------------+-------------------+
+           |          Dtypes            |             Layouts             |       Ranks       |
+           +----------------------------+---------------------------------+-------------------+
+           |          BFLOAT16          |              TILE               |      2, 3, 4      |
+           +----------------------------+---------------------------------+-------------------+
+
+        )doc");
 
     detail::bind_unary_backward_op(
         module,
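All of the note tables above document the same calling convention that the template's Example block sketches with ttnn.from_torch and ttnn.to_device. For concreteness, a runnable sketch of that pattern for ttnn.acosh_bw follows; it assumes only a working ttnn installation with an attached accelerator at device id 0, and the tensor values are illustrative.

    import torch
    import ttnn

    # Assumption: the local machine exposes a TT device with id 0.
    device = ttnn.open_device(device_id=0)

    # Rank-2 bfloat16 tensors in TILE layout, matching the acosh_bw table above;
    # acosh is defined for inputs > 1, so pick values in its domain.
    torch_input = torch.tensor([[1.5, 2.0]], dtype=torch.bfloat16)
    torch_grad = torch.tensor([[1.0, 1.0]], dtype=torch.bfloat16)

    input_tensor = ttnn.to_device(ttnn.from_torch(torch_input, layout=ttnn.TILE_LAYOUT), device)
    grad_tensor = ttnn.to_device(ttnn.from_torch(torch_grad, layout=ttnn.TILE_LAYOUT), device)

    # Backward of acosh: grad / sqrt(input^2 - 1); the op returns a list of tensors.
    output = ttnn.acosh_bw(grad_tensor, input_tensor)
    print(ttnn.to_torch(output[0]))

    ttnn.close_device(device)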