diff --git a/.github/workflows/ttnn-run-sweeps.yaml b/.github/workflows/ttnn-run-sweeps.yaml index 7668377bf1e3..49a403ecc2c6 100644 --- a/.github/workflows/ttnn-run-sweeps.yaml +++ b/.github/workflows/ttnn-run-sweeps.yaml @@ -18,6 +18,8 @@ on: - eltwise.unary.cos.cos - eltwise.unary.sin.sin - eltwise.unary.clamp.clamp + - eltwise.unary.clamp.clamp_pytorch2 + - eltwise.unary.clamp_min.clamp_min_pytorch2 - eltwise.unary.clip.clip - eltwise.unary.cbrt.cbrt - eltwise.unary.rsub.rsub @@ -47,6 +49,8 @@ on: - eltwise.unary.bitwise.bitwise_xor - eltwise.unary.log_sigmoid.log_sigmoid - eltwise.unary.logical_not_.logical_not_ + - eltwise.unary.logical_not.logical_not_pytorch2 + - eltwise.unary.neg.neg_pytorch2 - eltwise.unary.erf.erf - eltwise.unary.erfinv.erfinv - eltwise.unary.i0.i0 @@ -55,6 +59,8 @@ on: - eltwise.unary.lgamma.lgamma - eltwise.unary.sigmoid.sigmoid - eltwise.unary.sigmoid_accurate.sigmoid_accurate + - eltwise.unary.hardswish.hardswish_pytorch2 + - eltwise.unary.hardtanh.hardtanh_pytorch2 - eltwise.binary.subtract.subtract - eltwise.binary.multiply.multiply - eltwise.binary.div.div diff --git a/tests/sweep_framework/sweeps/eltwise/unary/clamp/clamp_pytorch2.py b/tests/sweep_framework/sweeps/eltwise/unary/clamp/clamp_pytorch2.py new file mode 100644 index 000000000000..65b45606c2fe --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/unary/clamp/clamp_pytorch2.py @@ -0,0 +1,122 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import random +import ttnn +from tests.sweep_framework.utils import gen_shapes, gen_low_high_scalars +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + +# Override the default timeout in seconds for hang detection. 
+TIMEOUT = 30 + +random.seed(0) + +parameters = { + "clamp_1": { + "input_specs": [ + {"shape": [0, 1], "max": 4.135166556742356}, + {"shape": [0, 2], "min": 0, "max": 1066}, + {"shape": [0, 2], "min": 0, "max": 800}, + {"shape": [1, 1, 1, 42], "min": 0, "max": 82}, + {"shape": [1, 1, 32, 1], "min": 0, "max": 49}, + {"shape": [1066], "min": 0.0}, + {"shape": [1066], "max": 639}, + {"shape": [12, 1, 1], "max": 4.605170185988092}, + {"shape": [120], "min": 0.0}, + {"shape": [120], "max": 59}, + {"shape": [128], "min": 0.0}, + {"shape": [128], "max": 127}, + {"shape": [128], "max": 15}, + {"shape": [128], "max": 31}, + {"shape": [128], "max": 63}, + {"shape": [16, 1, 1], "max": 4.605170185988092}, + {"shape": [160], "min": 0.0}, + {"shape": [160], "max": 79}, + {"shape": [24, 1, 1], "max": 4.605170185988092}, + {"shape": [240], "min": 0.0}, + {"shape": [240], "max": 119}, + {"shape": [3, 1, 1], "max": 4.605170185988092}, + {"shape": [300], "min": 0.0}, + {"shape": [300], "max": 479}, + {"shape": [300], "max": 639}, + {"shape": [30], "min": 0.0}, + {"shape": [30], "max": 14}, + {"shape": [32, 1, 1], "max": 4.605170185988092}, + {"shape": [320], "min": 0.0}, + {"shape": [320], "max": 159}, + {"shape": [320], "max": 319}, + {"shape": [320], "max": 479}, + {"shape": [320], "max": 639}, + {"shape": [3234, 1], "max": 4.135166556742356}, + {"shape": [3234, 2], "min": 0, "max": 320}, + {"shape": [4, 1, 1], "max": 4.605170185988092}, + {"shape": [4, 2], "min": 0, "max": 1}, + {"shape": [40], "min": 0.0}, + {"shape": [40], "max": 19}, + {"shape": [480], "min": 0.0}, + {"shape": [480], "max": 239}, + {"shape": [6, 1, 1], "max": 4.605170185988092}, + {"shape": [6, 2], "min": 0, "max": 1}, + {"shape": [60], "min": 0.0}, + {"shape": [60], "max": 29}, + {"shape": [640], "min": 0.0}, + {"shape": [640], "max": 319}, + {"shape": [8, 1, 1], "max": 4.605170185988092}, + {"shape": [800], "min": 0.0}, + {"shape": [800], "max": 479}, + {"shape": [80], "min": 0.0}, + {"shape": [80], "max": 39}, + {"shape": [8732, 1], "max": 4.135166556742356}, + {"shape": [8732, 2], "min": 0, "max": 300}, + ], + "input_a_dtype": [ttnn.bfloat16], + "input_a_layout": [ttnn.TILE_LAYOUT], + "input_a_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG], + }, +} + + +def run( + input_specs, + input_a_dtype, + input_a_layout, + input_a_memory_config, + output_memory_config, + *, + device, +) -> list: + data_seed = random.randint(0, 20000000) + torch.manual_seed(data_seed) + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype + )(input_specs["shape"]) + + min_val = input_specs.get("min", None) + max_val = input_specs.get("max", None) + + torch_output_tensor = torch.clamp(torch_input_tensor_a, min_val, max_val) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_a_dtype, + layout=input_a_layout, + device=device, + memory_config=input_a_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.clamp(input_tensor_a, min_val, max_val, memory_config=output_memory_config) + output_tensor = ttnn.to_torch(result) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf] diff --git a/tests/sweep_framework/sweeps/eltwise/unary/clamp_min/clamp_min_pytorch2.py b/tests/sweep_framework/sweeps/eltwise/unary/clamp_min/clamp_min_pytorch2.py new file mode 100644 index 
000000000000..27074b5a3ea0 --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/unary/clamp_min/clamp_min_pytorch2.py @@ -0,0 +1,75 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import random +import ttnn +from tests.sweep_framework.utils import gen_shapes, gen_low_high_scalars +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + +# Override the default timeout in seconds for hang detection. +TIMEOUT = 30 + +random.seed(0) + +parameters = { + "clamp_min_1": { + "input_specs": [ + {"shape": [1, 1, 38, 38], "min": 1e-12}, + {"shape": [1, 1], "min": 1e-12}, + {"shape": [1, 24, 64, 1], "min": 1e-12}, + {"shape": [1, 32, 64, 1], "min": 1e-12}, + {"shape": [16, 6, 64, 1], "min": 1e-12}, + {"shape": [16, 8, 64, 1], "min": 1e-12}, + {"shape": [4, 12, 64, 1], "min": 1e-12}, + {"shape": [4, 16, 64, 1], "min": 1e-12}, + {"shape": [64, 3, 64, 1], "min": 1e-12}, + {"shape": [64, 4, 64, 1], "min": 1e-12}, + ], + "input_a_dtype": [ttnn.bfloat16], + "input_a_layout": [ttnn.TILE_LAYOUT], + "input_a_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG], + }, +} + + +def run( + input_specs, + input_a_dtype, + input_a_layout, + input_a_memory_config, + output_memory_config, + *, + device, +) -> list: + data_seed = random.randint(0, 20000000) + torch.manual_seed(data_seed) + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.float32), input_a_dtype + )(input_specs["shape"]) + + torch_output_tensor = torch.clamp(torch_input_tensor_a, input_specs["min"]) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_a_dtype, + layout=input_a_layout, + device=device, + memory_config=input_a_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.clamp(input_tensor_a, input_specs["min"], memory_config=output_memory_config) + output_tensor = ttnn.to_torch(result) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf] diff --git a/tests/sweep_framework/sweeps/eltwise/unary/hardswish/hardswish_pytorch2.py b/tests/sweep_framework/sweeps/eltwise/unary/hardswish/hardswish_pytorch2.py new file mode 100644 index 000000000000..25ffb0b1bf03 --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/unary/hardswish/hardswish_pytorch2.py @@ -0,0 +1,92 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import random +import ttnn +from tests.sweep_framework.utils import gen_shapes, gen_low_high_scalars +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + +# Override the default timeout in seconds for hang detection. 
+TIMEOUT = 30 + +random.seed(0) + +parameters = { + "hardswish_1": { + "input_shape": [ + [1, 1024], + [1, 120, 14, 14], + [1, 1280], + [1, 144, 14, 14], + [1, 16, 112, 112], + [1, 16, 160, 160], + [1, 184, 14, 14], + [1, 184, 20, 20], + [1, 200, 14, 14], + [1, 200, 20, 20], + [1, 240, 14, 14], + [1, 240, 20, 20], + [1, 240, 28, 28], + [1, 240, 40, 40], + [1, 288, 14, 14], + [1, 288, 7, 7], + [1, 480, 10, 10], + [1, 480, 14, 14], + [1, 480, 20, 20], + [1, 576, 7, 7], + [1, 672, 10, 10], + [1, 672, 14, 14], + [1, 672, 20, 20], + [1, 672, 7, 7], + [1, 96, 14, 14], + [1, 96, 28, 28], + [1, 960, 7, 7], + ], + "input_dtype": [ttnn.bfloat16, ttnn.bfloat8_b], + "input_layout": [ttnn.TILE_LAYOUT, ttnn.ROW_MAJOR_LAYOUT], + "input_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG], + }, +} + + +def run( + input_shape, + input_dtype, + input_layout, + input_memory_config, + output_memory_config, + *, + device, +) -> list: + data_seed = random.randint(0, 20000000) + torch.manual_seed(data_seed) + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.float32), input_dtype + )(input_shape) + + torch_output_tensor = torch.nn.functional.hardswish(torch_input_tensor_a) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_dtype, + layout=input_layout, + device=device, + memory_config=input_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.hardswish(input_tensor_a, memory_config=output_memory_config) + output_tensor = ttnn.to_torch(result) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf] diff --git a/tests/sweep_framework/sweeps/eltwise/unary/hardtanh/hardtanh_pytorch2.py b/tests/sweep_framework/sweeps/eltwise/unary/hardtanh/hardtanh_pytorch2.py new file mode 100644 index 000000000000..d175273a97c2 --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/unary/hardtanh/hardtanh_pytorch2.py @@ -0,0 +1,181 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import random +import ttnn +from tests.sweep_framework.utils import gen_shapes, gen_low_high_scalars +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + +# Override the default timeout in seconds for hang detection. 
+TIMEOUT = 30 + +random.seed(0) + +parameters = { + "hardtanh_1": { + "input_specs": [ + {"shape": [1, 1024, 7, 7], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 1152, 7, 7], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 1152, 8, 8], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 116, 14, 14], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 1248, 9, 9], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 128, 1, 1], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 128, 2, 2], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 128, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 128, 3, 3], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 128, 5, 5], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 128, 56, 56], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 1280, 10, 10], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 1280, 12, 12], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 1280, 7, 7], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 1280, 8, 8], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 1280, 9, 9], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 134, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 1392, 10, 10], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 14, 56, 56], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 144, 150, 150], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 144, 190, 190], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 144, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 144, 30, 30], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 144, 33, 33], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 144, 56, 56], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 144, 60, 60], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 144, 65, 65], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 144, 75, 75], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 144, 95, 95], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 16, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 160, 7, 7], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 1632, 12, 12], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 168, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 192, 14, 14], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 192, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 192, 38, 38], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 192, 48, 48], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 192, 75, 75], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 192, 95, 95], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 196, 14, 14], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 20, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 24, 56, 56], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 240, 14, 14], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 240, 15, 15], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 240, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 240, 30, 30], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 256, 10, 10], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 256, 14, 14], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 256, 2, 2], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 256, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 256, 3, 3], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 256, 5, 5], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 272, 7, 7], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 28, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 288, 17, 17], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 288,
19, 19], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 288, 33, 33], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 288, 38, 38], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 32, 112, 112], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 32, 120, 120], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 32, 130, 130], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 32, 150, 150], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 32, 190, 190], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 320, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 334, 14, 14], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 336, 24, 24], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 336, 48, 48], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 34, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 384, 14, 14], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 40, 14, 14], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 40, 56, 56], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 46, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 462, 7, 7], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 480, 10, 10], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 480, 14, 14], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 480, 15, 15], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 512, 14, 14], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 512, 5, 5], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 512, 7, 7], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 528, 17, 17], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 576, 14, 14], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 576, 19, 19], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 576, 7, 7], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 58, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 64, 1, 1], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 64, 112, 112], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 64, 2, 2], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 64, 56, 56], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 640, 14, 14], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 672, 14, 14], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 672, 15, 15], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 672, 20, 20], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 672, 24, 24], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 672, 7, 7], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 672, 8, 8], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 68, 14, 14], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 68, 56, 56], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 720, 17, 17], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 720, 9, 9], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 78, 28, 28], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 816, 10, 10], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 816, 19, 19], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 96, 112, 112], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 96, 120, 120], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 96, 130, 130], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 96, 56, 56], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 96, 60, 60], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 96, 65, 65], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 960, 12, 12], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 960, 24, 24], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 960, 7, 7], "min_val": 0.0, "max_val": 6.0}, + {"shape": [1, 98, 28, 28], "min_val": 0.0, "max_val": 6.0}, + ], + 
"input_dtype": [ttnn.bfloat16, ttnn.bfloat8_b], + "input_layout": [ttnn.TILE_LAYOUT, ttnn.ROW_MAJOR_LAYOUT], + "input_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG], + }, +} + + +def run( + input_specs, + input_dtype, + input_layout, + input_memory_config, + output_memory_config, + *, + device, +) -> list: + data_seed = random.randint(0, 20000000) + torch.manual_seed(data_seed) + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.float32), input_dtype + )((input_specs["shape"])) + + torch_output_tensor = torch.nn.functional.hardtanh( + torch_input_tensor_a, input_specs["min_val"], input_specs["max_val"] + ) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_dtype, + layout=input_layout, + device=device, + memory_config=input_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.hardtanh( + input_tensor_a, input_specs["min_val"], input_specs["max_val"], memory_config=output_memory_config + ) + output_tensor = ttnn.to_torch(result) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf] diff --git a/tests/sweep_framework/sweeps/eltwise/unary/logical_not/logical_not_pytorch2.py b/tests/sweep_framework/sweeps/eltwise/unary/logical_not/logical_not_pytorch2.py new file mode 100644 index 000000000000..a0f1a4c8e7bb --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/unary/logical_not/logical_not_pytorch2.py @@ -0,0 +1,66 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import random +import ttnn +from tests.sweep_framework.utils import gen_shapes +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + +# Override the default timeout in seconds for hang detection. 
+TIMEOUT = 30 + +random.seed(0) + +parameters = { + "nightly": { + "input_shape": [ + [7, 7], + ], + "input_dtype": [ttnn.bfloat16, ttnn.bfloat8_b], + "input_layout": [ttnn.TILE_LAYOUT], + "input_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG], + }, +} + + +def run( + input_shape, + input_dtype, + input_layout, + input_memory_config, + output_memory_config, + *, + device, +) -> list: + data_seed = random.randint(0, 20000000) + torch.manual_seed(data_seed) + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.float32), input_dtype + )(input_shape) + + torch_output_tensor = torch.logical_not(torch_input_tensor_a) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_dtype, + layout=input_layout, + device=device, + memory_config=input_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.logical_not(input_tensor_a, memory_config=output_memory_config) + output_tensor = ttnn.to_torch(result) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf] diff --git a/tests/sweep_framework/sweeps/eltwise/unary/neg/neg_pytorch2.py b/tests/sweep_framework/sweeps/eltwise/unary/neg/neg_pytorch2.py new file mode 100644 index 000000000000..db01f4761f7c --- /dev/null +++ b/tests/sweep_framework/sweeps/eltwise/unary/neg/neg_pytorch2.py @@ -0,0 +1,73 @@ +# SPDX-FileCopyrightText: © 2024 Tenstorrent Inc. + +# SPDX-License-Identifier: Apache-2.0 + +from typing import Optional, Tuple +from functools import partial + +import torch +import random +import ttnn +from tests.sweep_framework.utils import gen_shapes +from tests.tt_eager.python_api_testing.sweep_tests.generation_funcs import gen_func_with_cast_tt + +from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time +from models.utility_functions import torch_random + +# Override the default timeout in seconds for hang detection. +TIMEOUT = 30 + +random.seed(0) + +parameters = { + "nightly": { + "input_shape": [ + [1, 1, 16, 16], + [1, 1, 7, 32], + [1, 1], + [1, 5, 16, 16], + [1, 71, 7, 32], + [17, 17], + [2, 2], + # [s0 + 1, s0 + 1] + ], + "input_dtype": [ttnn.bfloat16, ttnn.bfloat8_b], + "input_layout": [ttnn.TILE_LAYOUT], + "input_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG], + "output_memory_config": [ttnn.DRAM_MEMORY_CONFIG, ttnn.L1_MEMORY_CONFIG], + }, +} + + +def run( + input_shape, + input_dtype, + input_layout, + input_memory_config, + output_memory_config, + *, + device, +) -> list: + data_seed = random.randint(0, 20000000) + torch.manual_seed(data_seed) + + torch_input_tensor_a = gen_func_with_cast_tt( + partial(torch_random, low=-100, high=100, dtype=torch.float32), input_dtype + )(input_shape) + + torch_output_tensor = torch.neg(torch_input_tensor_a) + + input_tensor_a = ttnn.from_torch( + torch_input_tensor_a, + dtype=input_dtype, + layout=input_layout, + device=device, + memory_config=input_memory_config, + ) + + start_time = start_measuring_time() + result = ttnn.neg(input_tensor_a, memory_config=output_memory_config) + output_tensor = ttnn.to_torch(result) + e2e_perf = stop_measuring_time(start_time) + + return [check_with_pcc(torch_output_tensor, output_tensor, 0.999), e2e_perf]
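The hardswish and hardtanh suites above sweep ttnn.bfloat8_b together with ttnn.ROW_MAJOR_LAYOUT, a combination that cannot execute because bfloat8_b is a tile-based block-float format. A minimal sketch of a filter that could accompany those suites is shown below; it assumes the sweep framework's optional invalidate_vector hook and a dict-shaped test vector, as used by other modules under tests/sweep_framework, and is not part of this change.

```python
# Hypothetical helper, not part of the diff above. The hook name, the dict-style
# test_vector, and the (is_invalid, reason) return convention are assumptions
# based on other sweeps in tests/sweep_framework.
from typing import Optional, Tuple

import ttnn


def invalidate_vector(test_vector: dict) -> Tuple[bool, Optional[str]]:
    # bfloat8_b is a block-float format defined over 32x32 tiles, so it cannot be
    # combined with ROW_MAJOR_LAYOUT; mark such vectors invalid instead of running them.
    if test_vector.get("input_dtype") == ttnn.bfloat8_b and test_vector.get("input_layout") == ttnn.ROW_MAJOR_LAYOUT:
        return True, "bfloat8_b inputs require TILE_LAYOUT"
    return False, None
```

If the framework hands the hook a typed vector object rather than a dict, the .get calls would become attribute accesses; the skip logic itself stays the same.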