From 0d7630f8561f697649d9addc041c997c0d7f5593 Mon Sep 17 00:00:00 2001 From: umadevimcw Date: Thu, 14 Nov 2024 13:13:01 +0000 Subject: [PATCH] #13929: Update document and sweep test file --- .github/workflows/ttnn-run-sweeps.yaml | 2 +- .../eltwise/binary_backward/{ => ldexp_bw}/ldexp_bw.py | 9 +++------ .../eltwise/binary_backward/binary_backward_pybind.hpp | 9 ++++++--- 3 files changed, 10 insertions(+), 10 deletions(-) rename tests/sweep_framework/sweeps/eltwise/binary_backward/{ => ldexp_bw}/ldexp_bw.py (95%) diff --git a/.github/workflows/ttnn-run-sweeps.yaml b/.github/workflows/ttnn-run-sweeps.yaml index 92b69dae806d..325a4ce8a612 100644 --- a/.github/workflows/ttnn-run-sweeps.yaml +++ b/.github/workflows/ttnn-run-sweeps.yaml @@ -232,7 +232,7 @@ on: - eltwise.binary.ne.ne_scalar_pytorch2 - eltwise.binary.hypot.hypot - eltwise.binary.xlogy.xlogy - - eltwise.binary_backward.ldexp_bw + - eltwise.binary_backward.ldexp_bw.ldexp_bw - eltwise.binary_backward.logaddexp_bw - eltwise.binary_backward.logaddexp2_bw - eltwise.binary_backward.addalpha_bw.addalpha_bw diff --git a/tests/sweep_framework/sweeps/eltwise/binary_backward/ldexp_bw.py b/tests/sweep_framework/sweeps/eltwise/binary_backward/ldexp_bw/ldexp_bw.py similarity index 95% rename from tests/sweep_framework/sweeps/eltwise/binary_backward/ldexp_bw.py rename to tests/sweep_framework/sweeps/eltwise/binary_backward/ldexp_bw/ldexp_bw.py index bb76d248a189..c07d9818a30d 100644 --- a/tests/sweep_framework/sweeps/eltwise/binary_backward/ldexp_bw.py +++ b/tests/sweep_framework/sweeps/eltwise/binary_backward/ldexp_bw/ldexp_bw.py @@ -14,9 +14,6 @@ from tests.ttnn.utils_for_testing import check_with_pcc, start_measuring_time, stop_measuring_time from models.utility_functions import torch_random -# Override the default timeout in seconds for hang detection. 
-TIMEOUT = 30 - random.seed(0) @@ -25,7 +22,7 @@ # Each suite has a key name (in this case "suite_1" and "suite_2") which will associate the test vectors to this specific suite of inputs. # Developers can create their own generator functions and pass them to the parameters as inputs. parameters = { - "nightly": { + "ldx_exp": { "input_shape": gen_shapes([1, 1, 1, 1], [6, 12, 256, 256], [1, 1, 1, 1], 16) + gen_shapes([1, 1, 1], [12, 256, 256], [1, 1, 1], 16) + gen_shapes([1, 1], [256, 256], [1, 1], 16), @@ -74,13 +71,13 @@ def run( ) torch_input_tensor_a = gen_func_with_cast_tt( - partial(torch_random, low=-90, high=90, dtype=torch.float32), input_a_dtype + partial(torch_random, low=-80, high=80, dtype=torch.float32), input_a_dtype )(input_shape) torch_input_tensor_a.requires_grad = True torch_input_tensor_a.retain_grad() torch_input_tensor_b = gen_func_with_cast_tt( - partial(torch_random, low=-90, high=90, dtype=torch.float32), input_b_dtype + partial(torch_random, low=-80, high=80, dtype=torch.float32), input_b_dtype )(input_shape) torch_input_tensor_b.requires_grad = True torch_input_tensor_b.retain_grad() diff --git a/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp b/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp index b99897762e76..c86a12eb2c7f 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp +++ b/ttnn/cpp/ttnn/operations/eltwise/binary_backward/binary_backward_pybind.hpp @@ -22,7 +22,7 @@ namespace binary_backward { namespace detail { template -void bind_binary_backward_ops(py::module& module, const binary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "BFLOAT16") { +void bind_binary_backward_ops(py::module& module, const binary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "BFLOAT16", const std::string_view note = "") { auto doc = 
fmt::format( R"doc( {2} @@ -53,6 +53,8 @@ void bind_binary_backward_operatio bfloat8_b/bfloat4_b is only supported on TILE_LAYOUT + {4} + Example: >>> grad_tensor = ttnn.to_device(ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16)), device=device) >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16)), device=device) @@ -65,7 +67,8 @@ void bind_binary_backward_operatio operation.base_name(), operation.python_fully_qualified_name(), description, - supported_dtype); + supported_dtype, + note); bind_registered_operation( module, @@ -1110,7 +1113,7 @@ void py_module(py::module& module) { module, ttnn::ldexp_bw, R"doc(Performs backward operations for ldexp of :attr:`input_tensor_a` and :attr:`input_tensor_b` with given :attr:`grad_tensor`.)doc", - R"doc(BFLOAT16)doc"); + R"doc(BFLOAT16)doc", R"doc(Recommended input range: [-80, 80]. Performance of the PCC may degrade if the input falls outside this range.)doc"); detail::bind_binary_backward_ops(