From 5ac97fb1394365368f96cd56c7a17b9dc49784f1 Mon Sep 17 00:00:00 2001
From: Dalar Vartanians
Date: Mon, 25 Mar 2024 22:26:33 +0000
Subject: [PATCH] #5079: add unit tests for vanilla YOLOv4

---
 .../ttnn/unit_tests/operations/test_conv2d.py | 106 ++++++++++++++++++
 1 file changed, 106 insertions(+)

diff --git a/tests/ttnn/unit_tests/operations/test_conv2d.py b/tests/ttnn/unit_tests/operations/test_conv2d.py
index 9b57fb5b790..e6f02dccdf7 100644
--- a/tests/ttnn/unit_tests/operations/test_conv2d.py
+++ b/tests/ttnn/unit_tests/operations/test_conv2d.py
@@ -1632,3 +1632,109 @@ def test_yolov4_conv_groups_1_low_resolution(
         padded_input_channels=16 if input_channels == 3 else None,
         output_layout=output_layout,
     )
+
+
+@skip_for_wormhole_b0()
+@pytest.mark.parametrize(
+    "batch_size, output_channels, input_channels, input_height, input_width, filter_height, filter_width, stride_h, stride_w, pad_h, pad_w, use_1d_systolic_array, config_override, use_shallow_conv_variant",
+    (
+        (1, 64, 64, 80, 80, 3, 3, 1, 1, 1, 1, True, None, False),
+        (1, 255, 512, 20, 20, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 64, 128, 80, 80, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 32, 3, 320, 320, 3, 3, 1, 1, 1, 1, True, None, False),
+        (1, 128, 128, 80, 80, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 128, 256, 20, 20, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 512, 256, 20, 20, 3, 3, 1, 1, 1, 1, True, None, False),
+        (1, 64, 32, 320, 320, 3, 3, 2, 2, 1, 1, True, None, False),
+        (1, 256, 512, 20, 20, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 64, 64, 80, 80, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 256, 128, 40, 40, 3, 3, 1, 1, 1, 1, True, None, False),
+        (1, 512, 1024, 10, 10, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 512, 256, 40, 40, 3, 3, 2, 2, 1, 1, True, None, False),
+        (1, 64, 128, 160, 160, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 256, 512, 10, 10, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 256, 128, 40, 40, 3, 3, 2, 2, 1, 1, True, None, False),
+        (1, 64, 64, 160, 160, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 512, 512, 20, 20, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 512, 256, 20, 20, 3, 3, 2, 2, 1, 1, True, None, False),
+        (1, 128, 256, 40, 40, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 255, 256, 40, 40, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 512, 2048, 10, 10, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 512, 512, 10, 10, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 64, 32, 160, 160, 3, 3, 1, 1, 1, 1, True, None, False),
+        (1, 128, 128, 40, 40, 3, 3, 1, 1, 1, 1, True, None, False),
+        (1, 32, 64, 160, 160, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 256, 128, 80, 80, 3, 3, 2, 2, 1, 1, True, None, False),
+        (1, 256, 256, 20, 20, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 256, 256, 20, 20, 3, 3, 1, 1, 1, 1, True, None, False),
+        (1, 512, 512, 10, 10, 3, 3, 1, 1, 1, 1, True, None, False),
+        (1, 1024, 512, 10, 10, 3, 3, 1, 1, 1, 1, True, None, False),
+        (1, 128, 64, 160, 160, 3, 3, 2, 2, 1, 1, True, None, False),
+        (1, 255, 1024, 10, 10, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 1024, 1024, 10, 10, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 1024, 512, 20, 20, 3, 3, 2, 2, 1, 1, True, None, False),
+        (1, 256, 256, 40, 40, 1, 1, 1, 1, 0, 0, True, None, False),
+        (1, 128, 128, 40, 40, 1, 1, 1, 1, 0, 0, True, None, False),
+    ),
+)
+@pytest.mark.parametrize(
+    "weights_dtype",
+    [ttnn.bfloat8_b],
+)
+@pytest.mark.parametrize(
+    "activations_dtype",
+    # [ttnn.bfloat8_b, ttnn.bfloat16],
+    [ttnn.bfloat8_b],
+)
+@pytest.mark.parametrize("math_fidelity", [ttnn.MathFidelity.LoFi])
@pytest.mark.parametrize("output_layout", [ttnn.ROW_MAJOR_LAYOUT, ttnn.TILE_LAYOUT]) +@pytest.mark.parametrize("output_layout", [ttnn.TILE_LAYOUT]) +def test_yolov4_REPO_320_320( + device, + use_program_cache, + math_fidelity, + activations_dtype, + weights_dtype, + batch_size, + output_channels, + input_channels, + input_height, + input_width, + filter_height, + filter_width, + stride_h, + stride_w, + pad_h, + pad_w, + use_1d_systolic_array, + config_override, + use_shallow_conv_variant, + output_layout, +): + if output_layout == ttnn.ROW_MAJOR_LAYOUT and activations_dtype == ttnn.bfloat8_b: + pytest.skip("Row major layout not compatible with bfloat8_b") + if output_layout == ttnn.ROW_MAJOR_LAYOUT and input_height >= 1056: + pytest.skip("OOM") + run_conv( + device, + math_fidelity, + activations_dtype, + weights_dtype, + batch_size, + output_channels, + input_channels, + input_height, + input_width, + filter_height, + filter_width, + stride_h, + stride_w, + pad_h, + pad_w, + use_1d_systolic_array, + config_override, + use_shallow_conv_variant=use_shallow_conv_variant, + # groups=groups, + padded_input_channels=16 if input_channels == 3 else None, + output_layout=output_layout, + )