Skip to content

Commit

Permalink
#0: add tests and remove asserts to enable greater RM slice support
Browse files Browse the repository at this point in the history
  • Loading branch information
sjameelTT committed Nov 22, 2024
1 parent e612654 commit 56c4420
Show file tree
Hide file tree
Showing 3 changed files with 32 additions and 32 deletions.
36 changes: 31 additions & 5 deletions tests/ttnn/unit_tests/operations/test_slice.py
Original file line number Diff line number Diff line change
Expand Up @@ -752,8 +752,37 @@ def test_slice_adversarial_fixed(input_shape, dim, start, end, step, layout, dev
@pytest.mark.parametrize(
"input_shape, dim, start, end, step, layout",
(
([8732, 4], 1, 0, -1, 4, ttnn.TILE_LAYOUT), # Need tensor for this or a padding aware tiled kernel
([1, 7], 0, 0, -1, 1, ttnn.ROW_MAJOR_LAYOUT), # page size must equal buffer size
([1, 8, 2, 2], 2, -1, -1, 1, ttnn.TILE_LAYOUT), # Buffer size and page size should be larger than 0 bytes
([3], 0, 0, -1, 1, ttnn.TILE_LAYOUT), # Difference in expected shape as it's a 1D tensor
),
)
def test_slice_adversarial(input_shape, dim, start, end, step, layout, device):
pytest.skip("These tests are known to fail")
torch_input = torch.randn(input_shape, dtype=torch.bfloat16)

slice_obj = slice(start, end, step)

# Prepare indices for slicing in the specified dimension
indices = [slice(None)] * len(input_shape) # By default, select all elements along every dimension
indices[dim] = slice_obj # Apply slicing to the target dimension
indices = tuple(indices)

# Apply slicing to the input_tensor
torch_output_tensor = torch_input[indices]

ttnn_tensor = ttnn.from_torch(torch_input, device=device, layout=layout, dtype=ttnn.bfloat16)
ttnn_output = ttnn_tensor[indices]

ttnn_output_tensor = ttnn.to_torch(ttnn_output)

assert_with_pcc(torch_output_tensor, ttnn_output_tensor, 0.999)


@pytest.mark.parametrize(
"input_shape, dim, start, end, step, layout",
(
([8732, 4], 1, 0, -1, 4, ttnn.TILE_LAYOUT), # Need tensor for this or a padding aware tiled kernel
(
[1, 7, 71, 64],
3,
Expand All @@ -762,12 +791,9 @@ def test_slice_adversarial_fixed(input_shape, dim, start, end, step, layout, dev
1,
ttnn.ROW_MAJOR_LAYOUT,
), # An unpadding slice operations for a RowMajor layout on the output tensor requires the last dimension to be on a 32 bit boundary
([1, 8, 2, 2], 2, -1, -1, 1, ttnn.TILE_LAYOUT), # Buffer size and page size should be larger than 0 bytes
([3], 0, 0, -1, 1, ttnn.TILE_LAYOUT), # Difference in expected shape as it's a 1D tensor
),
)
def test_slice_adversarial(input_shape, dim, start, end, step, layout, device):
pytest.skip("These tests are expected to fail at the moment")
def test_slice_adversarial_fixed(input_shape, dim, start, end, step, layout, device):
torch_input = torch.randn(input_shape, dtype=torch.bfloat16)

slice_obj = slice(start, end, step)
Expand Down
21 changes: 1 addition & 20 deletions tests/ttnn/unit_tests/test_to_layout.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
import ttnn

from tests.ttnn.utils_for_testing import assert_with_pcc, check_with_pcc_without_tensor_printout
from models.utility_functions import is_grayskull, is_blackhole, torch_random
from models.utility_functions import is_grayskull, is_blackhole, torch_random, skip_for_grayskull


@pytest.mark.parametrize("height", [32, 30])
Expand Down Expand Up @@ -140,22 +140,3 @@ def test_to_layout_device(device, h, w, input_layout, output_layout):
torch_brought_back = ttnn.to_torch(new_layout_tensor)

assert_with_pcc(torch_input_tensor, torch_brought_back)


@pytest.mark.parametrize("device_params", [{"l1_small_size": 16384}], indirect=True)
def test_to_layout_unet_shallow(device, use_program_cache):
torch_input = torch.rand([1, 1, 337920, 1])
input = ttnn.from_torch(torch_input, dtype=ttnn.bfloat16)

input = ttnn.to_layout(input, ttnn.TILE_LAYOUT)
input = ttnn.to_device(input, device)

sharded_memory_config = ttnn.create_sharded_memory_config(
[1, 1, 337920, 32], ttnn.CoreGrid(x=8, y=8), ttnn.ShardStrategy.HEIGHT
)
input = ttnn.to_memory_config(input, sharded_memory_config)
input = ttnn.to_memory_config(input, ttnn.L1_MEMORY_CONFIG)

input = ttnn.to_layout(input, ttnn.ROW_MAJOR_LAYOUT) # This fails
torch_output = ttnn.to_torch(input)
assert_with_pcc(torch_input, torch_output)
Original file line number Diff line number Diff line change
Expand Up @@ -102,18 +102,11 @@ void SliceDeviceOperation::validate_with_output_tensors(
(output_tensor_shape[-1] % TILE_WIDTH == 0) && (this->slice_start[-1] % TILE_WIDTH == 0),
"Can only unpad tilized tensor with full tiles");
} else if (input_tensor_a.get_layout() == Layout::ROW_MAJOR) {
TT_FATAL(
(output_tensor_shape[-1] * input_tensor_a.element_size() % sizeof(uint32_t) == 0),
"An unpadding slice operations for a RowMajor layout on the output tensor requires the last dimension to be on a 32 bit boundary. For example, the final dimension needs to be divisible by 2 for bfloat16. The resulting tensor shape is {}, which is not 4B aligned as the last dimension is {}",
output_tensor_shape[-1], input_tensor_a.element_size());
if (has_step) {
for (uint32_t i = 0; i < input_tensor_a.get_legacy_shape().rank(); i++) {
TT_FATAL(step[i] > 0, "Step({}) = {} should be positive", i, step[i]);
}
}
else {
TT_FATAL(this->slice_start[-1] * input_tensor_a.element_size() % sizeof(uint32_t) == 0, "Slice needs to start at an aligned position");
}
}
}

Expand Down

0 comments on commit 56c4420

Please sign in to comment.