Skip to content

Commit

Permalink
#0: add tests and remove asserts to enable greater RM slice support
Browse files Browse the repository at this point in the history
  • Loading branch information
sjameelTT committed Nov 22, 2024
1 parent e612654 commit 6e5ef31
Show file tree
Hide file tree
Showing 2 changed files with 31 additions and 12 deletions.
36 changes: 31 additions & 5 deletions tests/ttnn/unit_tests/operations/test_slice.py
Original file line number Diff line number Diff line change
Expand Up @@ -752,8 +752,37 @@ def test_slice_adversarial_fixed(input_shape, dim, start, end, step, layout, dev
@pytest.mark.parametrize(
    "input_shape, dim, start, end, step, layout",
    (
        ([8732, 4], 1, 0, -1, 4, ttnn.TILE_LAYOUT),  # Need tensor for this or a padding aware tiled kernel
        ([1, 7], 0, 0, -1, 1, ttnn.ROW_MAJOR_LAYOUT),  # page size must equal buffer size
        ([1, 8, 2, 2], 2, -1, -1, 1, ttnn.TILE_LAYOUT),  # Buffer size and page size should be larger than 0 bytes
        ([3], 0, 0, -1, 1, ttnn.TILE_LAYOUT),  # Difference in expected shape as it's a 1D tensor
    ),
)
def test_slice_adversarial(input_shape, dim, start, end, step, layout, device):
    """Adversarial slice cases that currently fail on device; each case's
    comment records the failure mode. Skipped until the kernels support them."""
    pytest.skip("These tests are known to fail")

    reference_input = torch.randn(input_shape, dtype=torch.bfloat16)

    # Build an index tuple that applies (start:end:step) only to `dim`
    # and selects everything along every other dimension.
    selector = [slice(None)] * len(input_shape)
    selector[dim] = slice(start, end, step)
    selector = tuple(selector)

    # Golden result computed on the host with torch.
    expected = reference_input[selector]

    # Same slice executed on device through ttnn, then brought back to host.
    device_tensor = ttnn.from_torch(reference_input, device=device, layout=layout, dtype=ttnn.bfloat16)
    actual = ttnn.to_torch(device_tensor[selector])

    assert_with_pcc(expected, actual, 0.999)


@pytest.mark.parametrize(
"input_shape, dim, start, end, step, layout",
(
([8732, 4], 1, 0, -1, 4, ttnn.TILE_LAYOUT), # Need tensor for this or a padding aware tiled kernel
(
[1, 7, 71, 64],
3,
Expand All @@ -762,12 +791,9 @@ def test_slice_adversarial_fixed(input_shape, dim, start, end, step, layout, dev
1,
ttnn.ROW_MAJOR_LAYOUT,
), # An unpadding slice operations for a RowMajor layout on the output tensor requires the last dimension to be on a 32 bit boundary
([1, 8, 2, 2], 2, -1, -1, 1, ttnn.TILE_LAYOUT), # Buffer size and page size should be larger than 0 bytes
([3], 0, 0, -1, 1, ttnn.TILE_LAYOUT), # Difference in expected shape as it's a 1D tensor
),
)
def test_slice_adversarial(input_shape, dim, start, end, step, layout, device):
pytest.skip("These tests are expected to fail at the moment")
def test_slice_adversarial_fixed(input_shape, dim, start, end, step, layout, device):
torch_input = torch.randn(input_shape, dtype=torch.bfloat16)

slice_obj = slice(start, end, step)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -102,18 +102,11 @@ void SliceDeviceOperation::validate_with_output_tensors(
(output_tensor_shape[-1] % TILE_WIDTH == 0) && (this->slice_start[-1] % TILE_WIDTH == 0),
"Can only unpad tilized tensor with full tiles");
} else if (input_tensor_a.get_layout() == Layout::ROW_MAJOR) {
TT_FATAL(
(output_tensor_shape[-1] * input_tensor_a.element_size() % sizeof(uint32_t) == 0),
"An unpadding slice operations for a RowMajor layout on the output tensor requires the last dimension to be on a 32 bit boundary. For example, the final dimension needs to be divisible by 2 for bfloat16. The resulting tensor shape is {}, which is not 4B aligned as the last dimension is {}",
output_tensor_shape[-1], input_tensor_a.element_size());
if (has_step) {
for (uint32_t i = 0; i < input_tensor_a.get_legacy_shape().rank(); i++) {
TT_FATAL(step[i] > 0, "Step({}) = {} should be positive", i, step[i]);
}
}
else {
TT_FATAL(this->slice_start[-1] * input_tensor_a.element_size() % sizeof(uint32_t) == 0, "Slice needs to start at an aligned position");
}
}
}

Expand Down

0 comments on commit 6e5ef31

Please sign in to comment.