Skip to content

Commit

Permalink
#4003: Setting __all__ = [] to block wildcard imports
Browse files Browse the repository at this point in the history
  • Loading branch information
eyonland committed Jan 20, 2024
1 parent 0202082 commit e643fe4
Show file tree
Hide file tree
Showing 9 changed files with 17 additions and 44 deletions.
3 changes: 2 additions & 1 deletion ttnn/binary.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,6 @@ def binary_function(
binary_function.__doc__ = doc

setattr(THIS_MODULE, name, binary_function)
__all__.append(name)


TTL_BINARY_FUNCTIONS = [
Expand Down Expand Up @@ -79,3 +78,5 @@ def binary_function(

for binary_function_name, ttl_binary_function, doc in TTL_BINARY_FUNCTIONS:
register_ttl_binary_function(binary_function_name, ttl_binary_function, doc)

__all__ = []
3 changes: 3 additions & 0 deletions ttnn/conv.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,3 +112,6 @@ def copy_input_to_device(self, input: ttnn.Tensor):

def copy_output_from_device(self, output: ttnn.Tensor):
return ttnn.Tensor(self.conv.copy_output_from_device(output.value))


__all__ = []
14 changes: 1 addition & 13 deletions ttnn/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -1358,16 +1358,4 @@ def mean(input_tensor: ttnn.Tensor, dim: Union[int, Tuple[int]], keepdim: bool =
return output_tensor


__all__ = [
"matmul",
"add",
"sub",
"subtract",
"mul",
"multiply",
"embedding",
"softmax",
"mean",
"pad_to_tile",
"unpad_from_tile",
]
__all__ = []
2 changes: 1 addition & 1 deletion ttnn/data_movement.py
Original file line number Diff line number Diff line change
Expand Up @@ -271,4 +271,4 @@ def repeat_interleave(tensor: ttnn.Tensor, repeats: Union[ttnn.Tensor, int], dim
return ttnn.from_torch(output_tensor, dtype=dtype, device=device, layout=layout)


__all__ = ["pad", "reshape", "permute", "concat", "split", "repeat_interleave"]
__all__ = []
3 changes: 3 additions & 0 deletions ttnn/model_preprocessing.py
Original file line number Diff line number Diff line change
Expand Up @@ -310,3 +310,6 @@ def convert_to_ttnn(model, full_name):
logger.info(f"Moved model weights to device")

return parameters


__all__ = []
6 changes: 1 addition & 5 deletions ttnn/normalization.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,8 +162,4 @@ def group_norm(
return ttnn.from_torch(output, dtype=input_tensor.dtype, layout=input_tensor.layout, device=input_tensor.device)


__all__ = [
"layer_norm",
"rms_norm",
"group_norm",
]
__all__ = []
24 changes: 1 addition & 23 deletions ttnn/tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -531,26 +531,4 @@ def impl(file_name, tensor):
ttl.tensor.decorate_external_operation(impl, function_name="ttnn.dump_tensor")(file_name, tensor)


__all__ = [
"Device",
"DataType",
"uint32",
"float32",
"bfloat16",
"bfloat8_b",
"DRAM_MEMORY_CONFIG",
"L1_MEMORY_CONFIG",
"ROW_MAJOR_LAYOUT",
"TILE_LAYOUT",
"TILE_SIZE",
"Tensor",
"from_torch",
"to_torch",
"to_device",
"from_device",
"deallocate",
"reallocate",
"load_tensor",
"dump_tensor",
"to_layout",
]
__all__ = []
3 changes: 3 additions & 0 deletions ttnn/transformer.py
Original file line number Diff line number Diff line change
Expand Up @@ -346,3 +346,6 @@ def concatenate_heads(
output_tensor = ttnn.reshape(output_tensor, (batch_size, sequence_size, num_heads * head_size))

return output_tensor


__all__ = []
3 changes: 2 additions & 1 deletion ttnn/unary.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,6 @@ def unary_function(
"""
setattr(THIS_MODULE, name, unary_function)
__all__.append(name)


TTL_UNARY_FUNCTIONS = [
Expand All @@ -87,3 +86,5 @@ def unary_function(

for unary_function_name, ttl_unary_function in TTL_UNARY_FUNCTIONS:
register_ttl_unary_function(unary_function_name, ttl_unary_function)

__all__ = []

0 comments on commit e643fe4

Please sign in to comment.