diff --git a/docs/overview/volunteer_ranks.rst b/docs/overview/volunteer_ranks.rst
index bcc2c9c7e0206..99f5a110397a4 100644
--- a/docs/overview/volunteer_ranks.rst
+++ b/docs/overview/volunteer_ranks.rst
@@ -72,9 +72,6 @@ Contributors
    * - Ayush Lokare
      - `ayush111111 `_
      - Merging Master, Ivy Inspector
-   * - Chaitanya Lakhchaura
-     - `ZenithFlux `_
-     - Debugging Dynamo, Merging Master
    * - David Adlai Nettey
      - `Adlai-1 `_
      - Merging Master, Ivy Inspector
@@ -108,6 +105,9 @@ Contributors
    * - Aryan Pandey
      - `Aryan8912 `_
      - Merging Master
+   * - Chaitanya Lakhchaura
+     - `ZenithFlux `_
+     - Merging Master
    * - Dhruv Sharma
      - `druvdub `_
      - Merging Master
diff --git a/ivy/__init__.py b/ivy/__init__.py
index 9b4316c61d8bd..b113a2893ee6f 100644
--- a/ivy/__init__.py
+++ b/ivy/__init__.py
@@ -361,6 +361,17 @@ def __sizeof__(self):
     def __dir__(self):
         return self._shape.__dir__()
 
+    def __getnewargs__(self):
+        if self._shape is None:
+            raise ivy.utils.exceptions.IvyException(
+                "Cannot pickle a partially known Shape"
+            )
+        return (
+            builtins.tuple(
+                self._shape,
+            ),
+        )
+
     @property
     def shape(self):
         return self._shape
@@ -477,6 +488,19 @@ def as_list(self):
         )
         return list(self._shape)
 
+    def numel(self):
+        if self._shape is None:
+            raise ivy.utils.exceptions.IvyException(
+                "Cannot calculate the number of elements in a partially known Shape"
+            )
+        res = 1
+        for dim in self._shape:
+            res *= dim
+        return res
+
+    def __concat__(self, other):
+        return self.concatenate(other)
+
 
 class IntDtype(Dtype):
     def __new__(cls, dtype_str):
diff --git a/ivy/functional/frontends/paddle/creation.py b/ivy/functional/frontends/paddle/creation.py
index 10f7054b17e9a..8ac3f6dd1b6ee 100644
--- a/ivy/functional/frontends/paddle/creation.py
+++ b/ivy/functional/frontends/paddle/creation.py
@@ -19,11 +19,7 @@ def arange(start, end=None, step=1, dtype=None, name=None):
 )
 @to_ivy_arrays_and_back
 def assign(x, output=None):
-    if len(ivy.shape(x)) == 0:
-        x = ivy.reshape(ivy.Array(x), (1,))
-        if ivy.exists(output):
-            output = ivy.reshape(ivy.Array(output), (1,))
-    else:
+    if len(ivy.shape(x)) != 0:
         x = ivy.reshape(x, ivy.shape(x))
     ret = ivy.copy_array(x, to_ivy_array=False, out=output)
     return ret
diff --git a/ivy/functional/frontends/paddle/search.py b/ivy/functional/frontends/paddle/search.py
index 29608b493561c..2fbe9456b5dc1 100644
--- a/ivy/functional/frontends/paddle/search.py
+++ b/ivy/functional/frontends/paddle/search.py
@@ -39,7 +39,10 @@ def argsort(x, /, *, axis=-1, descending=False, name=None):
 )
 @to_ivy_arrays_and_back
 def index_sample(x, index):
-    return x[ivy.arange(x.shape[0])[:, None], index]
+    # Build the row selector with index's dtype so both index tensors match
+    index_dtype = index.dtype
+    arange_tensor = ivy.arange(x.shape[0], dtype=index_dtype)[:, None]
+    return x[arange_tensor, index]
 
 
 # kthvalue
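A note on the `index_sample` change above: Paddle's `index_sample(x, index)` gathers `x[i, index[i, j]]` row by row, and some backends reject advanced indexing when the row selector and `index` carry different integer dtypes, which is what building the `arange` with `index.dtype` addresses. A minimal NumPy sketch of the intended behavior (illustrative only; the helper name is hypothetical):

```python
import numpy as np


# Hypothetical stand-in for the frontend logic, shown in NumPy.
def index_sample_sketch(x, index):
    # The row selector shares index's dtype, mirroring the fix above,
    # since mixed int32/int64 index tensors can fail on some backends.
    rows = np.arange(x.shape[0], dtype=index.dtype)[:, None]
    return x[rows, index]


x = np.array([[10, 20, 30], [40, 50, 60]])
index = np.array([[0, 2], [1, 1]], dtype=np.int32)
print(index_sample_sketch(x, index))  # [[10 30], [50 50]]
```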
diff --git a/ivy/functional/frontends/sklearn/metrics/_classification.py b/ivy/functional/frontends/sklearn/metrics/_classification.py
index 4404653e97c0d..0ad23d568cc41 100644
--- a/ivy/functional/frontends/sklearn/metrics/_classification.py
+++ b/ivy/functional/frontends/sklearn/metrics/_classification.py
@@ -69,6 +69,34 @@ def f1_score(y_true, y_pred, *, sample_weight=None):
     return ret
 
 
+@to_ivy_arrays_and_back
+def hamming_loss(y_true, y_pred, *, sample_weight=None):
+    # Ensure that y_true and y_pred have the same shape
+    if y_true.shape != y_pred.shape:
+        raise IvyValueError("y_true and y_pred must have the same shape")
+
+    # Validate sample_weight if provided, otherwise weight all samples equally
+    if sample_weight is not None:
+        sample_weight = ivy.array(sample_weight)
+        if sample_weight.shape[0] != y_true.shape[0]:
+            raise IvyValueError(
+                "sample_weight must have the same length as y_true and y_pred"
+            )
+    else:
+        sample_weight = ivy.ones_like(y_true)
+
+    # Flag each mismatched prediction with a 1
+    incorrect_predictions = ivy.not_equal(y_true, y_pred).astype("int64")
+    # Apply sample weights
+    weighted_incorrect_predictions = ivy.multiply(incorrect_predictions, sample_weight)
+
+    # Hamming loss is the weighted mean of the mismatches
+    loss = ivy.sum(weighted_incorrect_predictions) / ivy.sum(sample_weight)
+
+    loss = loss.astype("float64")
+    return loss
+
+
 @to_ivy_arrays_and_back
 def precision_score(y_true, y_pred, *, sample_weight=None):
     # Ensure that y_true and y_pred have the same shape
diff --git a/ivy_tests/test_ivy/test_frontends/test_sklearn/test_metrics/test_classification.py b/ivy_tests/test_ivy/test_frontends/test_sklearn/test_metrics/test_classification.py
index ee51db86a56aa..c2a11908ddd5a 100644
--- a/ivy_tests/test_ivy/test_frontends/test_sklearn/test_metrics/test_classification.py
+++ b/ivy_tests/test_ivy/test_frontends/test_sklearn/test_metrics/test_classification.py
@@ -108,6 +108,71 @@ def test_sklearn_f1_score(
     )
 
 
+@handle_frontend_test(
+    fn_tree="sklearn.metrics.hamming_loss",
+    arrays_and_dtypes=helpers.dtype_and_values(
+        available_dtypes=helpers.get_dtypes("integer"),
+        num_arrays=2,
+        min_value=0,
+        max_value=1,  # generate binary labels only
+        shared_dtype=True,
+        shape=helpers.ints(min_value=2, max_value=5),
+    ),
+    sample_weight=st.lists(
+        st.floats(min_value=0.1, max_value=1), min_size=2, max_size=5
+    ),
+)
+def test_sklearn_hamming_loss(
+    arrays_and_dtypes,
+    on_device,
+    fn_tree,
+    frontend,
+    test_flags,
+    backend_fw,
+    sample_weight,
+):
+    dtypes, values = arrays_and_dtypes
+    # Ensure the values are binary by rounding and converting to int
+    for i in range(2):
+        values[i] = np.round(values[i]).astype(int)
+
+    # Adjust sample_weight to have the correct length
+    sample_weight = np.array(sample_weight).astype(float)
+    if len(sample_weight) != len(values[0]):
+        # If sample_weight is shorter, extend it with ones
+        sample_weight = np.pad(
+            sample_weight,
+            (0, max(0, len(values[0]) - len(sample_weight))),
+            "constant",
+            constant_values=1.0,
+        )
+        # If sample_weight is longer, truncate it
+        sample_weight = sample_weight[: len(values[0])]
+
+    # Detach tensors if they require grad before converting to NumPy arrays
+    if backend_fw == "torch":
+        values = [
+            (
+                value.detach().numpy()
+                if isinstance(value, torch.Tensor) and value.requires_grad
+                else value
+            )
+            for value in values
+        ]
+
+    helpers.test_frontend_function(
+        input_dtypes=dtypes,
+        backend_to_test=backend_fw,
+        test_flags=test_flags,
+        fn_tree=fn_tree,
+        frontend=frontend,
+        on_device=on_device,
+        y_true=values[0],
+        y_pred=values[1],
+        sample_weight=sample_weight,
+    )
+
+
 @handle_frontend_test(
     fn_tree="sklearn.metrics.precision_score",
     arrays_and_dtypes=helpers.dtype_and_values(
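For reference, `hamming_loss` above follows sklearn's semantics: the weighted mean of mismatches, `sum(w * (y_true != y_pred)) / sum(w)`. Normalizing the weights to sum to one and then dividing by the sample count again would shrink the result by a factor of `n`, which is why the final division uses the weight sum. A quick NumPy cross-check (a sketch, not part of the test suite; the function name is hypothetical):

```python
import numpy as np


def hamming_loss_reference(y_true, y_pred, sample_weight=None):
    # Weighted mean of mismatches: sum(w * (t != p)) / sum(w).
    mismatches = (np.asarray(y_true) != np.asarray(y_pred)).astype(float)
    if sample_weight is None:
        sample_weight = np.ones_like(mismatches)
    return float(np.sum(sample_weight * mismatches) / np.sum(sample_weight))


y_true = np.array([0, 1, 1, 0])
y_pred = np.array([0, 0, 1, 1])  # two of four labels disagree
print(hamming_loss_reference(y_true, y_pred))  # 0.5
print(hamming_loss_reference(y_true, y_pred, np.array([1.0, 3.0, 1.0, 1.0])))  # 4/6
```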
diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_blas_and_lapack_ops.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_blas_and_lapack_ops.py
index a9eb41dd76b55..2a4459409693c 100644
--- a/ivy_tests/test_ivy/test_frontends/test_torch/test_blas_and_lapack_ops.py
+++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_blas_and_lapack_ops.py
@@ -1,15 +1,11 @@
 # global
 import sys
 import numpy as np
-from hypothesis import strategies as st, assume
+from hypothesis import strategies as st
 
 # local
 import ivy_tests.test_ivy.helpers as helpers
 from ivy_tests.test_ivy.helpers import handle_frontend_test
-from ivy_tests.test_ivy.test_functional.test_core.test_linalg import _matrix_rank_helper
-from ivy_tests.test_ivy.helpers.hypothesis_helpers.general_helpers import (
-    matrix_is_stable,
-)
 
 
 # --- Helpers --- #
@@ -691,37 +687,6 @@ def test_torch_matmul(
     )
 
 
-# matrix_rank
-@handle_frontend_test(
-    fn_tree="torch.linalg.matrix_rank",
-    # aliases=["torch.matrix_rank",], deprecated since 1.9. uncomment with multi-version
-    # testing pipeline
-    dtype_x_hermitian_atol_rtol=_matrix_rank_helper(),
-)
-def test_torch_matrix_rank(
-    dtype_x_hermitian_atol_rtol,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-    backend_fw,
-):
-    dtype, x, hermitian, atol, rtol = dtype_x_hermitian_atol_rtol
-    assume(matrix_is_stable(x, cond_limit=10))
-    helpers.test_frontend_function(
-        input_dtypes=dtype,
-        backend_to_test=backend_fw,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        A=x,
-        atol=atol,
-        rtol=rtol,
-        hermitian=hermitian,
-    )
-
-
 # mm
 @handle_frontend_test(
     fn_tree="torch.mm",
diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_convolution_functions.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_convolution_functions.py
index de8d285f5d7d3..6c69aa5b0dddd 100644
--- a/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_convolution_functions.py
+++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_convolution_functions.py
@@ -523,6 +523,9 @@ def test_torch_unfold(
    backend_fw,
 ):
     dtype, vals, kernel_shape, dilations, strides, padding = dtype_vals
+    # TODO add bfloat16 to unsupported dtypes of the tested function
+    if backend_fw == "paddle":
+        assume("bfloat16" not in dtype[0])
     helpers.test_frontend_function(
         input_dtypes=dtype,
         backend_to_test=backend_fw,
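The `Shape.__getnewargs__` addition tested below tells pickle how to rebuild a fully known `Shape` from a plain tuple of its dimensions. A sketch of the expected round trip (assuming `ivy.Shape` picks up the default pickling path once `__getnewargs__` is defined; untested here):

```python
import pickle

import ivy

# __getnewargs__ supplies the constructor arguments, so a fully known
# Shape should survive a pickle round trip.
shape = ivy.Shape((2, 3, 4))
restored = pickle.loads(pickle.dumps(shape))
assert tuple(restored) == (2, 3, 4)
```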
diff --git a/ivy_tests/test_ivy/test_misc/test_shape.py b/ivy_tests/test_ivy/test_misc/test_shape.py
index 83e4b9644b8f8..7be8231d909d4 100644
--- a/ivy_tests/test_ivy/test_misc/test_shape.py
+++ b/ivy_tests/test_ivy/test_misc/test_shape.py
@@ -182,6 +182,36 @@ def test_shape__getitem__(
     )
 
 
+@handle_method(
+    init_tree=CLASS_TREE,
+    method_tree="Shape.__getnewargs__",
+    shape=helpers.get_shape(),
+)
+def test_shape__getnewargs__(
+    shape,
+    method_name,
+    class_name,
+    ground_truth_backend,
+    backend_fw,
+    init_flags,
+    method_flags,
+    on_device,
+):
+    helpers.test_method(
+        on_device=on_device,
+        ground_truth_backend=ground_truth_backend,
+        backend_to_test=backend_fw,
+        init_flags=init_flags,
+        method_flags=method_flags,
+        init_all_as_kwargs_np={"shape_tup": shape},
+        init_input_dtypes=DUMMY_DTYPE,
+        method_input_dtypes=[],
+        method_all_as_kwargs_np={},
+        class_name=class_name,
+        method_name=method_name,
+    )
+
+
 @handle_method(
     init_tree=CLASS_TREE,
     method_tree="Shape.__gt__",
@@ -442,3 +472,33 @@ def test_shape_in_conditions():
     shape = ivy.Shape(())
     condition_is_true = True if shape else False
     assert not condition_is_true
+
+
+@handle_method(
+    init_tree=CLASS_TREE,
+    method_tree="Shape.numel",
+    shape=helpers.get_shape(),
+)
+def test_shape_numel(
+    shape,
+    method_name,
+    class_name,
+    ground_truth_backend,
+    backend_fw,
+    init_flags,
+    method_flags,
+    on_device,
+):
+    helpers.test_method(
+        on_device=on_device,
+        ground_truth_backend=ground_truth_backend,
+        backend_to_test=backend_fw,
+        init_flags=init_flags,
+        method_flags=method_flags,
+        init_all_as_kwargs_np={"shape_tup": shape},
+        init_input_dtypes=DUMMY_DTYPE,
+        method_input_dtypes=[],
+        method_all_as_kwargs_np={},
+        class_name=class_name,
+        method_name=method_name,
+    )
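`Shape.numel` mirrors the usual element-count semantics: the product of all dimensions, with an empty (rank-0) shape giving 1. A sketch of the expected behavior (assuming `ivy.Shape` accepts a tuple and is iterable, as the tests above do):

```python
import math

import ivy

# numel is the product of the dims; math.prod of an empty tuple is 1,
# which matches the res = 1 accumulator in the implementation.
assert ivy.Shape((2, 3, 4)).numel() == math.prod((2, 3, 4)) == 24
assert ivy.Shape(()).numel() == 1
```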