From 9be9cc03571364a42b46fad7dfc2a5c64e888a79 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 17 Jun 2024 17:03:07 +0000
Subject: [PATCH] chore(pre-commit.ci): auto fixes

---
 README.md                               |  1 +
 src/boost_loss/base.py                  |  2 +-
 src/boost_loss/regression/regression.py |  4 +---
 src/boost_loss/regression/sklearn.py    | 31 ++++++++++++++-----------
 src/boost_loss/sklearn.py               |  6 ++---
 5 files changed, 23 insertions(+), 21 deletions(-)

diff --git a/README.md b/README.md
index 7ff98e8..a09a715 100644
--- a/README.md
+++ b/README.md
@@ -117,4 +117,5 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
 This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome!
 
 [^bokbokbok]: Inspired by [orchardbirds/bokbokbok](https://github.com/orchardbirds/bokbokbok)
+
 [^autograd]: Inspired by [TomerRonen34/treeboost_autograd](https://github.com/TomerRonen34/treeboost_autograd)
diff --git a/src/boost_loss/base.py b/src/boost_loss/base.py
index fe9bcae..f41f606 100644
--- a/src/boost_loss/base.py
+++ b/src/boost_loss/base.py
@@ -343,7 +343,7 @@ def eval_metric_lgb(
         self,
         y_true: NDArray | lgb.Dataset | xgb.DMatrix,
         y_pred: NDArray | lgb.Dataset | xgb.DMatrix,
-        sample_weight: NDArray | lgb.Dataset | xgb.DMatrix | None = None
+        sample_weight: NDArray | lgb.Dataset | xgb.DMatrix | None = None,
         # not used, exists for eval_metric_xgb_sklearn
     ) -> tuple[str, float, bool]:
         """LightGBM-compatible interface"""
diff --git a/src/boost_loss/regression/regression.py b/src/boost_loss/regression/regression.py
index 66228c6..d87a043 100644
--- a/src/boost_loss/regression/regression.py
+++ b/src/boost_loss/regression/regression.py
@@ -204,9 +204,7 @@ def grad(self, y_true: NDArray, y_pred: NDArray) -> NDArray:
         return -y_true / (1 + np.exp(y_true * y_pred))
 
     def hess(self, y_true: NDArray, y_pred: NDArray) -> NDArray:
-        return (
-            y_true**2 * np.exp(y_true * y_pred) / (1 + np.exp(y_true * y_pred)) ** 2
-        )
+        return y_true**2 * np.exp(y_true * y_pred) / (1 + np.exp(y_true * y_pred)) ** 2
 
 
 class MSLELoss(LossBase):
diff --git a/src/boost_loss/regression/sklearn.py b/src/boost_loss/regression/sklearn.py
index 8b507ed..d0cb4c3 100644
--- a/src/boost_loss/regression/sklearn.py
+++ b/src/boost_loss/regression/sklearn.py
@@ -217,32 +217,37 @@ def predict_raw(self, X: Any, **predict_params: Any) -> NDArray[Any]:
     def predict(
         self,
         X: Any,
-        type_: Literal["mean", "median", "var", "std", "range", "mae", "mse"]
-        | None = None,
+        type_: (
+            Literal["mean", "median", "var", "std", "range", "mae", "mse"] | None
+        ) = None,
         return_std: Literal[False] = False,
         **predict_params: Any,
-    ) -> NDArray[Any]:
-        ...
+    ) -> NDArray[Any]: ...
 
     @overload
     def predict(
         self,
         X: Any,
-        type_: tuple[
-            Literal["mean", "median"], Literal["var", "std", "range", "mae", "mse"]
-        ]
-        | None = None,
+        type_: (
+            tuple[
+                Literal["mean", "median"], Literal["var", "std", "range", "mae", "mse"]
+            ]
+            | None
+        ) = None,
         return_std: Literal[True] = ...,
         **predict_params: Any,
-    ) -> tuple[NDArray[Any], NDArray[Any]]:
-        ...
+    ) -> tuple[NDArray[Any], NDArray[Any]]: ...
 
     def predict(
         self,
         X: Any,
-        type_: Literal["mean", "median", "var", "std", "range", "mae", "mse"]
-        | tuple[Literal["mean", "median"], Literal["var", "std", "range", "mae", "mse"]]
-        | None = None,
+        type_: (
+            Literal["mean", "median", "var", "std", "range", "mae", "mse"]
+            | tuple[
+                Literal["mean", "median"], Literal["var", "std", "range", "mae", "mse"]
+            ]
+            | None
+        ) = None,
         return_std: bool = False,
         **predict_params: Any,
     ) -> NDArray[Any] | tuple[NDArray[Any], NDArray[Any]]:
diff --git a/src/boost_loss/sklearn.py b/src/boost_loss/sklearn.py
index aa0cca1..0988df4 100644
--- a/src/boost_loss/sklearn.py
+++ b/src/boost_loss/sklearn.py
@@ -30,8 +30,7 @@ def apply_custom_loss(
     target_transformer: None = ...,
     recursive: bool = ...,
     recursive_strict: bool = ...,
-) -> TEstimator:
-    ...
+) -> TEstimator: ...
 
 
 @overload
@@ -46,8 +45,7 @@ def apply_custom_loss(
     target_transformer: BaseEstimator = ...,
     recursive: bool = ...,
     recursive_strict: bool = ...,
-) -> TransformedTargetRegressor:
-    ...
+) -> TransformedTargetRegressor: ...
 
 
 def apply_custom_loss(
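
Note, not part of the patch: the one hunk that touches a formula is the reflow of hess in src/boost_loss/regression/regression.py; the expression itself is unchanged. As a quick sanity check of the closed forms in that hunk, the sketch below compares them against central finite differences. It assumes the surrounding loss is log(1 + exp(-y_true * y_pred)), which is what the grad shown implies; the free-standing loss/grad/hess functions are illustrative repackagings of the hunk's method bodies, not the library's API.

import numpy as np
from numpy.typing import NDArray


def loss(y_true: NDArray, y_pred: NDArray) -> NDArray:
    # Assumed loss: log(1 + exp(-y_true * y_pred)), consistent with grad below.
    return np.log1p(np.exp(-y_true * y_pred))


def grad(y_true: NDArray, y_pred: NDArray) -> NDArray:
    # First derivative w.r.t. y_pred, copied from the regression.py hunk.
    return -y_true / (1 + np.exp(y_true * y_pred))


def hess(y_true: NDArray, y_pred: NDArray) -> NDArray:
    # Second derivative; the hunk only joins this expression onto one line.
    return y_true**2 * np.exp(y_true * y_pred) / (1 + np.exp(y_true * y_pred)) ** 2


rng = np.random.default_rng(0)
y_true = rng.choice([-1.0, 1.0], size=100)
y_pred = rng.normal(size=100)
eps = 1e-6

# Central finite differences: d(loss)/d(y_pred) should match grad,
# and d(grad)/d(y_pred) should match hess.
fd_grad = (loss(y_true, y_pred + eps) - loss(y_true, y_pred - eps)) / (2 * eps)
fd_hess = (grad(y_true, y_pred + eps) - grad(y_true, y_pred - eps)) / (2 * eps)

assert np.allclose(fd_grad, grad(y_true, y_pred), atol=1e-5)
assert np.allclose(fd_hess, hess(y_true, y_pred), atol=1e-5)

If the assumption about the loss is wrong, the first assert will fail; the second checks hess against grad alone, so it holds for the hunk as written either way.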