diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8ada61d..25c00ff 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,7 +1,7 @@
 # See https://pre-commit.com for more information
 # See https://pre-commit.com/hooks.html for more hooks
 exclude: "CHANGELOG.md|.copier-answers.yml"
-default_stages: [commit]
+default_stages: [pre-commit]
 
 ci:
   autofix_commit_msg: "chore(pre-commit.ci): auto fixes"
@@ -9,12 +9,12 @@ ci:
 
 repos:
   - repo: https://github.com/commitizen-tools/commitizen
-    rev: v3.30.1
+    rev: v4.0.0
     hooks:
       - id: commitizen
         stages: [commit-msg]
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.5.0
+    rev: v5.0.0
     hooks:
       - id: debug-statements
       - id: check-builtin-literals
@@ -28,11 +28,11 @@ repos:
       - id: end-of-file-fixer
       - id: trailing-whitespace
   - repo: https://github.com/python-poetry/poetry
-    rev: 1.7.1
+    rev: 1.8.0
     hooks:
       - id: poetry-check
   - repo: https://github.com/pre-commit/mirrors-prettier
-    rev: v3.0.3
+    rev: v4.0.0-alpha.8
     hooks:
       - id: prettier
         args: ["--tab-width", "2"]
@@ -42,7 +42,7 @@ repos:
       - id: pyupgrade
         args: [--py38-plus]
   - repo: https://github.com/PyCQA/autoflake
-    rev: v2.2.1
+    rev: v2.3.1
     hooks:
       - id: autoflake
   - repo: https://github.com/PyCQA/isort
@@ -50,7 +50,7 @@
     hooks:
       - id: isort
   - repo: https://github.com/psf/black
-    rev: 23.12.1
+    rev: 24.10.0
     hooks:
       - id: black
   - repo: https://github.com/codespell-project/codespell
@@ -58,20 +58,20 @@
     hooks:
       - id: codespell
   - repo: https://github.com/PyCQA/flake8
-    rev: 6.1.0
+    rev: 7.1.1
     hooks:
       - id: flake8
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.8.0
+    rev: v1.13.0
     hooks:
       - id: mypy
         additional_dependencies: ["attrs"]
   - repo: https://github.com/PyCQA/bandit
-    rev: 1.7.10
+    rev: 1.8.0
     hooks:
       - id: bandit
         args: [-x, tests]
   - repo: https://github.com/srstevenson/nb-clean
-    rev: "3.2.0"
+    rev: "4.0.1"
     hooks:
       - id: nb-clean
diff --git a/README.md b/README.md
index 7ff98e8..a09a715 100644
--- a/README.md
+++ b/README.md
@@ -117,4 +117,5 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
 
 This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome!
 
 [^bokbokbok]: Inspired by [orchardbirds/bokbokbok](https://github.com/orchardbirds/bokbokbok)
+[^autograd]: Inspired by [TomerRonen34/treeboost_autograd](https://github.com/TomerRonen34/treeboost_autograd)
diff --git a/src/boost_loss/base.py b/src/boost_loss/base.py
index fe9bcae..f41f606 100644
--- a/src/boost_loss/base.py
+++ b/src/boost_loss/base.py
@@ -343,7 +343,7 @@ def eval_metric_lgb(
         self,
         y_true: NDArray | lgb.Dataset | xgb.DMatrix,
         y_pred: NDArray | lgb.Dataset | xgb.DMatrix,
-        sample_weight: NDArray | lgb.Dataset | xgb.DMatrix | None = None
+        sample_weight: NDArray | lgb.Dataset | xgb.DMatrix | None = None,  # not used, exists for eval_metric_xgb_sklearn
     ) -> tuple[str, float, bool]:
         """LightGBM-compatible interface"""
diff --git a/src/boost_loss/regression/regression.py b/src/boost_loss/regression/regression.py
index 66228c6..d87a043 100644
--- a/src/boost_loss/regression/regression.py
+++ b/src/boost_loss/regression/regression.py
@@ -204,9 +204,7 @@ def grad(self, y_true: NDArray, y_pred: NDArray) -> NDArray:
         return -y_true / (1 + np.exp(y_true * y_pred))
 
     def hess(self, y_true: NDArray, y_pred: NDArray) -> NDArray:
-        return (
-            y_true**2 * np.exp(y_true * y_pred) / (1 + np.exp(y_true * y_pred)) ** 2
-        )
+        return y_true**2 * np.exp(y_true * y_pred) / (1 + np.exp(y_true * y_pred)) ** 2
 
 
 class MSLELoss(LossBase):
diff --git a/src/boost_loss/regression/sklearn.py b/src/boost_loss/regression/sklearn.py
index 8b507ed..d0cb4c3 100644
--- a/src/boost_loss/regression/sklearn.py
+++ b/src/boost_loss/regression/sklearn.py
@@ -217,32 +217,37 @@ def predict_raw(self, X: Any, **predict_params: Any) -> NDArray[Any]:
     def predict(
         self,
         X: Any,
-        type_: Literal["mean", "median", "var", "std", "range", "mae", "mse"]
-        | None = None,
+        type_: (
+            Literal["mean", "median", "var", "std", "range", "mae", "mse"] | None
+        ) = None,
         return_std: Literal[False] = False,
         **predict_params: Any,
-    ) -> NDArray[Any]:
-        ...
+    ) -> NDArray[Any]: ...
 
     @overload
     def predict(
         self,
         X: Any,
-        type_: tuple[
-            Literal["mean", "median"], Literal["var", "std", "range", "mae", "mse"]
-        ]
-        | None = None,
+        type_: (
+            tuple[
+                Literal["mean", "median"], Literal["var", "std", "range", "mae", "mse"]
+            ]
+            | None
+        ) = None,
         return_std: Literal[True] = ...,
         **predict_params: Any,
-    ) -> tuple[NDArray[Any], NDArray[Any]]:
-        ...
+    ) -> tuple[NDArray[Any], NDArray[Any]]: ...
 
     def predict(
         self,
         X: Any,
-        type_: Literal["mean", "median", "var", "std", "range", "mae", "mse"]
-        | tuple[Literal["mean", "median"], Literal["var", "std", "range", "mae", "mse"]]
-        | None = None,
+        type_: (
+            Literal["mean", "median", "var", "std", "range", "mae", "mse"]
+            | tuple[
+                Literal["mean", "median"], Literal["var", "std", "range", "mae", "mse"]
+            ]
+            | None
+        ) = None,
         return_std: bool = False,
         **predict_params: Any,
     ) -> NDArray[Any] | tuple[NDArray[Any], NDArray[Any]]:
diff --git a/src/boost_loss/sklearn.py b/src/boost_loss/sklearn.py
index aa0cca1..0988df4 100644
--- a/src/boost_loss/sklearn.py
+++ b/src/boost_loss/sklearn.py
@@ -30,8 +30,7 @@ def apply_custom_loss(
     target_transformer: None = ...,
     recursive: bool = ...,
     recursive_strict: bool = ...,
-) -> TEstimator:
-    ...
+) -> TEstimator: ...
 
 
 @overload
@@ -46,8 +45,7 @@ def apply_custom_loss(
     target_transformer: BaseEstimator = ...,
    recursive: bool = ...,
     recursive_strict: bool = ...,
-) -> TransformedTargetRegressor:
-    ...
+) -> TransformedTargetRegressor: ...
 
 
 def apply_custom_loss(
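Aside (not part of the patch above): the grad/hess pair touched in the regression.py hunk is the first and second derivative of the logistic loss log(1 + exp(-y_true * y_pred)) for labels in {-1, +1}. The standalone NumPy sketch below, using made-up sample values, checks both expressions against central finite differences; everything in it is illustrative and not taken from the repository.

import numpy as np

rng = np.random.default_rng(0)
y_true = rng.choice([-1.0, 1.0], size=8)  # hypothetical +/-1 labels
y_pred = rng.normal(size=8)               # hypothetical raw model scores
eps = 1e-4

def loss(f):
    # logistic loss, written with log1p for numerical stability
    return np.log1p(np.exp(-y_true * f))

# closed-form gradient and hessian as written in the hunk above
grad = -y_true / (1 + np.exp(y_true * y_pred))
hess = y_true**2 * np.exp(y_true * y_pred) / (1 + np.exp(y_true * y_pred)) ** 2

# central finite differences of the loss with respect to y_pred
grad_fd = (loss(y_pred + eps) - loss(y_pred - eps)) / (2 * eps)
hess_fd = (loss(y_pred + eps) - 2 * loss(y_pred) + loss(y_pred - eps)) / eps**2

assert np.allclose(grad, grad_fd, atol=1e-6)
assert np.allclose(hess, hess_fd, atol=1e-5)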