diff --git a/solidago/src/solidago/preference_learning/generalized_bradley_terry.py b/solidago/src/solidago/preference_learning/generalized_bradley_terry.py
index 832117c144..7af2b2a55b 100644
--- a/solidago/src/solidago/preference_learning/generalized_bradley_terry.py
+++ b/solidago/src/solidago/preference_learning/generalized_bradley_terry.py
@@ -57,12 +57,11 @@ def log_likelihood_function(self) -> Callable[[npt.NDArray, npt.NDArray], float]
         """
 
     @cached_property
-    def loss_increase_to_solve(self):
-        """
-        This function is a convex negative log likelihood, translated such
-        that its minimum has a constant negative value at `delta=0`. The
-        roots of this function are used to compute the uncertainties
-        intervals. If it has only a single root, then uncertainty on the
+    def translated_negative_log_likelihood(self):
+        """This function is a convex negative log likelihood, translated such
+        that its minimum has a constant negative value at `delta=0`. The
+        roots of this function are used to compute the uncertainties
+        intervals. If it has only a single root, then uncertainty on the
         other side is considered infinite.
         """
         ll_function = self.log_likelihood_function
@@ -162,7 +161,7 @@ def get_derivative_args(coord: int, sol: np.ndarray):
             ).to_numpy()
             try:
                 uncertainties_left[coordinate] = -1 * njit_brentq(
-                    self.loss_increase_to_solve,
+                    self.translated_negative_log_likelihood,
                     args=(score_diff, r_actual, comparison_indicator, ll_actual),
                     xtol=1e-2,
                     a=-self.MAX_UNCERTAINTY,
@@ -174,7 +173,7 @@ def get_derivative_args(coord: int, sol: np.ndarray):
 
             try:
                 uncertainties_right[coordinate] = njit_brentq(
-                    self.loss_increase_to_solve,
+                    self.translated_negative_log_likelihood,
                     args=(score_diff, r_actual, comparison_indicator, ll_actual),
                     xtol=1e-2,
                     a=0.0,
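
For readers skimming the patch: the renamed property implements the mechanism its docstring describes, i.e. a convex negative log likelihood translated so it is negative at `delta=0`, whose roots (found by Brent's method) bound the uncertainty interval on each side. Below is a minimal, self-contained sketch of that idea, not Solidago's implementation: the toy exponential NLL, the `LOSS_INCREASE` constant, the `one_sided_uncertainty` helper, and the use of SciPy's `brentq` in place of the repository's numba-compiled `njit_brentq` are all illustrative assumptions.

```python
from math import exp

from scipy.optimize import brentq

LOSS_INCREASE = 1.0    # assumed loss increase that defines the interval
MAX_UNCERTAINTY = 1e3  # assumed search bound, mirroring self.MAX_UNCERTAINTY


def negative_log_likelihood(delta: float) -> float:
    """Toy convex NLL of a score perturbation `delta`, minimal at delta=0
    (a stand-in for the comparison-based likelihood in the patched file)."""
    return exp(delta) - delta - 1.0


def translated_negative_log_likelihood(delta: float) -> float:
    """The toy NLL translated so that its minimum at delta=0 takes the
    constant negative value -LOSS_INCREASE; its roots mark the deltas at
    which the loss has increased by LOSS_INCREASE."""
    return negative_log_likelihood(delta) - negative_log_likelihood(0.0) - LOSS_INCREASE


def one_sided_uncertainty(a: float, b: float) -> float:
    """Root of the translated NLL on [a, b]. If brentq finds no sign change,
    this is the docstring's single-root case and the uncertainty on that
    side is considered infinite."""
    try:
        return abs(brentq(translated_negative_log_likelihood, a, b, xtol=1e-2))
    except ValueError:
        return float("inf")


uncertainty_left = one_sided_uncertainty(-MAX_UNCERTAINTY, 0.0)   # ~1.84
uncertainty_right = one_sided_uncertainty(0.0, MAX_UNCERTAINTY)   # ~1.15
```

The `except ValueError` branch corresponds to the docstring's single-root case: `brentq` raises when the function does not change sign over the bracket, and the uncertainty on that side is then treated as infinite. The diff's two call sites do the same thing on each half-axis, negating the left-hand root so both uncertainties are reported as positive magnitudes.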