diff --git a/backend/tournesol/serializers/inconsistencies.py b/backend/tournesol/serializers/inconsistencies.py
index 5423d16c85..a42fb0135e 100644
--- a/backend/tournesol/serializers/inconsistencies.py
+++ b/backend/tournesol/serializers/inconsistencies.py
@@ -56,6 +56,7 @@ class ScoreInconsistencySerializer(Serializer):
     entity_1_rating = serializers.FloatField()
     entity_2_rating = serializers.FloatField()
     comparison_score = serializers.FloatField()
+    comparison_score_max = serializers.IntegerField()
     expected_comparison_score = serializers.FloatField()
diff --git a/backend/tournesol/tests/test_api_inconsistencies.py b/backend/tournesol/tests/test_api_inconsistencies.py
index 562758cff5..d4abf5e946 100644
--- a/backend/tournesol/tests/test_api_inconsistencies.py
+++ b/backend/tournesol/tests/test_api_inconsistencies.py
@@ -405,6 +405,7 @@ def test_response_format(self):
         self.assertEqual(results["entity_1_rating"], rating_1_score)
         self.assertEqual(results["entity_2_rating"], rating_2_score)
         self.assertEqual(results["comparison_score"], comparison_score)
+        self.assertEqual(results["comparison_score_max"], 10)
         self.assertGreater(results["expected_comparison_score"], 0)
         self.assertLess(results["expected_comparison_score"], 1)
diff --git a/backend/tournesol/views/inconsistencies.py b/backend/tournesol/views/inconsistencies.py
index a8f5d27803..de652b813d 100755
--- a/backend/tournesol/views/inconsistencies.py
+++ b/backend/tournesol/views/inconsistencies.py
@@ -13,7 +13,6 @@
     ScoreInconsistenciesFilterSerializer,
     ScoreInconsistenciesSerializer,
 )
-from tournesol.utils.constants import COMPARISON_MAX
 from tournesol.views.mixins.poll import PollScopedViewMixin
@@ -214,6 +213,7 @@ def get(self, request, *args, **kwargs):
                 "comparison__entity_2__uid",
                 "criteria",
                 "score",
+                "score_max",
             )
         )
@@ -229,16 +229,18 @@ def get(self, request, *args, **kwargs):
             )
         )
 
-        response = self._list_inconsistent_comparisons(contributor_comparisons_criteria,
-                                                       ratings,
-                                                       filters["inconsistency_threshold"],
-                                                       poll.criterias_list)
+        response = self._list_inconsistent_comparisons(
+            contributor_comparisons_criteria,
+            ratings,
+            filters["inconsistency_threshold"],
+            poll.criterias_list,
+        )
 
         return Response(ScoreInconsistenciesSerializer(response).data)
 
     @staticmethod
     def _list_inconsistent_comparisons(  # pylint: disable=too-many-locals
-        criteria_comparisons: list,
+        criteria_comparisons: list[dict],
         criteria_ratings: list,
         threshold: float,
         criteria_list: list
@@ -265,6 +267,7 @@ def _list_inconsistent_comparisons(  # pylint: disable=too-many-locals
             entity_2 = comparison_criteria["comparison__entity_2__uid"]
             criteria = comparison_criteria["criteria"]
             comparison_score = comparison_criteria["score"]
+            comparison_score_max = comparison_criteria["score_max"]
 
             try:
                 rating_1 = ratings_map[(entity_1, criteria)]
@@ -278,6 +281,7 @@ def _list_inconsistent_comparisons(  # pylint: disable=too-many-locals
                 rating_1["score"],
                 rating_2["score"],
                 comparison_score,
+                comparison_score_max,
                 uncertainty,
             )
@@ -296,6 +300,7 @@ def _list_inconsistent_comparisons(  # pylint: disable=too-many-locals
                         "entity_1_rating": rating_1["score"],
                         "entity_2_rating": rating_2["score"],
                         "comparison_score": comparison_score,
+                        "comparison_score_max": comparison_score_max,
                         "expected_comparison_score": ideal_comparison_score,
                     }
                 )
@@ -322,10 +327,13 @@ def _list_inconsistent_comparisons(  # pylint: disable=too-many-locals
         return response
 
     @staticmethod
-    def _calculate_inconsistency(entity_1_calculated_rating,
-                                 entity_2_calculated_rating,
-                                 comparison_score,
-                                 uncertainty) -> float:
+    def _calculate_inconsistency(
+        entity_1_calculated_rating,
+        entity_2_calculated_rating,
+        comparison_score,
+        comparison_score_max,
+        uncertainty,
+    ) -> tuple[float, float]:
         """
         Calculate the inconsistency between the comparison criteria score
         and the general rating of the entity.
@@ -361,18 +369,20 @@ def _calculate_inconsistency(entity_1_calculated_rating,
 
         base_rating_difference = entity_2_calculated_rating - entity_1_calculated_rating
 
-        def inconsistency_calculation(rating_diff):
-            return abs(comparison_score - COMPARISON_MAX * rating_diff / sqrt(rating_diff**2 + 1))
+        def inconsistency_calculation(rating_diff) -> float:
+            return abs(
+                comparison_score - comparison_score_max * rating_diff / sqrt(rating_diff**2 + 1)
+            )
 
         min_rating_difference = base_rating_difference - uncertainty
         max_rating_difference = base_rating_difference + uncertainty
 
-        if comparison_score <= -COMPARISON_MAX:
+        if comparison_score <= -comparison_score_max:
             min_inconsistency = inconsistency_calculation(min_rating_difference)
-        elif comparison_score >= COMPARISON_MAX:
+        elif comparison_score >= comparison_score_max:
             min_inconsistency = inconsistency_calculation(max_rating_difference)
         else:
-            root = comparison_score / sqrt(COMPARISON_MAX ** 2 - comparison_score ** 2)
+            root = comparison_score / sqrt(comparison_score_max**2 - comparison_score**2)
             if max_rating_difference < root:
                 # The inconsistency is decreasing with the rating_difference
                 min_inconsistency = inconsistency_calculation(max_rating_difference)
@@ -381,12 +391,12 @@ def inconsistency_calculation(rating_diff):
                 min_inconsistency = inconsistency_calculation(min_rating_difference)
             else:
                 # The root is a possible value for the rating_difference
-                min_inconsistency = 0
+                min_inconsistency = 0.0
 
         # Comparison imprecision of 0.5, because comparisons scores are on integers, not floats
         inconsistency = max(min_inconsistency - 0.5, 0)
-
-        expected_comparison_score = \
-            COMPARISON_MAX * base_rating_difference / sqrt(base_rating_difference**2 + 1)
+        expected_comparison_score = (
+            comparison_score_max * base_rating_difference / sqrt(base_rating_difference**2 + 1)
+        )
 
         return inconsistency, expected_comparison_score
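
For reviewers: the patch threads the per-comparison `score_max` through the inconsistency math in place of the former global `COMPARISON_MAX`. Below is a minimal standalone Python sketch of that calculation. It mirrors the patched `_calculate_inconsistency` but is not part of the diff; the function names and the `score_max=10.0` default are illustrative assumptions only.

# Standalone sketch (not part of the patch) of the calculation performed by
# _calculate_inconsistency, parameterized by the per-comparison score_max.
from math import sqrt


def expected_score(rating_diff: float, score_max: float) -> float:
    # Map an unbounded rating difference onto the open interval (-score_max, score_max).
    return score_max * rating_diff / sqrt(rating_diff**2 + 1)


def inconsistency(
    comparison_score: float,
    base_rating_diff: float,
    uncertainty: float,
    score_max: float = 10.0,  # illustrative default, matching the test's expectation of 10
) -> float:
    # Smallest |comparison_score - expected_score(d)| over
    # d in [base_rating_diff - uncertainty, base_rating_diff + uncertainty].
    low = base_rating_diff - uncertainty
    high = base_rating_diff + uncertainty
    if comparison_score <= -score_max:
        # expected_score never reaches -score_max, so the gap shrinks as d decreases.
        best = abs(comparison_score - expected_score(low, score_max))
    elif comparison_score >= score_max:
        # Symmetrically, the gap shrinks as d increases.
        best = abs(comparison_score - expected_score(high, score_max))
    else:
        # The d at which expected_score(d) equals comparison_score exactly.
        root = comparison_score / sqrt(score_max**2 - comparison_score**2)
        if high < root:
            best = abs(comparison_score - expected_score(high, score_max))
        elif low > root:
            best = abs(comparison_score - expected_score(low, score_max))
        else:
            best = 0.0  # the root lies inside the uncertainty interval
    # 0.5 slack because comparison scores are integers.
    return max(best - 0.5, 0.0)


# Example: a +10/10 comparison against a small positive rating gap is flagged.
print(inconsistency(10, base_rating_diff=0.2, uncertainty=0.1))  # ~6.63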