From d01e7a7c1d8af9682bed09f7f82f18424afe625b Mon Sep 17 00:00:00 2001 From: RangiLyu Date: Tue, 21 Feb 2023 21:46:12 +0800 Subject: [PATCH 01/13] [Feature] Add LVIS metric. --- mmeval/metrics/__init__.py | 3 +- mmeval/metrics/coco_detection.py | 4 +- mmeval/metrics/lvis.py | 338 ++++++++++++++++ .../test_coco_detection_metric.py | 8 +- .../test_lvis_detection_metric.py | 383 ++++++++++++++++++ 5 files changed, 729 insertions(+), 7 deletions(-) create mode 100644 mmeval/metrics/lvis.py create mode 100644 tests/test_metrics/test_lvis_detection_metric.py diff --git a/mmeval/metrics/__init__.py b/mmeval/metrics/__init__.py index ea384e00..2fcc67a8 100644 --- a/mmeval/metrics/__init__.py +++ b/mmeval/metrics/__init__.py @@ -15,6 +15,7 @@ from .keypoint_auc import KeypointAUC from .keypoint_epe import KeypointEndPointError from .keypoint_nme import KeypointNME +from .lvis import LVISDetection from .mae import MeanAbsoluteError from .matting_mse import MattingMeanSquaredError from .mean_iou import MeanIoU @@ -41,7 +42,7 @@ 'AveragePrecision', 'AVAMeanAP', 'BLEU', 'DOTAMeanAP', 'SumAbsoluteDifferences', 'GradientError', 'MattingMeanSquaredError', 'ConnectivityError', 'ROUGE', 'Perplexity', 'KeypointEndPointError', - 'KeypointAUC', 'KeypointNME' + 'KeypointAUC', 'KeypointNME', 'LVISDetection' ] _deprecated_msg = ( diff --git a/mmeval/metrics/coco_detection.py b/mmeval/metrics/coco_detection.py index 2b746cbc..5c809dfb 100644 --- a/mmeval/metrics/coco_detection.py +++ b/mmeval/metrics/coco_detection.py @@ -325,7 +325,7 @@ def gt_to_coco_json(self, gt_dicts: Sequence[dict], categories = [ dict(id=id, name=name) for id, name in enumerate( - self.dataset_meta['CLASSES']) # type:ignore + self.dataset_meta['classes']) # type:ignore ] image_infos: list = [] annotations: list = [] @@ -502,7 +502,7 @@ def compute_metric(self, results: list) -> Dict[str, float]: # handle lazy init if len(self.cat_ids) == 0: self.cat_ids = self._coco_api.get_cat_ids( - cat_names=self.dataset_meta['CLASSES']) # type: ignore + cat_names=self.dataset_meta['classes']) # type: ignore if len(self.img_ids) == 0: self.img_ids = self._coco_api.get_img_ids() diff --git a/mmeval/metrics/lvis.py b/mmeval/metrics/lvis.py new file mode 100644 index 00000000..2ae00985 --- /dev/null +++ b/mmeval/metrics/lvis.py @@ -0,0 +1,338 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import itertools +import os.path as osp +import tempfile +import warnings +from collections import OrderedDict +from typing import Dict, List, Optional, Sequence, Union + +import numpy as np +from .coco_detection import COCODetection +from mmeval.fileio import get_local_path + +try: + from lvis import LVIS, LVISEval, LVISResults + HAS_LVISAPI = True +except ImportError: + HAS_LVISAPI = False + + +class LVISDetection(COCODetection): + """LVIS evaluation metric. + + Evaluate AR, AP, and mAP for detection tasks including proposal/box + detection and instance segmentation. + + Args: + ann_file (str, optional): Path to the coco lvis format annotation file. + If not specified, ground truth annotations from the dataset will + be converted to coco lvis format. Defaults to None. + metric (str | List[str]): Metrics to be evaluated. Valid metrics + include 'bbox', 'segm', and 'proposal'. Defaults to 'bbox'. + iou_thrs (float | List[float], optional): IoU threshold to compute AP + and AR. If not specified, IoUs from 0.5 to 0.95 will be used. + Defaults to None. + classwise (bool): Whether to return the computed results of each + class. Defaults to False. 
+ proposal_nums (Sequence[int]): Numbers of proposals to be evaluated. + Defaults to (100, 300, 1000). + metric_items (List[str], optional): Metric result names to be + recorded in the evaluation result. Defaults to None. + format_only (bool): Format the output results without perform + evaluation. It is useful when you want to format the result + to a specific format and submit it to the test server. + Defaults to False. + outfile_prefix (str, optional): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Defaults to None. + backend_args (dict, optional): Arguments to instantiate the + preifx of uri corresponding backend. Defaults to None. + **kwargs: Keyword parameters passed to :class:`BaseMetric`. + + Examples: + >>> import numpy as np + >>> from mmeval import LVISDetection + >>> try: + >>> from mmeval.metrics.utils.coco_wrapper import mask_util + >>> except ImportError as e: + >>> mask_util = None + >>> + >>> num_classes = 4 + >>> fake_dataset_metas = { + ... 'CLASSES': tuple([str(i) for i in range(num_classes)]) + ... } + >>> + >>> lvis_det_metric = LVISDetection( + ... dataset_meta=fake_dataset_metas, + ... metric=['bbox', 'segm'] + ... ) + >>> def _gen_bboxes(num_bboxes, img_w=256, img_h=256): + ... # random generate bounding boxes in 'xyxy' formart. + ... x = np.random.rand(num_bboxes, ) * img_w + ... y = np.random.rand(num_bboxes, ) * img_h + ... w = np.random.rand(num_bboxes, ) * (img_w - x) + ... h = np.random.rand(num_bboxes, ) * (img_h - y) + ... return np.stack([x, y, x + w, y + h], axis=1) + >>> + >>> def _gen_masks(bboxes, img_w=256, img_h=256): + ... if mask_util is None: + ... raise ImportError( + ... 'Please try to install official pycocotools by ' + ... '"pip install pycocotools"') + ... masks = [] + ... for i, bbox in enumerate(bboxes): + ... mask = np.zeros((img_h, img_w)) + ... bbox = bbox.astype(np.int32) + ... box_mask = (np.random.rand( + ... bbox[3] - bbox[1], + ... bbox[2] - bbox[0]) > 0.3).astype(np.int32) + ... mask[bbox[1]:bbox[3], bbox[0]:bbox[2]] = box_mask + ... masks.append( + ... mask_util.encode( + ... np.array(mask[:, :, np.newaxis], order='F', + ... dtype='uint8'))[0]) # encoded with RLE + ... return masks + >>> + >>> img_id = 1 + >>> img_w, img_h = 256, 256 + >>> num_bboxes = 10 + >>> pred_boxes = _gen_bboxes( + ... num_bboxes=num_bboxes, + ... img_w=img_w, + ... img_h=img_h) + >>> pred_masks = _gen_masks( + ... bboxes=pred_boxes, + ... img_w=img_w, + ... img_h=img_h) + >>> prediction = { + ... 'img_id': img_id, + ... 'bboxes': pred_boxes, + ... 'scores': np.random.rand(num_bboxes, ), + ... 'labels': np.random.randint(0, num_classes, size=(num_bboxes, )), + ... 'masks': pred_masks + ... } + >>> gt_boxes = _gen_bboxes( + ... num_bboxes=num_bboxes, + ... img_w=img_w, + ... img_h=img_h) + >>> gt_masks = _gen_masks( + ... bboxes=pred_boxes, + ... img_w=img_w, + ... img_h=img_h) + >>> groundtruth = { + ... 'img_id': img_id, + ... 'width': img_w, + ... 'height': img_h, + ... 'bboxes': gt_boxes, + ... 'labels': np.random.randint(0, num_classes, size=(num_bboxes, )), + ... 'masks': gt_masks, + ... 'ignore_flags': np.zeros(num_bboxes) + ... 
} + >>> lvis_det_metric(predictions=[prediction, ], groundtruths=[groundtruth, ]) # doctest: +ELLIPSIS # noqa: E501 + {'bbox_mAP': ..., 'bbox_mAP_50': ..., ..., + 'segm_mAP': ..., 'segm_mAP_50': ..., ..., + 'bbox_result': ..., 'segm_result': ..., ...} + """ + + def __init__(self, + ann_file: Optional[str] = None, + metric: Union[str, List[str]] = 'bbox', + classwise: bool = False, + proposal_nums: Sequence[int] = (100, 300, 1000), + iou_thrs: Optional[Union[float, Sequence[float]]] = None, + metric_items: Optional[Sequence[str]] = None, + format_only: bool = False, + outfile_prefix: Optional[str] = None, + backend_args: Optional[dict] = None, + **kwargs) -> None: + if not HAS_LVISAPI: + raise RuntimeError( + 'Package lvis is not installed. Please run "pip install ' + 'git+https://github.com/lvis-dataset/lvis-api.git".') + super().__init__( + metric=metric, + classwise=classwise, + proposal_nums=proposal_nums, + iou_thrs=iou_thrs, + metric_items=metric_items, + format_only=format_only, + outfile_prefix=outfile_prefix, + backend_args=backend_args, + **kwargs) + # if ann_file is not specified, + # initialize lvis api with the converted dataset + self._lvis_api: Optional[LVIS] # type: ignore + if ann_file is not None: + with get_local_path( + filepath=ann_file, + backend_args=backend_args) as local_path: + self._lvis_api = LVIS(local_path) + else: + self._lvis_api = None + + # @property + # def _coco_api(self): + # return self._lvis_api + + # @_coco_api.setter + # def _coco_api(self, _coco_api): + # self._lvis_api = _coco_api + + def add_predictions(self, predictions: Sequence[Dict]) -> None: + """Add predictions only. + + If the `ann_file` has been passed, we can add predictions only. + + Args: + predictions (Sequence[dict]): Refer to + :class:`LVISDetection.add`. + """ + assert self._lvis_api is not None, 'The `ann_file` should be ' \ + 'passesd when use the `LVISDetection.add_predictions` ' \ + 'method, otherwisw use the `LVISDetection.add` instead!' + self.add(predictions, groundtruths=[{}] * len(predictions)) + + def __call__(self, *args, **kwargs) -> Dict: + """Stateless call for a metric compute.""" + + # cache states + cache_results = self._results + cache_lvis_api = self._lvis_api + cache_cat_ids = self.cat_ids + cache_img_ids = self.img_ids + + self._results = [] + self.add(*args, **kwargs) + metric_result = self.compute_metric(self._results) + + # recover states from cache + self._results = cache_results + self._lvis_api = cache_lvis_api + self.cat_ids = cache_cat_ids + self.img_ids = cache_img_ids + + return metric_result + + def compute_metric(self, results: list) -> Dict[str, float]: + """Compute the COCO metrics. + + Args: + results (List[tuple]): A list of tuple. Each tuple is the + prediction and ground truth of an image. This list has already + been synced across all ranks. + + Returns: + dict: The computed metric. The keys are the names of + the metrics, and the values are corresponding results. 
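(Editor's note, not part of the patch.) The classwise branch of compute_metric, shown in the body that follows, reads per-category AP straight out of the accumulated precision tensor. A self-contained sketch of that slicing with random stand-in data, using the axis layout stated in the code comment ([num_thrs, num_recalls, num_cats, num_area_rngs]):

import numpy as np

# Stand-in for lvis_eval.eval['precision']; entries with no ground truth are -1.
precisions = np.random.rand(10, 101, 2, 4)
cat_idx = 0
precision = precisions[:, :, cat_idx, 0]        # every IoU/recall point, area range "all"
precision = precision[precision > -1]           # drop empty cells
ap = float(np.mean(precision)) if precision.size else float('nan')
print(f'per-category AP: {ap:.3f}')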
+ """ + tmp_dir = None + if self.outfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + outfile_prefix = osp.join(tmp_dir.name, 'results') + else: + outfile_prefix = self.outfile_prefix + + # split gt and prediction list + preds, gts = zip(*results) + + if self._lvis_api is None: + # use converted gt json file to initialize coco api + print('Converting ground truth to coco lvis format...') + coco_json_path = self.gt_to_coco_json( + gt_dicts=gts, outfile_prefix=outfile_prefix) + self._lvis_api = LVIS(coco_json_path) + + # handle lazy init + if len(self.cat_ids) == 0: + self.cat_ids = self._lvis_api.get_cat_ids() + if len(self.img_ids) == 0: + self.img_ids = self._lvis_api.get_img_ids() + + # convert predictions to coco format and dump to json file + result_files = self.results2json(preds, outfile_prefix) + + eval_results: OrderedDict = OrderedDict() + if self.format_only: + print('results are saved in ' + f'{osp.dirname(outfile_prefix)}') + return eval_results + + lvis_gt = self._lvis_api + + for metric in self.metrics: + print(f'Evaluating {metric}...') + + try: + lvis_dt = LVISResults(lvis_gt, result_files[metric]) + except IndexError: + print( + 'The testing results of the whole dataset is empty.') + break + + iou_type = 'bbox' if metric == 'proposal' else metric + lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type) + lvis_eval.params.imgIds = self.img_ids + metric_items = self.metric_items + if metric == 'proposal': + lvis_eval.params.useCats = 0 + lvis_eval.params.maxDets = list(self.proposal_nums) + lvis_eval.evaluate() + lvis_eval.accumulate() + lvis_eval.summarize() + if metric_items is None: + metric_items = ['AR@300', 'ARs@300', 'ARm@300', 'ARl@300'] + for k, v in lvis_eval.get_results().items(): + if k in metric_items: + val = float('{:.3f}'.format(float(v))) + eval_results[k] = val + + else: + lvis_eval.evaluate() + lvis_eval.accumulate() + lvis_eval.summarize() + lvis_results = lvis_eval.get_results() + if self.classwise: # Compute per-category AP + # Compute per-category AP + # from https://github.com/facebookresearch/detectron2/ + precisions = lvis_eval.eval['precision'] + # precision: (iou, recall, cls, area range, max dets) + assert len(self.cat_ids) == precisions.shape[2] + + results_per_category = [] + for idx, catId in enumerate(self.cat_ids): + # area range index 0: all area ranges + # max dets index -1: typically 100 per image + # the dimensions of precisions are + # [num_thrs, num_recalls, num_cats, num_area_rngs] + nm = self._lvis_api.load_cats([catId])[0] + precision = precisions[:, :, idx, 0] + precision = precision[precision > -1] + if precision.size: + ap = np.mean(precision) + else: + ap = float('nan') + results_per_category.append( + (f'{nm["name"]}', f'{float(ap):0.3f}')) + eval_results[f'{metric}_{nm["name"]}_precision'] = round(ap, 3) + + eval_results[f'{metric}_classwise_result'] = \ + results_per_category + if metric_items is None: + metric_items = [ + 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'APr', + 'APc', 'APf' + ] + + results_list = [] + for metric_item, v in lvis_results.items(): + if metric_item in metric_items: + key = f'{metric}_{metric_item}' + val = float('{:.3f}'.format(float(v))) + results_list.append(f'{round(v, 3) * 100:.1f}') + eval_results[key] = val + eval_results[f'{metric}_result'] = results_list + + lvis_eval.print_results() + if tmp_dir is not None: + tmp_dir.cleanup() + return eval_results diff --git a/tests/test_metrics/test_coco_detection_metric.py b/tests/test_metrics/test_coco_detection_metric.py index 
6ea2426a..1a515d40 100644 --- a/tests/test_metrics/test_coco_detection_metric.py +++ b/tests/test_metrics/test_coco_detection_metric.py @@ -233,7 +233,7 @@ def test_box_metric_interface(metric_kwargs): metric = ['bbox'] # Avoid some potential error fake_dataset_metas = { - 'CLASSES': tuple([str(i) for i in range(num_classes)]) + 'classes': tuple([str(i) for i in range(num_classes)]) } coco_det_metric = COCODetection( metric=metric, dataset_meta=fake_dataset_metas, **metric_kwargs) @@ -278,7 +278,7 @@ def test_segm_metric_interface(metric_kwargs): metric = ['segm'] # Avoid some potential error fake_dataset_metas = { - 'CLASSES': tuple([str(i) for i in range(num_classes)]) + 'classes': tuple([str(i) for i in range(num_classes)]) } coco_det_metric = COCODetection( metric=metric, dataset_meta=fake_dataset_metas, **metric_kwargs) @@ -313,7 +313,7 @@ def test_metric_invalid_usage(): num_classes = 10 # Avoid some potential error fake_dataset_metas = { - 'CLASSES': tuple([str(i) for i in range(num_classes)]) + 'classes': tuple([str(i) for i in range(num_classes)]) } coco_det_metric = COCODetection(dataset_meta=fake_dataset_metas) @@ -338,7 +338,7 @@ def test_compute_metric(): fake_json_file = osp.join(tmp_dir.name, 'fake_data.json') _create_dummy_coco_json(fake_json_file) dummy_pred = _create_dummy_results() - fake_dataset_metas = dict(CLASSES=['car', 'bicycle']) + fake_dataset_metas = dict(classes=['car', 'bicycle']) # test single coco dataset evaluation coco_det_metric = COCODetection( diff --git a/tests/test_metrics/test_lvis_detection_metric.py b/tests/test_metrics/test_lvis_detection_metric.py new file mode 100644 index 00000000..b221460c --- /dev/null +++ b/tests/test_metrics/test_lvis_detection_metric.py @@ -0,0 +1,383 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
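(Editor's note, not part of the patch.) The dummy-annotation helper in this new test file stores masks as COCO RLE and decodes the byte counts so they survive json.dump. That round trip in isolation, as a sketch assuming pycocotools is installed (the test reaches the same mask_util through mmeval's coco_wrapper):

import numpy as np
from pycocotools import mask as mask_util

mask = np.zeros((10, 10), order='F', dtype=np.uint8)   # encode() expects a Fortran-ordered uint8 array
mask[:5, :5] = 1
rle = mask_util.encode(mask)                            # {'size': [10, 10], 'counts': b'...'}
assert (mask_util.decode(rle) == mask).all()            # lossless round trip
rle['counts'] = rle['counts'].decode('utf-8')           # bytes -> str so json.dump can serialise it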
+import numpy as np +import os.path as osp +import pytest +import tempfile +from json import dump + +from mmeval.core.base_metric import BaseMetric +from mmeval.metrics import LVISDetection +from mmeval.utils import try_import + +coco_wrapper = try_import('mmeval.metrics.utils.coco_wrapper') + + +def _create_dummy_coco_json(json_name): + dummy_mask = np.zeros((10, 10), order='F', dtype=np.uint8) + dummy_mask[:5, :5] = 1 + rle_mask = coco_wrapper.mask_util.encode(dummy_mask) + rle_mask['counts'] = rle_mask['counts'].decode('utf-8') + image = { + 'id': 0, + 'width': 640, + 'height': 640, + 'neg_category_ids': [], + 'not_exhaustive_category_ids': [], + 'coco_url': 'http://images.cocodataset.org/val2017/0.jpg', + } + + annotation_1 = { + 'id': 1, + 'image_id': 0, + 'category_id': 1, + 'area': 400, + 'bbox': [50, 60, 20, 20], + 'segmentation': rle_mask, + } + + annotation_2 = { + 'id': 2, + 'image_id': 0, + 'category_id': 1, + 'area': 900, + 'bbox': [100, 120, 30, 30], + 'segmentation': rle_mask, + } + + annotation_3 = { + 'id': 3, + 'image_id': 0, + 'category_id': 2, + 'area': 1600, + 'bbox': [150, 160, 40, 40], + 'segmentation': rle_mask, + } + + annotation_4 = { + 'id': 4, + 'image_id': 0, + 'category_id': 1, + 'area': 10000, + 'bbox': [250, 260, 100, 100], + 'segmentation': rle_mask, + } + + categories = [ + { + 'id': 1, + 'name': 'aerosol_can', + 'frequency': 'c', + 'image_count': 64 + }, + { + 'id': 2, + 'name': 'air_conditioner', + 'frequency': 'f', + 'image_count': 364 + }, + ] + + fake_json = { + 'images': [image], + 'annotations': + [annotation_1, annotation_2, annotation_3, annotation_4], + 'categories': categories + } + + with open(json_name, 'w') as f: + dump(fake_json, f) + + +def _create_dummy_results(): + # create fake results + bboxes = np.array([[50, 60, 70, 80], [100, 120, 130, 150], + [150, 160, 190, 200], [250, 260, 350, 360]]) + scores = np.array([1.0, 0.98, 0.96, 0.95]) + labels = np.array([0, 0, 1, 0]) + + mask = np.zeros((10, 10), dtype=np.uint8) + mask[:5, :5] = 1 + + dummy_mask = [ + coco_wrapper.mask_util.encode( + np.array(mask[:, :, np.newaxis], order='F', dtype='uint8'))[0] + for _ in range(4) + ] + return dict( + img_id=0, + bboxes=bboxes, + scores=scores, + labels=labels, + masks=dummy_mask) + + +# TODO: move necessary function to somewhere +def _gen_bboxes(num_bboxes, img_w=256, img_h=256): + # random generate bounding boxes in 'xyxy' formart. 
+ x = np.random.rand(num_bboxes, ) * img_w + y = np.random.rand(num_bboxes, ) * img_h + w = np.random.rand(num_bboxes, ) * (img_w - x) + h = np.random.rand(num_bboxes, ) * (img_h - y) + return np.stack([x, y, x + w, y + h], axis=1) + + +def _gen_masks(bboxes, img_w=256, img_h=256): + # random generate masks + if coco_wrapper is None: + raise ImportError('Please try to install official pycocotools by ' + '"pip install pycocotools"') + masks = [] + for i, bbox in enumerate(bboxes): + mask = np.zeros((img_h, img_w)) + bbox = bbox.astype(np.int32) + box_mask = (np.random.rand(bbox[3] - bbox[1], bbox[2] - bbox[0]) > + 0.3).astype(np.int32) + mask[bbox[1]:bbox[3], bbox[0]:bbox[2]] = box_mask + masks.append( + coco_wrapper.mask_util.encode( + np.array(mask[:, :, np.newaxis], order='F', + dtype='uint8'))[0]) # encoded with RLE + return masks + + +def _gen_prediction(num_pred=10, + num_classes=2, + img_w=256, + img_h=256, + img_id=0, + with_mask=False): + # random create prediction + pred_boxes = _gen_bboxes(num_bboxes=num_pred, img_w=img_w, img_h=img_h) + prediction = { + 'img_id': img_id, + 'bboxes': pred_boxes, + 'scores': np.random.rand(num_pred, ), + 'labels': np.random.randint(0, num_classes, size=(num_pred, )) + } + if with_mask: + pred_masks = _gen_masks(bboxes=pred_boxes, img_w=img_w, img_h=img_h) + prediction['masks'] = pred_masks + return prediction + + +def _gen_groundtruth(num_gt=10, + num_classes=2, + img_w=256, + img_h=256, + img_id=0, + with_mask=False): + # random create prediction + gt_boxes = _gen_bboxes(num_bboxes=num_gt, img_w=img_w, img_h=img_h) + groundtruth = { + 'img_id': img_id, + 'width': img_w, + 'height': img_h, + 'neg_category_ids': [], + 'bboxes': gt_boxes, + 'labels': np.random.randint(0, num_classes, size=(num_gt, )), + 'ignore_flags': np.zeros(num_gt) + } + if with_mask: + pred_masks = _gen_masks(bboxes=gt_boxes, img_w=img_w, img_h=img_h) + groundtruth['masks'] = pred_masks + return groundtruth + + +@pytest.mark.skipif( + coco_wrapper is None, reason='coco_wrapper is not available!') +@pytest.mark.parametrize( + argnames='metric_kwargs', + argvalues=[ + {}, + { + 'iou_thrs': [0.5, 0.75] + }, + { + 'classwise': True + }, + { + 'metric_items': ['AP', 'AP50'] + }, + { + 'proposal_nums': [10, 30, 100] + }, + ]) +def test_box_metric_interface(metric_kwargs): + tmp_dir = tempfile.TemporaryDirectory() + + # create dummy data + fake_json_file = osp.join(tmp_dir.name, 'fake_data.json') + _create_dummy_coco_json(fake_json_file) + + num_classes = 2 + metric = ['bbox'] + # Avoid some potential error + fake_dataset_metas = { + 'classes': tuple([str(i) for i in range(num_classes)]) + } + coco_det_metric = LVISDetection(ann_file=fake_json_file, + metric=metric, dataset_meta=fake_dataset_metas, **metric_kwargs) + assert isinstance(coco_det_metric, BaseMetric) + + metric_results = coco_det_metric(predictions=[_create_dummy_results()], + groundtruths=[dict()], + + ) + assert isinstance(metric_results, dict) + assert 'bbox_AP' in metric_results + + +@pytest.mark.skipif( + coco_wrapper is None, reason='coco_wrapper is not available!') +@pytest.mark.parametrize( + argnames='metric_kwargs', + argvalues=[ + {}, + { + 'iou_thrs': [0.5, 0.75] + }, + { + 'classwise': True + }, + { + 'metric_items': ['AP', 'AP50'] + }, + { + 'proposal_nums': [10, 30, 100] + }, + ]) +def test_segm_metric_interface(metric_kwargs): + tmp_dir = tempfile.TemporaryDirectory() + + # create dummy data + fake_json_file = osp.join(tmp_dir.name, 'fake_data.json') + _create_dummy_coco_json(fake_json_file) + + 
num_classes = 2 + metric = ['segm'] + # Avoid some potential error + fake_dataset_metas = { + 'classes': tuple([str(i) for i in range(num_classes)]) + } + coco_det_metric = LVISDetection(ann_file=fake_json_file, + metric=metric, dataset_meta=fake_dataset_metas, **metric_kwargs) + assert isinstance(coco_det_metric, BaseMetric) + + metric_results = coco_det_metric( + predictions=[_create_dummy_results()], + groundtruths=[dict()], + ) + assert isinstance(metric_results, dict) + assert 'segm_AP' in metric_results + + +@pytest.mark.skipif( + coco_wrapper is None, reason='coco_wrapper is not available!') +def test_metric_invalid_usage(): + with pytest.raises(KeyError): + LVISDetection(metric='xxx') + + with pytest.raises(TypeError): + LVISDetection(iou_thrs=1) + + with pytest.raises(AssertionError): + LVISDetection(format_only=True) + + num_classes = 2 + # Avoid some potential error + fake_dataset_metas = { + 'classes': tuple([str(i) for i in range(num_classes)]) + } + coco_det_metric = LVISDetection(dataset_meta=fake_dataset_metas) + + with pytest.raises(KeyError): + prediction = _gen_prediction(num_classes=num_classes) + groundtruth = _gen_groundtruth(num_classes=num_classes) + del prediction['bboxes'] + coco_det_metric([prediction], [groundtruth]) + + with pytest.raises(AssertionError): + prediction = _gen_prediction(num_classes=num_classes) + groundtruth = _gen_groundtruth(num_classes=num_classes) + coco_det_metric(prediction, groundtruth) + + +@pytest.mark.skipif( + coco_wrapper is None, reason='coco_wrapper is not available!') +def test_compute_metric(): + tmp_dir = tempfile.TemporaryDirectory() + + # create dummy data + fake_json_file = osp.join(tmp_dir.name, 'fake_data.json') + _create_dummy_coco_json(fake_json_file) + dummy_pred = _create_dummy_results() + fake_dataset_metas = dict(classes=['car', 'bicycle']) + + # test single coco dataset evaluation + coco_det_metric = LVISDetection( + ann_file=fake_json_file, + classwise=False, + outfile_prefix=f'{tmp_dir.name}/test', + dataset_meta=fake_dataset_metas) + eval_results = coco_det_metric([dummy_pred], [dict()]) + target = { + 'bbox_AP': 1.0, + 'bbox_AP50': 1.0, + 'bbox_AP75': 1.0, + 'bbox_APc': 1.0, + 'bbox_APf': 1.0, + 'bbox_APr': -1.0, + 'bbox_APs': 1.0, + 'bbox_APm': 1.0, + 'bbox_APl': 1.0, + } + + eval_results.pop('bbox_result') + assert eval_results == target + assert osp.isfile(osp.join(tmp_dir.name, 'test.bbox.json')) + + # test box and segm coco dataset evaluation + coco_det_metric = LVISDetection( + ann_file=fake_json_file, + classwise=False, + metric=['bbox', 'segm'], + outfile_prefix=f'{tmp_dir.name}/test', + dataset_meta=fake_dataset_metas) + eval_results = coco_det_metric([dummy_pred], [dict()]) + target = { + 'bbox_AP': 1.0, + 'bbox_AP50': 1.0, + 'bbox_AP75': 1.0, + 'bbox_APc': 1.0, + 'bbox_APf': 1.0, + 'bbox_APr': -1.0, + 'bbox_APs': 1.0, + 'bbox_APm': 1.0, + 'bbox_APl': 1.0, + 'segm_AP': 1.0, + 'segm_AP50': 1.0, + 'segm_AP75': 1.0, + 'segm_APc': 1.0, + 'segm_APf': 1.0, + 'segm_APr': -1.0, + 'segm_APs': 1.0, + 'segm_APm': 1.0, + 'segm_APl': 1.0, + } + eval_results.pop('bbox_result') + eval_results.pop('segm_result') + assert eval_results == target + assert osp.isfile(osp.join(tmp_dir.name, 'test.bbox.json')) + assert osp.isfile(osp.join(tmp_dir.name, 'test.segm.json')) + + # test format only evaluation + coco_det_metric = LVISDetection( + ann_file=fake_json_file, + classwise=False, + format_only=True, + outfile_prefix=f'{tmp_dir.name}/test', + dataset_meta=fake_dataset_metas) + eval_results = coco_det_metric([dummy_pred], 
[dict()]) + assert osp.exists(f'{tmp_dir.name}/test.bbox.json') + assert eval_results == dict() + tmp_dir.cleanup() From 2ad8371ef06fa7c5367fda1d33e7908ef279d090 Mon Sep 17 00:00:00 2001 From: RangiLyu Date: Tue, 21 Feb 2023 21:48:05 +0800 Subject: [PATCH 02/13] requirements --- requirements/optional.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements/optional.txt b/requirements/optional.txt index 1bd0bbd5..4ffd33e6 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -2,3 +2,4 @@ opencv-python!=4.5.5.62,!=4.5.5.64 pycocotools scipy shapely +git+https://github.com/lvis-dataset/lvis-api.git From 870eca2423c9a6343292ad315ffe6a0839a8c2a4 Mon Sep 17 00:00:00 2001 From: RangiLyu Date: Thu, 23 Feb 2023 14:12:01 +0800 Subject: [PATCH 03/13] update --- mmeval/metrics/lvis.py | 110 ++++++++---------- requirements/optional.txt | 2 +- .../test_lvis_detection_metric.py | 45 ++++--- 3 files changed, 78 insertions(+), 79 deletions(-) diff --git a/mmeval/metrics/lvis.py b/mmeval/metrics/lvis.py index 2ae00985..7ea39b74 100644 --- a/mmeval/metrics/lvis.py +++ b/mmeval/metrics/lvis.py @@ -1,14 +1,12 @@ # Copyright (c) OpenMMLab. All rights reserved. -import itertools +import numpy as np import os.path as osp import tempfile -import warnings from collections import OrderedDict from typing import Dict, List, Optional, Sequence, Union -import numpy as np -from .coco_detection import COCODetection from mmeval.fileio import get_local_path +from .coco_detection import COCODetection try: from lvis import LVIS, LVISEval, LVISResults @@ -24,9 +22,7 @@ class LVISDetection(COCODetection): detection and instance segmentation. Args: - ann_file (str, optional): Path to the coco lvis format annotation file. - If not specified, ground truth annotations from the dataset will - be converted to coco lvis format. Defaults to None. + ann_file (str, optional): Path to the LVIS dataset annotation file. metric (str | List[str]): Metrics to be evaluated. Valid metrics include 'bbox', 'segm', and 'proposal'. Defaults to 'bbox'. iou_thrs (float | List[float], optional): IoU threshold to compute AP @@ -34,8 +30,8 @@ class LVISDetection(COCODetection): Defaults to None. classwise (bool): Whether to return the computed results of each class. Defaults to False. - proposal_nums (Sequence[int]): Numbers of proposals to be evaluated. - Defaults to (100, 300, 1000). + proposal_nums (int): Numbers of proposals to be evaluated. + Defaults to 300. metric_items (List[str], optional): Metric result names to be recorded in the evaluation result. Defaults to None. format_only (bool): Format the output results without perform @@ -59,10 +55,11 @@ class LVISDetection(COCODetection): >>> >>> num_classes = 4 >>> fake_dataset_metas = { - ... 'CLASSES': tuple([str(i) for i in range(num_classes)]) + ... 'classes': tuple([str(i) for i in range(num_classes)]) ... } >>> >>> lvis_det_metric = LVISDetection( + ... ann_file='data/lvis_v1/annotations/lvis_v1_train.json' ... dataset_meta=fake_dataset_metas, ... metric=['bbox', 'segm'] ... ) @@ -135,10 +132,10 @@ class LVISDetection(COCODetection): """ def __init__(self, - ann_file: Optional[str] = None, + ann_file: str, metric: Union[str, List[str]] = 'bbox', classwise: bool = False, - proposal_nums: Sequence[int] = (100, 300, 1000), + proposal_nums: int = 300, iou_thrs: Optional[Union[float, Sequence[float]]] = None, metric_items: Optional[Sequence[str]] = None, format_only: bool = False, @@ -150,33 +147,19 @@ def __init__(self, 'Package lvis is not installed. 
Please run "pip install ' 'git+https://github.com/lvis-dataset/lvis-api.git".') super().__init__( - metric=metric, + metric=metric, classwise=classwise, - proposal_nums=proposal_nums, iou_thrs=iou_thrs, metric_items=metric_items, format_only=format_only, outfile_prefix=outfile_prefix, backend_args=backend_args, **kwargs) - # if ann_file is not specified, - # initialize lvis api with the converted dataset - self._lvis_api: Optional[LVIS] # type: ignore - if ann_file is not None: - with get_local_path( - filepath=ann_file, - backend_args=backend_args) as local_path: - self._lvis_api = LVIS(local_path) - else: - self._lvis_api = None + self.proposal_nums = proposal_nums # type: ignore - # @property - # def _coco_api(self): - # return self._lvis_api - - # @_coco_api.setter - # def _coco_api(self, _coco_api): - # self._lvis_api = _coco_api + with get_local_path( + filepath=ann_file, backend_args=backend_args) as local_path: + self._lvis_api = LVIS(local_path) def add_predictions(self, predictions: Sequence[Dict]) -> None: """Add predictions only. @@ -214,7 +197,7 @@ def __call__(self, *args, **kwargs) -> Dict: return metric_result def compute_metric(self, results: list) -> Dict[str, float]: - """Compute the COCO metrics. + """Compute the LVIS metrics. Args: results (List[tuple]): A list of tuple. Each tuple is the @@ -231,17 +214,10 @@ def compute_metric(self, results: list) -> Dict[str, float]: outfile_prefix = osp.join(tmp_dir.name, 'results') else: outfile_prefix = self.outfile_prefix - + # split gt and prediction list preds, gts = zip(*results) - if self._lvis_api is None: - # use converted gt json file to initialize coco api - print('Converting ground truth to coco lvis format...') - coco_json_path = self.gt_to_coco_json( - gt_dicts=gts, outfile_prefix=outfile_prefix) - self._lvis_api = LVIS(coco_json_path) - # handle lazy init if len(self.cat_ids) == 0: self.cat_ids = self._lvis_api.get_cat_ids() @@ -254,7 +230,7 @@ def compute_metric(self, results: list) -> Dict[str, float]: eval_results: OrderedDict = OrderedDict() if self.format_only: print('results are saved in ' - f'{osp.dirname(outfile_prefix)}') + f'{osp.dirname(outfile_prefix)}') return eval_results lvis_gt = self._lvis_api @@ -265,8 +241,7 @@ def compute_metric(self, results: list) -> Dict[str, float]: try: lvis_dt = LVISResults(lvis_gt, result_files[metric]) except IndexError: - print( - 'The testing results of the whole dataset is empty.') + print('The testing results of the whole dataset is empty.') break iou_type = 'bbox' if metric == 'proposal' else metric @@ -274,16 +249,21 @@ def compute_metric(self, results: list) -> Dict[str, float]: lvis_eval.params.imgIds = self.img_ids metric_items = self.metric_items if metric == 'proposal': - lvis_eval.params.useCats = 0 - lvis_eval.params.maxDets = list(self.proposal_nums) + lvis_eval.params.use_cats = 0 + lvis_eval.params.max_dets = self.proposal_nums lvis_eval.evaluate() lvis_eval.accumulate() lvis_eval.summarize() if metric_items is None: - metric_items = ['AR@300', 'ARs@300', 'ARm@300', 'ARl@300'] + metric_items = [ + f'AR@{self.proposal_nums}', + f'ARs@{self.proposal_nums}', + f'ARm@{self.proposal_nums}', + f'ARl@{self.proposal_nums}' + ] for k, v in lvis_eval.get_results().items(): if k in metric_items: - val = float('{:.3f}'.format(float(v))) + val = float(f'{float(v):.3f}') eval_results[k] = val else: @@ -291,6 +271,22 @@ def compute_metric(self, results: list) -> Dict[str, float]: lvis_eval.accumulate() lvis_eval.summarize() lvis_results = lvis_eval.get_results() + + if 
metric_items is None: + metric_items = [ + 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'APr', + 'APc', 'APf' + ] + + results_list = [] + for metric_item, v in lvis_results.items(): + if metric_item in metric_items: + key = f'{metric}_{metric_item}' + val = float(v) + results_list.append(f'{round(val * 100, 2)}') + eval_results[key] = val + eval_results[f'{metric}_result'] = results_list + if self.classwise: # Compute per-category AP # Compute per-category AP # from https://github.com/facebookresearch/detectron2/ @@ -312,25 +308,11 @@ def compute_metric(self, results: list) -> Dict[str, float]: else: ap = float('nan') results_per_category.append( - (f'{nm["name"]}', f'{float(ap):0.3f}')) - eval_results[f'{metric}_{nm["name"]}_precision'] = round(ap, 3) - + (f'{nm["name"]}', f'{round(ap * 100, 2)}')) + eval_results[f'{metric}_{nm["name"]}_precision'] = ap + eval_results[f'{metric}_classwise_result'] = \ results_per_category - if metric_items is None: - metric_items = [ - 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'APr', - 'APc', 'APf' - ] - - results_list = [] - for metric_item, v in lvis_results.items(): - if metric_item in metric_items: - key = f'{metric}_{metric_item}' - val = float('{:.3f}'.format(float(v))) - results_list.append(f'{round(v, 3) * 100:.1f}') - eval_results[key] = val - eval_results[f'{metric}_result'] = results_list lvis_eval.print_results() if tmp_dir is not None: diff --git a/requirements/optional.txt b/requirements/optional.txt index 4ffd33e6..d45d5679 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -1,5 +1,5 @@ +git+https://github.com/lvis-dataset/lvis-api.git opencv-python!=4.5.5.62,!=4.5.5.64 pycocotools scipy shapely -git+https://github.com/lvis-dataset/lvis-api.git diff --git a/tests/test_metrics/test_lvis_detection_metric.py b/tests/test_metrics/test_lvis_detection_metric.py index b221460c..afc5272b 100644 --- a/tests/test_metrics/test_lvis_detection_metric.py +++ b/tests/test_metrics/test_lvis_detection_metric.py @@ -199,7 +199,7 @@ def _gen_groundtruth(num_gt=10, 'metric_items': ['AP', 'AP50'] }, { - 'proposal_nums': [10, 30, 100] + 'proposal_nums': 30 }, ]) def test_box_metric_interface(metric_kwargs): @@ -215,13 +215,16 @@ def test_box_metric_interface(metric_kwargs): fake_dataset_metas = { 'classes': tuple([str(i) for i in range(num_classes)]) } - coco_det_metric = LVISDetection(ann_file=fake_json_file, - metric=metric, dataset_meta=fake_dataset_metas, **metric_kwargs) + coco_det_metric = LVISDetection( + ann_file=fake_json_file, + metric=metric, + dataset_meta=fake_dataset_metas, + **metric_kwargs) assert isinstance(coco_det_metric, BaseMetric) - metric_results = coco_det_metric(predictions=[_create_dummy_results()], + metric_results = coco_det_metric( + predictions=[_create_dummy_results()], groundtruths=[dict()], - ) assert isinstance(metric_results, dict) assert 'bbox_AP' in metric_results @@ -243,7 +246,7 @@ def test_box_metric_interface(metric_kwargs): 'metric_items': ['AP', 'AP50'] }, { - 'proposal_nums': [10, 30, 100] + 'proposal_nums': 30 }, ]) def test_segm_metric_interface(metric_kwargs): @@ -259,8 +262,11 @@ def test_segm_metric_interface(metric_kwargs): fake_dataset_metas = { 'classes': tuple([str(i) for i in range(num_classes)]) } - coco_det_metric = LVISDetection(ann_file=fake_json_file, - metric=metric, dataset_meta=fake_dataset_metas, **metric_kwargs) + coco_det_metric = LVISDetection( + ann_file=fake_json_file, + metric=metric, + dataset_meta=fake_dataset_metas, + **metric_kwargs) assert isinstance(coco_det_metric, 
BaseMetric) metric_results = coco_det_metric( @@ -269,26 +275,34 @@ def test_segm_metric_interface(metric_kwargs): ) assert isinstance(metric_results, dict) assert 'segm_AP' in metric_results + tmp_dir.cleanup() @pytest.mark.skipif( coco_wrapper is None, reason='coco_wrapper is not available!') def test_metric_invalid_usage(): + tmp_dir = tempfile.TemporaryDirectory() + + # create dummy data + fake_json_file = osp.join(tmp_dir.name, 'fake_data.json') + _create_dummy_coco_json(fake_json_file) + with pytest.raises(KeyError): - LVISDetection(metric='xxx') + LVISDetection(ann_file=fake_json_file, metric='xxx') with pytest.raises(TypeError): - LVISDetection(iou_thrs=1) + LVISDetection(ann_file=fake_json_file, iou_thrs=1) with pytest.raises(AssertionError): - LVISDetection(format_only=True) + LVISDetection(ann_file=fake_json_file, format_only=True) num_classes = 2 # Avoid some potential error fake_dataset_metas = { 'classes': tuple([str(i) for i in range(num_classes)]) } - coco_det_metric = LVISDetection(dataset_meta=fake_dataset_metas) + coco_det_metric = LVISDetection( + ann_file=fake_json_file, dataset_meta=fake_dataset_metas) with pytest.raises(KeyError): prediction = _gen_prediction(num_classes=num_classes) @@ -300,6 +314,7 @@ def test_metric_invalid_usage(): prediction = _gen_prediction(num_classes=num_classes) groundtruth = _gen_groundtruth(num_classes=num_classes) coco_det_metric(prediction, groundtruth) + tmp_dir.cleanup() @pytest.mark.skipif( @@ -333,7 +348,8 @@ def test_compute_metric(): } eval_results.pop('bbox_result') - assert eval_results == target + results = {k: round(v, 4) for k, v in eval_results.items()} + assert results == target assert osp.isfile(osp.join(tmp_dir.name, 'test.bbox.json')) # test box and segm coco dataset evaluation @@ -366,7 +382,8 @@ def test_compute_metric(): } eval_results.pop('bbox_result') eval_results.pop('segm_result') - assert eval_results == target + results = {k: round(v, 4) for k, v in eval_results.items()} + assert results == target assert osp.isfile(osp.join(tmp_dir.name, 'test.bbox.json')) assert osp.isfile(osp.join(tmp_dir.name, 'test.segm.json')) From 0164baa67483c032a33c81f37655485b2490bc6a Mon Sep 17 00:00:00 2001 From: RangiLyu Date: Thu, 23 Feb 2023 14:13:29 +0800 Subject: [PATCH 04/13] docstring --- mmeval/metrics/lvis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmeval/metrics/lvis.py b/mmeval/metrics/lvis.py index 7ea39b74..e4d253fa 100644 --- a/mmeval/metrics/lvis.py +++ b/mmeval/metrics/lvis.py @@ -22,7 +22,7 @@ class LVISDetection(COCODetection): detection and instance segmentation. Args: - ann_file (str, optional): Path to the LVIS dataset annotation file. + ann_file (str): Path to the LVIS dataset annotation file. metric (str | List[str]): Metrics to be evaluated. Valid metrics include 'bbox', 'segm', and 'proposal'. Defaults to 'bbox'. iou_thrs (float | List[float], optional): IoU threshold to compute AP From bbff8b4bf3d2f4d5312e978c595aeaba9a682480 Mon Sep 17 00:00:00 2001 From: RangiLyu Date: Tue, 28 Feb 2023 13:46:16 +0800 Subject: [PATCH 05/13] update --- mmeval/metrics/lvis.py | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/mmeval/metrics/lvis.py b/mmeval/metrics/lvis.py index e4d253fa..494c9650 100644 --- a/mmeval/metrics/lvis.py +++ b/mmeval/metrics/lvis.py @@ -1,8 +1,12 @@ # Copyright (c) OpenMMLab. All rights reserved. 
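(Editor's note, not part of the patch.) Patch 05 below starts funnelling the evaluator's printed summary into a logger instead of stdout. The capture idiom it relies on, reduced to a few lines, with print() standing in for lvis_eval.print_results():

import contextlib
import io
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    print('AP50: 0.750')            # stands in for lvis_eval.print_results()
logger.info('\n' + buf.getvalue())  # the captured text ends up in the log, not on stdout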
+import contextlib +import io +import logging import numpy as np import os.path as osp import tempfile from collections import OrderedDict +from logging import Logger from typing import Dict, List, Optional, Sequence, Union from mmeval.fileio import get_local_path @@ -141,6 +145,7 @@ def __init__(self, format_only: bool = False, outfile_prefix: Optional[str] = None, backend_args: Optional[dict] = None, + logger: Optional[Logger] = None, **kwargs) -> None: if not HAS_LVISAPI: raise RuntimeError( @@ -161,6 +166,8 @@ def __init__(self, filepath=ann_file, backend_args=backend_args) as local_path: self._lvis_api = LVIS(local_path) + self.logger = logging.getLogger(__name__) if logger is None else logger + def add_predictions(self, predictions: Sequence[Dict]) -> None: """Add predictions only. @@ -196,7 +203,7 @@ def __call__(self, *args, **kwargs) -> Dict: return metric_result - def compute_metric(self, results: list) -> Dict[str, float]: + def compute_metric(self, results: list) -> Dict[str, Union[float, list]]: """Compute the LVIS metrics. Args: @@ -229,19 +236,20 @@ def compute_metric(self, results: list) -> Dict[str, float]: eval_results: OrderedDict = OrderedDict() if self.format_only: - print('results are saved in ' - f'{osp.dirname(outfile_prefix)}') + self.logger.info('results are saved in ' + f'{osp.dirname(outfile_prefix)}') return eval_results lvis_gt = self._lvis_api for metric in self.metrics: - print(f'Evaluating {metric}...') + self.logger.info(f'Evaluating {metric}...') try: lvis_dt = LVISResults(lvis_gt, result_files[metric]) except IndexError: - print('The testing results of the whole dataset is empty.') + self.logger.warning( + 'The testing results of the whole dataset is empty.') break iou_type = 'bbox' if metric == 'proposal' else metric @@ -313,8 +321,11 @@ def compute_metric(self, results: list) -> Dict[str, float]: eval_results[f'{metric}_classwise_result'] = \ results_per_category - - lvis_eval.print_results() + # Save coco summarize print information to logger + redirect_string = io.StringIO() + with contextlib.redirect_stdout(redirect_string): + lvis_eval.print_results() + self.logger.info('\n' + redirect_string.getvalue()) if tmp_dir is not None: tmp_dir.cleanup() return eval_results From 1fae16516014c2fc276a6c0f6454761b362ba3aa Mon Sep 17 00:00:00 2001 From: RangiLyu Date: Fri, 3 Mar 2023 11:00:00 +0800 Subject: [PATCH 06/13] resolve comments --- mmeval/metrics/__init__.py | 2 +- mmeval/metrics/{lvis.py => lvis_detection.py} | 134 +++++++----------- .../test_lvis_detection_metric.py | 48 +++---- 3 files changed, 71 insertions(+), 113 deletions(-) rename mmeval/metrics/{lvis.py => lvis_detection.py} (71%) diff --git a/mmeval/metrics/__init__.py b/mmeval/metrics/__init__.py index 2fcc67a8..60c519ac 100644 --- a/mmeval/metrics/__init__.py +++ b/mmeval/metrics/__init__.py @@ -15,7 +15,7 @@ from .keypoint_auc import KeypointAUC from .keypoint_epe import KeypointEndPointError from .keypoint_nme import KeypointNME -from .lvis import LVISDetection +from .lvis_detection import LVISDetection from .mae import MeanAbsoluteError from .matting_mse import MattingMeanSquaredError from .mean_iou import MeanIoU diff --git a/mmeval/metrics/lvis.py b/mmeval/metrics/lvis_detection.py similarity index 71% rename from mmeval/metrics/lvis.py rename to mmeval/metrics/lvis_detection.py index 494c9650..0574e37f 100644 --- a/mmeval/metrics/lvis.py +++ b/mmeval/metrics/lvis_detection.py @@ -22,7 +22,7 @@ class LVISDetection(COCODetection): """LVIS evaluation metric. 
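(Editor's note, not part of the patch.) Patch 06 below replaces the long doctest with a plain description of the prediction dict accepted by add()/add_predictions(). A minimal prediction built to that description, with invented values purely for illustration:

import numpy as np

prediction = {
    'img_id': 0,
    'bboxes': np.array([[50., 60., 70., 80.]]),  # (N, 4) boxes in xyxy format
    'scores': np.array([0.98]),                  # (N,) predicted scores
    'labels': np.array([0]),                     # (N,) predicted class indices
    # 'masks': [...]  # optional list of RLE dicts, only needed for the 'segm' metric
}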
- Evaluate AR, AP, and mAP for detection tasks including proposal/box + Evaluate AR, AP for detection tasks on LVIS dataset including proposal/box detection and instance segmentation. Args: @@ -32,13 +32,14 @@ class LVISDetection(COCODetection): iou_thrs (float | List[float], optional): IoU threshold to compute AP and AR. If not specified, IoUs from 0.5 to 0.95 will be used. Defaults to None. - classwise (bool): Whether to return the computed results of each + classwise (bool): Whether to return the computed results of each class. Defaults to False. proposal_nums (int): Numbers of proposals to be evaluated. Defaults to 300. metric_items (List[str], optional): Metric result names to be - recorded in the evaluation result. Defaults to None. - format_only (bool): Format the output results without perform + recorded in the evaluation result. If None, default configurations + in LVIS will be used.Defaults to None. + format_only (bool): Format the output results without performing evaluation. It is useful when you want to format the result to a specific format and submit it to the test server. Defaults to False. @@ -67,72 +68,9 @@ class LVISDetection(COCODetection): ... dataset_meta=fake_dataset_metas, ... metric=['bbox', 'segm'] ... ) - >>> def _gen_bboxes(num_bboxes, img_w=256, img_h=256): - ... # random generate bounding boxes in 'xyxy' formart. - ... x = np.random.rand(num_bboxes, ) * img_w - ... y = np.random.rand(num_bboxes, ) * img_h - ... w = np.random.rand(num_bboxes, ) * (img_w - x) - ... h = np.random.rand(num_bboxes, ) * (img_h - y) - ... return np.stack([x, y, x + w, y + h], axis=1) - >>> - >>> def _gen_masks(bboxes, img_w=256, img_h=256): - ... if mask_util is None: - ... raise ImportError( - ... 'Please try to install official pycocotools by ' - ... '"pip install pycocotools"') - ... masks = [] - ... for i, bbox in enumerate(bboxes): - ... mask = np.zeros((img_h, img_w)) - ... bbox = bbox.astype(np.int32) - ... box_mask = (np.random.rand( - ... bbox[3] - bbox[1], - ... bbox[2] - bbox[0]) > 0.3).astype(np.int32) - ... mask[bbox[1]:bbox[3], bbox[0]:bbox[2]] = box_mask - ... masks.append( - ... mask_util.encode( - ... np.array(mask[:, :, np.newaxis], order='F', - ... dtype='uint8'))[0]) # encoded with RLE - ... return masks - >>> - >>> img_id = 1 - >>> img_w, img_h = 256, 256 - >>> num_bboxes = 10 - >>> pred_boxes = _gen_bboxes( - ... num_bboxes=num_bboxes, - ... img_w=img_w, - ... img_h=img_h) - >>> pred_masks = _gen_masks( - ... bboxes=pred_boxes, - ... img_w=img_w, - ... img_h=img_h) - >>> prediction = { - ... 'img_id': img_id, - ... 'bboxes': pred_boxes, - ... 'scores': np.random.rand(num_bboxes, ), - ... 'labels': np.random.randint(0, num_classes, size=(num_bboxes, )), - ... 'masks': pred_masks - ... } - >>> gt_boxes = _gen_bboxes( - ... num_bboxes=num_bboxes, - ... img_w=img_w, - ... img_h=img_h) - >>> gt_masks = _gen_masks( - ... bboxes=pred_boxes, - ... img_w=img_w, - ... img_h=img_h) - >>> groundtruth = { - ... 'img_id': img_id, - ... 'width': img_w, - ... 'height': img_h, - ... 'bboxes': gt_boxes, - ... 'labels': np.random.randint(0, num_classes, size=(num_bboxes, )), - ... 'masks': gt_masks, - ... 'ignore_flags': np.zeros(num_bboxes) - ... 
} - >>> lvis_det_metric(predictions=[prediction, ], groundtruths=[groundtruth, ]) # doctest: +ELLIPSIS # noqa: E501 - {'bbox_mAP': ..., 'bbox_mAP_50': ..., ..., - 'segm_mAP': ..., 'segm_mAP_50': ..., ..., - 'bbox_result': ..., 'segm_result': ..., ...} + >>> lvis_det_metric(predictions=predictions) # doctest: +ELLIPSIS # noqa: E501 + {'bbox_AP': ..., 'bbox_AP50': ..., ..., + 'segm_AP': ..., 'segm_AP50': ..., ...,} """ def __init__(self, @@ -169,18 +107,49 @@ def __init__(self, self.logger = logging.getLogger(__name__) if logger is None else logger def add_predictions(self, predictions: Sequence[Dict]) -> None: - """Add predictions only. + """Add predictions to `self._results`. - If the `ann_file` has been passed, we can add predictions only. + Args: + predictions (Sequence[dict]): A sequence of dict. Each dict + representing a detection result for an image, with the + following keys: + + - img_id (int): Image id. + - bboxes (numpy.ndarray): Shape (N, 4), the predicted + bounding bboxes of this image, in 'xyxy' foramrt. + - scores (numpy.ndarray): Shape (N, ), the predicted scores + of bounding boxes. + - labels (numpy.ndarray): Shape (N, ), the predicted labels + of bounding boxes. + - masks (list[RLE], optional): The predicted masks. + - mask_scores (np.array, optional): Shape (N, ), the predicted + scores of masks. + """ + self.add(predictions) + + def add(self, predictions: Sequence[Dict]) -> None: # type: ignore # yapf: disable # noqa: E501 + """Add the intermediate results to `self._results`. Args: - predictions (Sequence[dict]): Refer to - :class:`LVISDetection.add`. + predictions (Sequence[dict]): A sequence of dict. Each dict + representing a detection result for an image, with the + following keys: + + - img_id (int): Image id. + - bboxes (numpy.ndarray): Shape (N, 4), the predicted + bounding bboxes of this image, in 'xyxy' foramrt. + - scores (numpy.ndarray): Shape (N, ), the predicted scores + of bounding boxes. + - labels (numpy.ndarray): Shape (N, ), the predicted labels + of bounding boxes. + - masks (list[RLE], optional): The predicted masks. + - mask_scores (np.array, optional): Shape (N, ), the predicted + scores of masks. """ - assert self._lvis_api is not None, 'The `ann_file` should be ' \ - 'passesd when use the `LVISDetection.add_predictions` ' \ - 'method, otherwisw use the `LVISDetection.add` instead!' - self.add(predictions, groundtruths=[{}] * len(predictions)) + for prediction in predictions: + assert isinstance(prediction, dict), 'The prediciton should be ' \ + f'a sequence of dict, but got a sequence of {type(prediction)}.' # noqa: E501 + self._results.append(prediction) def __call__(self, *args, **kwargs) -> Dict: """Stateless call for a metric compute.""" @@ -203,7 +172,7 @@ def __call__(self, *args, **kwargs) -> Dict: return metric_result - def compute_metric(self, results: list) -> Dict[str, Union[float, list]]: + def compute_metric(self, results: list) -> Dict[str, Union[float, list]]: # type: ignore """Compute the LVIS metrics. 
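(Editor's note, not part of the patch.) Stripped of the result-file bookkeeping, compute_metric drives the LVIS evaluator with the same four-step sequence each time. A condensed sketch with hypothetical file paths, assuming the lvis package from lvis-api is installed:

from lvis import LVIS, LVISEval, LVISResults

lvis_gt = LVIS('annotations/lvis_v1_val.json')       # hypothetical annotation path
lvis_dt = LVISResults(lvis_gt, 'results.bbox.json')  # COCO-style detection dump, e.g. from results2json
lvis_eval = LVISEval(lvis_gt, lvis_dt, 'bbox')       # iou_type: 'bbox' or 'segm'
lvis_eval.evaluate()
lvis_eval.accumulate()
lvis_eval.summarize()
print(lvis_eval.get_results())                       # {'AP': ..., 'AP50': ..., 'APr': ..., 'APc': ..., 'APf': ..., ...}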
Args: @@ -222,9 +191,6 @@ def compute_metric(self, results: list) -> Dict[str, Union[float, list]]: else: outfile_prefix = self.outfile_prefix - # split gt and prediction list - preds, gts = zip(*results) - # handle lazy init if len(self.cat_ids) == 0: self.cat_ids = self._lvis_api.get_cat_ids() @@ -232,7 +198,7 @@ def compute_metric(self, results: list) -> Dict[str, Union[float, list]]: self.img_ids = self._lvis_api.get_img_ids() # convert predictions to coco format and dump to json file - result_files = self.results2json(preds, outfile_prefix) + result_files = self.results2json(results, outfile_prefix) eval_results: OrderedDict = OrderedDict() if self.format_only: @@ -321,7 +287,7 @@ def compute_metric(self, results: list) -> Dict[str, Union[float, list]]: eval_results[f'{metric}_classwise_result'] = \ results_per_category - # Save coco summarize print information to logger + # Save lvis summarize print information to logger redirect_string = io.StringIO() with contextlib.redirect_stdout(redirect_string): lvis_eval.print_results() diff --git a/tests/test_metrics/test_lvis_detection_metric.py b/tests/test_metrics/test_lvis_detection_metric.py index afc5272b..7214bc6c 100644 --- a/tests/test_metrics/test_lvis_detection_metric.py +++ b/tests/test_metrics/test_lvis_detection_metric.py @@ -12,7 +12,7 @@ coco_wrapper = try_import('mmeval.metrics.utils.coco_wrapper') -def _create_dummy_coco_json(json_name): +def create_dummy_lvis_json(json_name): dummy_mask = np.zeros((10, 10), order='F', dtype=np.uint8) dummy_mask[:5, :5] = 1 rle_mask = coco_wrapper.mask_util.encode(dummy_mask) @@ -207,7 +207,7 @@ def test_box_metric_interface(metric_kwargs): # create dummy data fake_json_file = osp.join(tmp_dir.name, 'fake_data.json') - _create_dummy_coco_json(fake_json_file) + create_dummy_lvis_json(fake_json_file) num_classes = 2 metric = ['bbox'] @@ -215,17 +215,14 @@ def test_box_metric_interface(metric_kwargs): fake_dataset_metas = { 'classes': tuple([str(i) for i in range(num_classes)]) } - coco_det_metric = LVISDetection( + lvis_det_metric = LVISDetection( ann_file=fake_json_file, metric=metric, dataset_meta=fake_dataset_metas, **metric_kwargs) - assert isinstance(coco_det_metric, BaseMetric) + assert isinstance(lvis_det_metric, BaseMetric) - metric_results = coco_det_metric( - predictions=[_create_dummy_results()], - groundtruths=[dict()], - ) + metric_results = lvis_det_metric(predictions=[_create_dummy_results()]) assert isinstance(metric_results, dict) assert 'bbox_AP' in metric_results @@ -254,7 +251,7 @@ def test_segm_metric_interface(metric_kwargs): # create dummy data fake_json_file = osp.join(tmp_dir.name, 'fake_data.json') - _create_dummy_coco_json(fake_json_file) + create_dummy_lvis_json(fake_json_file) num_classes = 2 metric = ['segm'] @@ -262,17 +259,14 @@ def test_segm_metric_interface(metric_kwargs): fake_dataset_metas = { 'classes': tuple([str(i) for i in range(num_classes)]) } - coco_det_metric = LVISDetection( + lvis_det_metric = LVISDetection( ann_file=fake_json_file, metric=metric, dataset_meta=fake_dataset_metas, **metric_kwargs) - assert isinstance(coco_det_metric, BaseMetric) + assert isinstance(lvis_det_metric, BaseMetric) - metric_results = coco_det_metric( - predictions=[_create_dummy_results()], - groundtruths=[dict()], - ) + metric_results = lvis_det_metric(predictions=[_create_dummy_results()]) assert isinstance(metric_results, dict) assert 'segm_AP' in metric_results tmp_dir.cleanup() @@ -285,7 +279,7 @@ def test_metric_invalid_usage(): # create dummy data fake_json_file = 
osp.join(tmp_dir.name, 'fake_data.json') - _create_dummy_coco_json(fake_json_file) + create_dummy_lvis_json(fake_json_file) with pytest.raises(KeyError): LVISDetection(ann_file=fake_json_file, metric='xxx') @@ -301,19 +295,17 @@ def test_metric_invalid_usage(): fake_dataset_metas = { 'classes': tuple([str(i) for i in range(num_classes)]) } - coco_det_metric = LVISDetection( + lvis_det_metric = LVISDetection( ann_file=fake_json_file, dataset_meta=fake_dataset_metas) with pytest.raises(KeyError): prediction = _gen_prediction(num_classes=num_classes) - groundtruth = _gen_groundtruth(num_classes=num_classes) del prediction['bboxes'] - coco_det_metric([prediction], [groundtruth]) + lvis_det_metric([prediction]) with pytest.raises(AssertionError): prediction = _gen_prediction(num_classes=num_classes) - groundtruth = _gen_groundtruth(num_classes=num_classes) - coco_det_metric(prediction, groundtruth) + lvis_det_metric(prediction) tmp_dir.cleanup() @@ -324,17 +316,17 @@ def test_compute_metric(): # create dummy data fake_json_file = osp.join(tmp_dir.name, 'fake_data.json') - _create_dummy_coco_json(fake_json_file) + create_dummy_lvis_json(fake_json_file) dummy_pred = _create_dummy_results() fake_dataset_metas = dict(classes=['car', 'bicycle']) # test single coco dataset evaluation - coco_det_metric = LVISDetection( + lvis_det_metric = LVISDetection( ann_file=fake_json_file, classwise=False, outfile_prefix=f'{tmp_dir.name}/test', dataset_meta=fake_dataset_metas) - eval_results = coco_det_metric([dummy_pred], [dict()]) + eval_results = lvis_det_metric([dummy_pred]) target = { 'bbox_AP': 1.0, 'bbox_AP50': 1.0, @@ -353,13 +345,13 @@ def test_compute_metric(): assert osp.isfile(osp.join(tmp_dir.name, 'test.bbox.json')) # test box and segm coco dataset evaluation - coco_det_metric = LVISDetection( + lvis_det_metric = LVISDetection( ann_file=fake_json_file, classwise=False, metric=['bbox', 'segm'], outfile_prefix=f'{tmp_dir.name}/test', dataset_meta=fake_dataset_metas) - eval_results = coco_det_metric([dummy_pred], [dict()]) + eval_results = lvis_det_metric([dummy_pred]) target = { 'bbox_AP': 1.0, 'bbox_AP50': 1.0, @@ -388,13 +380,13 @@ def test_compute_metric(): assert osp.isfile(osp.join(tmp_dir.name, 'test.segm.json')) # test format only evaluation - coco_det_metric = LVISDetection( + lvis_det_metric = LVISDetection( ann_file=fake_json_file, classwise=False, format_only=True, outfile_prefix=f'{tmp_dir.name}/test', dataset_meta=fake_dataset_metas) - eval_results = coco_det_metric([dummy_pred], [dict()]) + eval_results = lvis_det_metric([dummy_pred]) assert osp.exists(f'{tmp_dir.name}/test.bbox.json') assert eval_results == dict() tmp_dir.cleanup() From d95c9faa44394540fae9e614dfd96c0c347e55fe Mon Sep 17 00:00:00 2001 From: RangiLyu Date: Fri, 3 Mar 2023 11:12:14 +0800 Subject: [PATCH 07/13] lint --- mmeval/metrics/lvis_detection.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mmeval/metrics/lvis_detection.py b/mmeval/metrics/lvis_detection.py index 0574e37f..bca24c3c 100644 --- a/mmeval/metrics/lvis_detection.py +++ b/mmeval/metrics/lvis_detection.py @@ -37,7 +37,7 @@ class LVISDetection(COCODetection): proposal_nums (int): Numbers of proposals to be evaluated. Defaults to 300. metric_items (List[str], optional): Metric result names to be - recorded in the evaluation result. If None, default configurations + recorded in the evaluation result. If None, default configurations in LVIS will be used.Defaults to None. 
format_only (bool): Format the output results without performing evaluation. It is useful when you want to format the result @@ -172,7 +172,8 @@ def __call__(self, *args, **kwargs) -> Dict: return metric_result - def compute_metric(self, results: list) -> Dict[str, Union[float, list]]: # type: ignore + def compute_metric( # type: ignore + self, results: list) -> Dict[str, Union[float, list]]: """Compute the LVIS metrics. Args: From 5bef53e05843e2450339399947c6c00f24080ad9 Mon Sep 17 00:00:00 2001 From: RangiLyu Date: Fri, 3 Mar 2023 12:58:59 +0800 Subject: [PATCH 08/13] use cat --- mmeval/metrics/lvis_detection.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mmeval/metrics/lvis_detection.py b/mmeval/metrics/lvis_detection.py index bca24c3c..7e13c243 100644 --- a/mmeval/metrics/lvis_detection.py +++ b/mmeval/metrics/lvis_detection.py @@ -224,7 +224,6 @@ def compute_metric( # type: ignore lvis_eval.params.imgIds = self.img_ids metric_items = self.metric_items if metric == 'proposal': - lvis_eval.params.use_cats = 0 lvis_eval.params.max_dets = self.proposal_nums lvis_eval.evaluate() lvis_eval.accumulate() From cafe17a3a0c5a602bf55b56a5d4cf26217fadf0e Mon Sep 17 00:00:00 2001 From: RangiLyu Date: Mon, 13 Mar 2023 19:31:40 +0800 Subject: [PATCH 09/13] update --- mmeval/metrics/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mmeval/metrics/__init__.py b/mmeval/metrics/__init__.py index 10171393..58166ec2 100644 --- a/mmeval/metrics/__init__.py +++ b/mmeval/metrics/__init__.py @@ -47,7 +47,8 @@ 'ConnectivityError', 'ROUGE', 'Perplexity', 'KeypointEndPointError', 'KeypointAUC', 'KeypointNME', 'NaturalImageQualityEvaluator', 'WordAccuracy', 'PrecisionRecallF1score', - 'SingleLabelPrecisionRecallF1score', 'MultiLabelPrecisionRecallF1score' + 'SingleLabelPrecisionRecallF1score', 'MultiLabelPrecisionRecallF1score', + 'LVISDetection' ] _deprecated_msg = ( From e2aa2c9159d8cd71f892c618fc59cbaab597e9d7 Mon Sep 17 00:00:00 2001 From: RangiLyu Date: Mon, 13 Mar 2023 19:32:47 +0800 Subject: [PATCH 10/13] requirement --- requirements/optional.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/optional.txt b/requirements/optional.txt index 4f9af194..18028250 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -1,4 +1,4 @@ -git+https://github.com/lvis-dataset/lvis-api.git +-e git+https://github.com/lvis-dataset/lvis-api.git#egg=lvis opencv-python!=4.5.5.62,!=4.5.5.64 pycocotools shapely From 5cfd78a3f9e6f204ddf29ed64a1b58c383d805ef Mon Sep 17 00:00:00 2001 From: RangiLyu Date: Mon, 13 Mar 2023 19:48:46 +0800 Subject: [PATCH 11/13] add logging --- mmeval/metrics/lvis_detection.py | 93 +++++++++++++++++++++++++++++--- 1 file changed, 85 insertions(+), 8 deletions(-) diff --git a/mmeval/metrics/lvis_detection.py b/mmeval/metrics/lvis_detection.py index 7e13c243..3599eb55 100644 --- a/mmeval/metrics/lvis_detection.py +++ b/mmeval/metrics/lvis_detection.py @@ -1,12 +1,13 @@ # Copyright (c) OpenMMLab. All rights reserved. import contextlib import io -import logging +import itertools import numpy as np import os.path as osp import tempfile from collections import OrderedDict -from logging import Logger +from rich.console import Console +from rich.table import Table from typing import Dict, List, Optional, Sequence, Union from mmeval.fileio import get_local_path @@ -48,6 +49,9 @@ class LVISDetection(COCODetection): If not specified, a temp file will be created. Defaults to None. 
         backend_args (dict, optional): Arguments to instantiate the
             preifx of uri corresponding backend. Defaults to None.
+        logger (Logger, optional): logger used to record messages. When set to
+            ``None``, the default logger will be used.
+            Defaults to None.
         **kwargs: Keyword parameters passed to :class:`BaseMetric`.
 
     Examples:
@@ -83,7 +87,6 @@ def __init__(self,
                  format_only: bool = False,
                  outfile_prefix: Optional[str] = None,
                  backend_args: Optional[dict] = None,
-                 logger: Optional[Logger] = None,
                  **kwargs) -> None:
         if not HAS_LVISAPI:
             raise RuntimeError(
@@ -104,8 +107,6 @@ def __init__(self,
                     filepath=ann_file, backend_args=backend_args) as local_path:
                 self._lvis_api = LVIS(local_path)
 
-        self.logger = logging.getLogger(__name__) if logger is None else logger
-
     def add_predictions(self, predictions: Sequence[Dict]) -> None:
         """Add predictions to `self._results`.
 
@@ -202,6 +203,7 @@ def compute_metric(  # type: ignore
         result_files = self.results2json(results, outfile_prefix)
 
         eval_results: OrderedDict = OrderedDict()
+        table_results: OrderedDict = OrderedDict()
         if self.format_only:
             self.logger.info('results are saved in '
                              f'{osp.dirname(outfile_prefix)}')
@@ -235,10 +237,13 @@ def compute_metric(  # type: ignore
                 f'ARm@{self.proposal_nums}', f'ARl@{self.proposal_nums}'
             ]
 
+            results_list = []
             for k, v in lvis_eval.get_results().items():
                 if k in metric_items:
-                    val = float(f'{float(v):.3f}')
+                    val = float(v)
+                    results_list.append(f'{round(val * 100, 2):0.2f}')
                     eval_results[k] = val
+            table_results[f'{metric}_result'] = results_list
 
         else:
             lvis_eval.evaluate()
@@ -259,7 +264,7 @@ def compute_metric(  # type: ignore
                     val = float(v)
                     results_list.append(f'{round(val * 100, 2)}')
                     eval_results[key] = val
-            eval_results[f'{metric}_result'] = results_list
+            table_results[f'{metric}_result'] = results_list
 
             if self.classwise:  # Compute per-category AP
                 # Compute per-category AP
@@ -285,7 +290,7 @@ def compute_metric(  # type: ignore
                         (f'{nm["name"]}', f'{round(ap * 100, 2)}'))
                     eval_results[f'{metric}_{nm["name"]}_precision'] = ap
 
-                eval_results[f'{metric}_classwise_result'] = \
+                table_results[f'{metric}_classwise_result'] = \
                     results_per_category
         # Save lvis summarize print information to logger
         redirect_string = io.StringIO()
@@ -294,4 +299,76 @@ def compute_metric(  # type: ignore
         self.logger.info('\n' + redirect_string.getvalue())
         if tmp_dir is not None:
             tmp_dir.cleanup()
+        # if the testing results of the whole dataset is empty,
+        # does not print tables.
+        if len(table_results) > 0:
+            self._print_results(table_results)
         return eval_results
+
+    def _print_results(self, table_results: dict) -> None:
+        """Print the evaluation results table.
+
+        Args:
+            table_results (dict): The computed metric.
+        """
+        for metric in self.metrics:
+            result = table_results[f'{metric}_result']
+
+            if metric == 'proposal':
+                table_title = ' Recall Results (%)'
+                if self.metric_items is None:
+                    assert len(result) == 4
+                    headers = [
+                        f'AR@{self.proposal_nums}',
+                        f'ARs@{self.proposal_nums}',
+                        f'ARm@{self.proposal_nums}',
+                        f'ARl@{self.proposal_nums}'
+                    ]
+                else:
+                    assert len(result) == len(self.metric_items)  # type: ignore # yapf: disable # noqa: E501
+                    headers = self.metric_items  # type: ignore
+            else:
+                table_title = f' {metric} Results (%)'
+                if self.metric_items is None:
+                    assert len(result) == 6
+                    headers = [
+                        f'{metric}_AP', f'{metric}_AP50', f'{metric}_AP75',
+                        f'{metric}_APs', f'{metric}_APm', f'{metric}_APl',
+                        f'{metric}_APr', f'{metric}_APc', f'{metric}_APf'
+                    ]
+                else:
+                    assert len(result) == len(self.metric_items)
+                    headers = [
+                        f'{metric}_{item}' for item in self.metric_items
+                    ]
+            table = Table(title=table_title)
+            console = Console()
+            for name in headers:
+                table.add_column(name, justify='left')
+            table.add_row(*result)
+            with console.capture() as capture:
+                console.print(table, end='')
+            self.logger.info('\n' + capture.get())
+
+            if self.classwise and metric != 'proposal':
+                self.logger.info(
+                    f'Evaluating {metric} metric of each category...')
+                classwise_table_title = f' {metric} Classwise Results (%)'
+                classwise_result = table_results[f'{metric}_classwise_result']
+
+                num_columns = min(6, len(classwise_result) * 2)
+                results_flatten = list(itertools.chain(*classwise_result))
+                headers = ['category', f'{metric}_AP'] * (num_columns // 2)
+                results_2d = itertools.zip_longest(*[
+                    results_flatten[i::num_columns] for i in range(num_columns)
+                ])
+
+                table = Table(title=classwise_table_title)
+                console = Console()
+                for name in headers:
+                    table.add_column(name, justify='left')
+                for _result in results_2d:
+                    table.add_row(*_result)
+                with console.capture() as capture:
+                    console.print(table, end='')
+                self.logger.info('\n' + capture.get())

From 813da52bfe13c8724d57536d53ef60d7edf3c6d0 Mon Sep 17 00:00:00 2001
From: RangiLyu
Date: Mon, 13 Mar 2023 19:51:33 +0800
Subject: [PATCH 12/13] log table

---
 mmeval/metrics/lvis_detection.py                 | 2 +-
 tests/test_metrics/test_lvis_detection_metric.py | 4 +---
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/mmeval/metrics/lvis_detection.py b/mmeval/metrics/lvis_detection.py
index 3599eb55..5d61d3a4 100644
--- a/mmeval/metrics/lvis_detection.py
+++ b/mmeval/metrics/lvis_detection.py
@@ -330,7 +330,7 @@ def _print_results(self, table_results: dict) -> None:
             else:
                 table_title = f' {metric} Results (%)'
                 if self.metric_items is None:
-                    assert len(result) == 6
+                    assert len(result) == 9
                     headers = [
                         f'{metric}_AP', f'{metric}_AP50', f'{metric}_AP75',
                         f'{metric}_APs', f'{metric}_APm', f'{metric}_APl',
diff --git a/tests/test_metrics/test_lvis_detection_metric.py b/tests/test_metrics/test_lvis_detection_metric.py
index 7214bc6c..3c3a6052 100644
--- a/tests/test_metrics/test_lvis_detection_metric.py
+++ b/tests/test_metrics/test_lvis_detection_metric.py
@@ -339,7 +339,6 @@ def test_compute_metric():
         'bbox_APl': 1.0,
     }
-    eval_results.pop('bbox_result')
     results = {k: round(v, 4) for k, v in eval_results.items()}
     assert results == target
     assert osp.isfile(osp.join(tmp_dir.name, 'test.bbox.json'))
@@ -372,8 +371,7 @@ def test_compute_metric():
         'segm_APm': 1.0,
         'segm_APl': 1.0,
     }
-    eval_results.pop('bbox_result')
-    eval_results.pop('segm_result')
+
     results = {k: round(v, 4) for k, v in eval_results.items()}
     assert results == target
     assert osp.isfile(osp.join(tmp_dir.name, 'test.bbox.json'))

From a93a71ffe592182bcdb57f20b9c5019c14bd299c Mon Sep 17 00:00:00 2001
From: RangiLyu
Date: Fri, 17 Mar 2023 13:43:07 +0800
Subject: [PATCH 13/13] sync print results

---
 mmeval/metrics/lvis_detection.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mmeval/metrics/lvis_detection.py b/mmeval/metrics/lvis_detection.py
index 5d61d3a4..8e216b29 100644
--- a/mmeval/metrics/lvis_detection.py
+++ b/mmeval/metrics/lvis_detection.py
@@ -49,6 +49,7 @@ class LVISDetection(COCODetection):
         If not specified, a temp file will be created. Defaults to None.
         backend_args (dict, optional): Arguments to instantiate the
             preifx of uri corresponding backend. Defaults to None.
+        print_results (bool): Whether to print the results. Defaults to True.
         logger (Logger, optional): logger used to record messages. When set to
             ``None``, the default logger will be used.
             Defaults to None.
@@ -301,7 +302,7 @@ def compute_metric(  # type: ignore
             tmp_dir.cleanup()
         # if the testing results of the whole dataset is empty,
        # does not print tables.
-        if len(table_results) > 0:
+        if self.print_results and len(table_results) > 0:
            self._print_results(table_results)
         return eval_results
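A minimal usage sketch of LVISDetection as it stands after PATCH 13, not part of the patch series itself. The annotation path, class names and the `predictions` list are placeholders; `print_results` is assumed to be accepted as a constructor keyword exactly as documented in the docstring above, and the prediction dict layout mirrors the tests in tests/test_metrics/test_lvis_detection_metric.py.

    # Sketch under assumed inputs: 'data/lvis_annotations.json' is a
    # hypothetical LVIS-format annotation file, and `predictions` is a list
    # of per-image dicts with 'img_id', 'bboxes', 'scores', 'labels', 'masks'.
    from mmeval import LVISDetection

    lvis_metric = LVISDetection(
        ann_file='data/lvis_annotations.json',
        metric=['bbox', 'segm'],
        classwise=True,
        print_results=True,  # added by PATCH 13; assumed to pass through __init__
        dataset_meta=dict(classes=('car', 'bicycle')))

    # Ground truths come from the annotation file, so only predictions are passed.
    eval_results = lvis_metric(predictions)
    print(eval_results['bbox_AP'], eval_results['segm_AP'])

When `print_results` is left at its default, the per-metric and classwise tables are rendered with rich and emitted through the metric's logger, as implemented in PATCH 11.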