Simple evaluation example

[1]:
# Standard library
import json
import logging

# Third-party: faster-coco-eval and its drop-in COCO / COCOeval classes
import faster_coco_eval
from faster_coco_eval import COCO, COCOeval_faster

# Record which faster-coco-eval build produced the outputs below.
print(f"{faster_coco_eval.__version__=}")

# Let INFO-level records from the library reach the notebook output.
# (logging.getLogger() with no name is the root logger.)
logging.getLogger().setLevel(logging.INFO)
# DEBUG is below the INFO threshold set above, so this record is not shown.
logging.debug("Запись.")
faster_coco_eval.__version__='1.6.6'
[2]:
# Load the ground-truth and detection annotations (plain COCO-format JSON)
# from the repository's test dataset into Python dicts/lists.
prepared_coco_in_dict = COCO.load_json("../tests/dataset/gt_dataset.json")
prepared_anns = COCO.load_json("../tests/dataset/dt_dataset.json")
[3]:
# Evaluation configuration: use segmentation-mask IoU and run the
# evaluation class-agnostically (category labels ignored when matching).
iouType, useCats = "segm", False
[4]:
# Build the ground-truth index and attach the detections to it.
cocoGt = COCO(prepared_coco_in_dict)
cocoDt = cocoGt.loadRes(prepared_anns)

# extra_calc=True additionally computes the extended metrics shown below
# (stats_as_dict / extended_metrics cells).
cocoEval = COCOeval_faster(cocoGt, cocoDt, iouType, extra_calc=True)
# Single maxDets threshold equal to the total number of GT annotations,
# so no detections are cut off by the per-image detection limit.
cocoEval.params.maxDets = [len(cocoGt.anns)]

if not useCats:
    cocoEval.params.useCats = 0  # Disable labels (class-agnostic matching)

# Standard COCO evaluation sequence: match, accumulate, print summary.
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
INFO:faster_coco_eval.core.cocoeval:Evaluate annotation type *segm*
INFO:faster_coco_eval.core.cocoeval:COCOeval_opt.evaluate() finished...
INFO:faster_coco_eval.core.cocoeval:DONE (t=0.00s).
INFO:faster_coco_eval.core.cocoeval:Accumulating evaluation results...
INFO:faster_coco_eval.core.cocoeval:COCOeval_opt.accumulate() finished...
INFO:faster_coco_eval.core.cocoeval:DONE (t=0.00s).
INFO:faster_coco_eval.core.cocoeval: Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=  9 ] = 0.783
INFO:faster_coco_eval.core.cocoeval: Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=  9 ] = 0.783
INFO:faster_coco_eval.core.cocoeval: Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=  9 ] = 0.783
INFO:faster_coco_eval.core.cocoeval: Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=  9 ] = -1.000
INFO:faster_coco_eval.core.cocoeval: Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=  9 ] = 1.000
INFO:faster_coco_eval.core.cocoeval: Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=  9 ] = 0.000
INFO:faster_coco_eval.core.cocoeval: Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  9 ] = 0.889
INFO:faster_coco_eval.core.cocoeval: Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=  9 ] = -1.000
INFO:faster_coco_eval.core.cocoeval: Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=  9 ] = 1.000
INFO:faster_coco_eval.core.cocoeval: Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=  9 ] = 0.000
INFO:faster_coco_eval.core.cocoeval: Average Recall     (AR) @[ IoU=0.50      | area=   all | maxDets=  9 ] = 0.889
INFO:faster_coco_eval.core.cocoeval: Average Recall     (AR) @[ IoU=0.75      | area=   all | maxDets=  9 ] = 0.889
[5]:
# Raw summary metrics as a numpy array, in the same order as the
# summarize() lines printed above.
cocoEval.stats
[5]:
array([ 0.78327833,  0.78327833,  0.78327833, -1.        ,  1.        ,
        0.        ,  0.88888889,  0.        ,  0.        , -1.        ,
        1.        ,  0.        ])
[6]:
# The same summary metrics keyed by name, plus extra values
# (mIoU, mAUC_50) computed because extra_calc=True.
cocoEval.stats_as_dict
[6]:
{'AP_all': 0.7832783278327835,
 'AP_50': 0.7832783278327836,
 'AP_75': 0.7832783278327836,
 'AP_small': -1.0,
 'AP_medium': 1.0,
 'AP_large': 0.0,
 'AR_all': 0.888888888888889,
 'AR_second': 0.0,
 'AR_third': 0.0,
 'AR_small': -1.0,
 'AR_medium': 1.0,
 'AR_large': 0.0,
 'AR_50': 0.8888888888888888,
 'AR_75': 0.8888888888888888,
 'mIoU': 1.0,
 'mAUC_50': 0.594074074074074}
[7]:
# Per-class breakdown (mAP@50:95, mAP@50, precision, recall) together
# with the overall aggregates; here a single "all" entry since
# evaluation ran class-agnostically (useCats = 0).
cocoEval.extended_metrics
[7]:
{'class_map': [{'class': 'all',
   'map@50:95': 0.7832783278327835,
   'map@50': 0.7832783278327836,
   'precision': 0.8888888888888888,
   'recall': 0.88}],
 'map': 0.7832783278327836,
 'precision': 0.8888888888888888,
 'recall': 0.88}