from typing import Any, Dict, List, Literal, Optional, Tuple, Union

import torch
from torch import Tensor

from torchmetrics.detection.helpers import (
    CocoBackend,
    _calculate_map_with_coco,
    _get_safe_item_values,
    _input_validator,
    _validate_iou_type_arg,
)
from torchmetrics.utilities.imports import _FASTER_COCO_EVAL_AVAILABLE, _PYCOCOTOOLS_AVAILABLE, _TORCHVISION_AVAILABLE

# The doctests require torchvision and at least one COCO backend (pycocotools or faster_coco_eval).
if not (_PYCOCOTOOLS_AVAILABLE or _FASTER_COCO_EVAL_AVAILABLE) or not _TORCHVISION_AVAILABLE:
    __doctest_skip__ = ["mean_average_precision"]


def mean_average_precision(
    preds: List[Dict[str, Any]],
    target: List[Dict[str, Any]],
    box_format: Literal["xyxy", "xywh", "cxcywh"] = "xyxy",
    iou_type: Union[Literal["bbox", "segm"], Tuple[Literal["bbox", "segm"], ...]] = "bbox",
    iou_thresholds: Optional[List[float]] = None,
    rec_thresholds: Optional[List[float]] = None,
    max_detection_thresholds: Optional[List[int]] = None,
    class_metrics: bool = False,
    extended_summary: bool = False,
    average: Literal["macro", "micro"] = "macro",
    backend: Literal["pycocotools", "faster_coco_eval"] = "pycocotools",
    warn_on_many_detections: bool = True,
) -> Dict[str, Union[Tensor, Dict[str, Tensor]]]:
    """Compute the mean average precision (mAP) and mean average recall (mAR) for object detection predictions.

    This function evaluates detection predictions for either bounding boxes or segmentation masks based
    on the provided ``iou_type``, comparing predictions (``preds``) and ground truth annotations (``target``)
    using a COCO-style evaluation. The expected input for each image is a dictionary with keys:

    - For bounding boxes (``iou_type="bbox"``): ``boxes``, ``scores``, and ``labels``.
    - For segmentation (``iou_type="segm"``): ``masks``, ``scores``, and ``labels``.

    In addition, ground truth dictionaries may include the optional keys ``iscrowd`` and ``area``.
    Boxes are expected in the coordinate format provided via ``box_format``, which supports:

    - ``"xyxy"``: [xmin, ymin, xmax, ymax]
    - ``"xywh"``: [xmin, ymin, width, height]
    - ``"cxcywh"``: [center_x, center_y, width, height]
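
    For example, the box ``[258.0, 41.0, 606.0, 285.0]`` in ``"xyxy"`` format corresponds to
    ``[258.0, 41.0, 348.0, 244.0]`` in ``"xywh"`` and to ``[432.0, 163.0, 348.0, 244.0]`` in ``"cxcywh"``.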

    The evaluation defaults to IoU thresholds from 0.50 to 0.95 (step 0.05), recall thresholds
    from 0.00 to 1.00 (step 0.01), and maximum detection thresholds of [1, 10, 100]. These can be overridden
    by specifying ``iou_thresholds``, ``rec_thresholds``, and ``max_detection_thresholds``, respectively.
    Optionally, per-class metrics may be computed by enabling ``class_metrics``, and an extended summary
    (including IoU, precision, recall, and scores) is available via ``extended_summary``.
    The averaging method over labels can be set with ``average`` ("macro" or "micro") and the evaluation
    is performed using either the ``pycocotools`` or ``faster_coco_eval`` backend.

    Args:
        preds: List of dictionaries, each representing detection predictions for a single image.
        target: List of dictionaries, each representing ground truth annotations for a single image.
        box_format: Format of the input bounding boxes. Supported values are "xyxy", "xywh", and "cxcywh".
        iou_type: Type of IoU to compute. Can be "bbox", "segm", or a tuple containing both.
        iou_thresholds: List of IoU thresholds (default is [0.5, 0.55, ..., 0.95]).
        rec_thresholds: List of recall thresholds (default is [0.0, 0.01, ..., 1.0]).
        max_detection_thresholds: List of maximum detections per image (default is [1, 10, 100]).
        class_metrics: Whether to compute per-class mAP and mAR metrics.
        extended_summary: Whether to include additional outputs (IoU, precision, recall, scores) in the result.
        average: Averaging method over labels, either "macro" or "micro".
        backend: Backend to use for evaluation ("pycocotools" or "faster_coco_eval").
        warn_on_many_detections: If True, warn when there are an unusually large number of detections.

    Returns:
        dict: A dictionary containing the evaluation metrics. The dictionary includes the following keys:
            - ``map``: Global mean average precision over the defined IoU thresholds.
            - ``mar_{max_det}``: Global mean average recall for each maximum detection threshold.
            - ``map_per_class``: Mean average precision per observed class (or -1 if ``class_metrics`` is disabled).
            - ``mar_{max_det}_per_class``: Mean average recall per observed class for the highest detection threshold.
            - ``classes``: A tensor listing all observed classes.
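
            With the default ``max_detection_thresholds`` of ``[1, 10, 100]``, the recall keys are therefore
            ``mar_1``, ``mar_10``, and ``mar_100``, and the per-class recall key is ``mar_100_per_class``.
            The examples below additionally read ``map_50``, the mean average precision at an IoU threshold of 0.50.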

    Example::

        # Example with bounding boxes
        >>> from torch import tensor
        >>> from torchmetrics.functional.detection.map import mean_average_precision
        >>> preds = [
        ...   {
        ...     "boxes": tensor([[258.0, 41.0, 606.0, 285.0]]),
        ...     "scores": tensor([0.536]),
        ...     "labels": tensor([0]),
        ...   }
        ... ]
        >>> target = [
        ...   {
        ...     "boxes": tensor([[214.0, 41.0, 562.0, 285.0]]),
        ...     "labels": tensor([0]),
        ...   }
        ... ]
        >>> result = mean_average_precision(preds, target, iou_type="bbox")
        >>> print(f"mAP: {result['map']:.4f}, mAP@0.5: {result['map_50']:.4f}")
        mAP: 0.6000, mAP@0.5: 1.0000
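
    Here the predicted and ground-truth boxes overlap with an IoU of roughly 0.78, so the prediction is
    matched for the six default IoU thresholds from 0.50 to 0.75 and unmatched for the four from 0.80 to
    0.95, which yields a mAP of 6/10 = 0.6, while the mAP at IoU=0.50 alone is 1.0.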

    Example::

        # Example with segmentation masks
        >>> import torch
        >>> from torch import tensor
        >>> from torchmetrics.functional.detection.map import mean_average_precision
        >>> mask_pred = tensor([
        ...   [0, 0, 0, 0, 0],
        ...   [0, 0, 1, 1, 0],
        ...   [0, 0, 1, 1, 0],
        ...   [0, 0, 0, 0, 0],
        ...   [0, 0, 0, 0, 0],
        ... ], dtype=torch.bool)
        >>> mask_tgt = tensor([
        ...   [0, 0, 0, 0, 0],
        ...   [0, 0, 1, 0, 0],
        ...   [0, 0, 1, 1, 0],
        ...   [0, 0, 1, 0, 0],
        ...   [0, 0, 0, 0, 0],
        ... ], dtype=torch.bool)
        >>> preds = [
        ...   {
        ...     "masks": mask_pred.unsqueeze(0),
        ...     "scores": tensor([0.536]),
        ...     "labels": tensor([0]),
        ...   }
        ... ]
        >>> target = [
        ...   {
        ...     "masks": mask_tgt.unsqueeze(0),
        ...     "labels": tensor([0]),
        ...   }
        ... ]
        >>> result = mean_average_precision(preds, target, iou_type="segm")
        >>> print(f"mAP: {result['map']:.4f}, mAP@0.5: {result['map_50']:.4f}")
        mAP: 0.2000, mAP@0.5: 1.0000
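
    Example::

        # Overriding the defaults: custom IoU thresholds, per-class metrics, and the
        # faster_coco_eval backend (illustrative call only; results depend on the data
        # and on the installed backend, so it is skipped by doctest)
        >>> result = mean_average_precision(
        ...     preds,
        ...     target,
        ...     iou_type="segm",
        ...     iou_thresholds=[0.5, 0.75],
        ...     max_detection_thresholds=[1, 10, 100],
        ...     class_metrics=True,
        ...     backend="faster_coco_eval",
        ... )  # doctest: +SKIP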

    """
    # Default thresholds follow the COCO protocol: IoU 0.50:0.05:0.95, recall 0.00:0.01:1.00,
    # and maximum detection counts of [1, 10, 100] (sorted ascending).
    iou_thresholds = iou_thresholds or torch.linspace(0.5, 0.95, round((0.95 - 0.5) / 0.05) + 1).tolist()
    rec_thresholds = rec_thresholds or torch.linspace(0.0, 1.0, round(1.0 / 0.01) + 1).tolist()
    max_detection_thresholds = torch.sort(
        torch.tensor(max_detection_thresholds or [1, 10, 100], dtype=torch.int)
    )[0].tolist()

    # Validate the requested IoU type(s) and the input structure, then set up the COCO backend.
    iou_type = _validate_iou_type_arg(iou_type)
    _input_validator(preds, target, iou_type=iou_type)
    coco_backend = CocoBackend(backend=backend)

    # Collect detections (boxes and/or masks, plus labels and scores) from the predictions.
    detection_box, detection_labels, detection_scores, detection_mask = [], [], [], []
    for item in preds:
        bbox_detection, mask_detection = _get_safe_item_values(
            iou_type, box_format, max_detection_thresholds, coco_backend, item, warn=warn_on_many_detections
        )
        if bbox_detection is not None:
            detection_box.append(bbox_detection)
        if mask_detection is not None:
            detection_mask.append(mask_detection)
        detection_labels.append(item["labels"])
        detection_scores.append(item["scores"])

    # Collect ground truths; the optional "iscrowd" and "area" entries default to zeros.
    groundtruth_box, groundtruth_mask = [], []
    groundtruth_labels, groundtruth_crowds, groundtruth_area = [], [], []
    for item in target:
        bbox_groundtruth, mask_groundtruth = _get_safe_item_values(
            iou_type, box_format, max_detection_thresholds, coco_backend, item
        )
        if bbox_groundtruth is not None:
            groundtruth_box.append(bbox_groundtruth)
        if mask_groundtruth is not None:
            groundtruth_mask.append(mask_groundtruth)
        groundtruth_labels.append(item["labels"])
        groundtruth_crowds.append(item.get("iscrowd", torch.zeros_like(item["labels"])))
        groundtruth_area.append(item.get("area", torch.zeros_like(item["labels"])))

    # Run the COCO-style evaluation and squeeze single-element tensors in the result to scalars.
    result_dict = _calculate_map_with_coco(
        coco_backend,
        detection_box, detection_labels, detection_scores, detection_mask,
        groundtruth_box, groundtruth_mask, groundtruth_labels, groundtruth_crowds, groundtruth_area,
        iou_type, iou_thresholds, rec_thresholds, max_detection_thresholds,
        class_metrics, extended_summary, average,
    )
    return {k: v.squeeze() if isinstance(v, torch.Tensor) and v.numel() == 1 else v for k, v in result_dict.items()}