from collections.abc import Sequence
from typing import Any, Optional, Union

from torch import Tensor
from typing_extensions import Literal

from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.stat_scores import BinaryStatScores, MulticlassStatScores, MultilabelStatScores
from torchmetrics.functional.classification.accuracy import _accuracy_reduce
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE

if not _MATPLOTLIB_AVAILABLE:
    __doctest_skip__ = ["BinaryAccuracy.plot", "MulticlassAccuracy.plot", "MultilabelAccuracy.plot"]


class BinaryAccuracy(BinaryStatScores):
    r"""Compute `Accuracy`_ for binary tasks.

    .. math::
        \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)

    Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.

    As input to ``forward`` and ``update`` the metric accepts the following input:

        - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating
          point tensor with values outside the [0,1] range, we consider the input to be logits and auto-apply
          sigmoid per element. Additionally, we convert to an int tensor by thresholding with the value in
          ``threshold``.
        - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``

    As output to ``forward`` and ``compute`` the metric returns the following output:

        - ``acc`` (:class:`~torch.Tensor`): If ``multidim_average`` is set to ``global``, the metric returns a scalar
          value. If ``multidim_average`` is set to ``samplewise``, the metric returns a ``(N,)`` vector consisting of
          a scalar value per sample.

    If ``multidim_average`` is set to ``samplewise``, we expect at least one additional dimension ``...`` to be
    present, over which the reduction will then be applied instead of the sample dimension ``N``.

    Args:
        threshold: Threshold for transforming probability to binary {0,1} predictions
        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torch import tensor
        >>> from torchmetrics.classification import BinaryAccuracy
        >>> target = tensor([0, 1, 0, 1, 0, 1])
        >>> preds = tensor([0, 0, 1, 1, 0, 1])
        >>> metric = BinaryAccuracy()
        >>> metric(preds, target)
        tensor(0.6667)

    Example (preds is float tensor):
        >>> from torchmetrics.classification import BinaryAccuracy
        >>> target = tensor([0, 1, 0, 1, 0, 1])
        >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
        >>> metric = BinaryAccuracy()
        >>> metric(preds, target)
        tensor(0.6667)

    Example (multidim tensors):
        >>> from torchmetrics.classification import BinaryAccuracy
        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...                 [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
        >>> metric = BinaryAccuracy(multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.3333, 0.1667])

    """

    is_differentiable: bool = False
    higher_is_better: bool = True
    full_state_update: bool = False
    plot_lower_bound: float = 0.0
    plot_upper_bound: float = 1.0

    def compute(self) -> Tensor:
        """Compute accuracy based on inputs passed in to ``update`` previously."""
        tp, fp, tn, fn = self._final_state()
        return _accuracy_reduce(tp, fp, tn, fn, average="binary", multidim_average=self.multidim_average)

    def plot(
        self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
    ) -> _PLOT_OUT_TYPE:
        """Plot a single or multiple values from the metric.

        Args:
            val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
                If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, will add the plot to that axis.

        Returns:
            Figure object and Axes object

        Raises:
            ModuleNotFoundError:
                If `matplotlib` is not installed

        .. plot::
            :scale: 75

            >>> from torch import rand, randint
            >>> # Example plotting a single value
            >>> from torchmetrics.classification import BinaryAccuracy
            >>> metric = BinaryAccuracy()
            >>> metric.update(rand(10), randint(2,(10,)))
            >>> fig_, ax_ = metric.plot()

        .. plot::
            :scale: 75

            >>> from torch import rand, randint
            >>> # Example plotting multiple values
            >>> from torchmetrics.classification import BinaryAccuracy
            >>> metric = BinaryAccuracy()
            >>> values = [ ]
            >>> for _ in range(10):
            ...     values.append(metric(rand(10), randint(2,(10,))))
            >>> fig_, ax_ = metric.plot(values)

        """
        return self._plot(val, ax)
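

# A minimal doctest-style sketch (not the library's internal code path) of the
# preprocessing described in the ``BinaryAccuracy`` docstring above: when a
# floating point ``preds`` tensor contains values outside [0, 1], the whole
# tensor is treated as logits, passed through a sigmoid, and binarized with
# ``threshold`` (0.5 by default) before the stat scores are accumulated.
#
#   >>> import torch
#   >>> preds = torch.tensor([-1.2, 0.3, 2.5])  # out-of-range values -> treated as logits
#   >>> probs = preds.sigmoid()                 # sigmoid applied per element
#   >>> (probs > 0.5).int()                     # thresholding at the default 0.5
#   tensor([0, 1, 1], dtype=torch.int32)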


class MulticlassAccuracy(MulticlassStatScores):
    r"""Compute `Accuracy`_ for multiclass tasks.

    .. math::
        \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)

    Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.

    As input to ``forward`` and ``update`` the metric accepts the following input:

        - ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor
          of shape ``(N, C, ...)``. If preds is a floating point tensor, we apply ``torch.argmax`` along the ``C``
          dimension to automatically convert probabilities/logits into an int tensor.
        - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``

    As output to ``forward`` and ``compute`` the metric returns the following output:

        - ``mca`` (:class:`~torch.Tensor`): A tensor with the accuracy score whose returned shape depends on the
          ``average`` and ``multidim_average`` arguments:

            - If ``multidim_average`` is set to ``global``:

              - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
              - If ``average=None/'none'``, the shape will be ``(C,)``

            - If ``multidim_average`` is set to ``samplewise``:

              - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
              - If ``average=None/'none'``, the shape will be ``(N, C)``

    If ``multidim_average`` is set to ``samplewise``, we expect at least one additional dimension ``...`` to be
    present, over which the reduction will then be applied instead of the sample dimension ``N``.

    Args:
        num_classes: Integer specifying the number of classes
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes a weighted average using their support
            - ``"none"`` or ``None``: Calculates the statistic for each label and applies no reduction

        top_k:
            Number of highest probability or logit score predictions considered to find the correct label.
            Only works when ``preds`` contain probabilities/logits.
        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torch import tensor
        >>> from torchmetrics.classification import MulticlassAccuracy
        >>> target = tensor([2, 1, 0, 0])
        >>> preds = tensor([2, 1, 0, 1])
        >>> metric = MulticlassAccuracy(num_classes=3)
        >>> metric(preds, target)
        tensor(0.8333)
        >>> mca = MulticlassAccuracy(num_classes=3, average=None)
        >>> mca(preds, target)
        tensor([0.5000, 1.0000, 1.0000])

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MulticlassAccuracy
        >>> target = tensor([2, 1, 0, 0])
        >>> preds = tensor([[0.16, 0.26, 0.58],
        ...                 [0.22, 0.61, 0.17],
        ...                 [0.71, 0.09, 0.20],
        ...                 [0.05, 0.82, 0.13]])
        >>> metric = MulticlassAccuracy(num_classes=3)
        >>> metric(preds, target)
        tensor(0.8333)
        >>> mca = MulticlassAccuracy(num_classes=3, average=None)
        >>> mca(preds, target)
        tensor([0.5000, 1.0000, 1.0000])

    Example (multidim tensors):
        >>> from torchmetrics.classification import MulticlassAccuracy
        >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
        >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
        >>> metric = MulticlassAccuracy(num_classes=3, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.5000, 0.2778])
        >>> mca = MulticlassAccuracy(num_classes=3, multidim_average='samplewise', average=None)
        >>> mca(preds, target)
        tensor([[1.0000, 0.0000, 0.5000],
                [0.0000, 0.3333, 0.5000]])

    """

    is_differentiable: bool = False
    higher_is_better: bool = True
    full_state_update: bool = False
    plot_lower_bound: float = 0.0
    plot_upper_bound: float = 1.0
    plot_legend_name: str = "Class"

    def compute(self) -> Tensor:
        """Compute accuracy based on inputs passed in to ``update`` previously."""
        tp, fp, tn, fn = self._final_state()
        return _accuracy_reduce(
            tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average, top_k=self.top_k
        )

    def plot(
        self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
    ) -> _PLOT_OUT_TYPE:
        """Plot a single or multiple values from the metric.

        Args:
            val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
                If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, will add the plot to that axis.

        Returns:
            Figure object and Axes object

        Raises:
            ModuleNotFoundError:
                If `matplotlib` is not installed

        .. plot::
            :scale: 75

            >>> from torch import randint
            >>> # Example plotting a single value per class
            >>> from torchmetrics.classification import MulticlassAccuracy
            >>> metric = MulticlassAccuracy(num_classes=3, average=None)
            >>> metric.update(randint(3, (20,)), randint(3, (20,)))
            >>> fig_, ax_ = metric.plot()

        .. plot::
            :scale: 75

            >>> from torch import randint
            >>> # Example plotting multiple values per class
            >>> from torchmetrics.classification import MulticlassAccuracy
            >>> metric = MulticlassAccuracy(num_classes=3, average=None)
            >>> values = []
            >>> for _ in range(20):
            ...     values.append(metric(randint(3, (20,)), randint(3, (20,))))
            >>> fig_, ax_ = metric.plot(values)

        """
        return self._plot(val, ax)
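

# The default ``average`` for ``MulticlassAccuracy`` is ``"macro"``; a minimal
# doctest-style sketch (using only the public API above) of how it relates to
# ``average=None``: the macro score is the mean of the per-class scores.
#
#   >>> from torch import tensor
#   >>> target = tensor([2, 1, 0, 0])
#   >>> preds = tensor([2, 1, 0, 1])
#   >>> per_class = MulticlassAccuracy(num_classes=3, average=None)(preds, target)
#   >>> per_class
#   tensor([0.5000, 1.0000, 1.0000])
#   >>> per_class.mean()  # equals MulticlassAccuracy(num_classes=3)(preds, target)
#   tensor(0.8333)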


class MultilabelAccuracy(MultilabelStatScores):
    r"""Compute `Accuracy`_ for multilabel tasks.

    .. math::
        \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)

    Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating
      point tensor with values outside the [0,1] range, we consider the input to be logits and auto-apply sigmoid
      per element. Additionally, we convert to an int tensor by thresholding with the value in ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mla`` (:class:`~torch.Tensor`): A tensor with the accuracy score whose returned shape depends on the
      ``average`` and ``multidim_average`` arguments:

        - If ``multidim_average`` is set to ``global``:

          - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
          - If ``average=None/'none'``, the shape will be ``(C,)``

        - If ``multidim_average`` is set to ``samplewise``:

          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
          - If ``average=None/'none'``, the shape will be ``(N, C)``

    If ``multidim_average`` is set to ``samplewise``, we expect at least one additional dimension ``...`` to be
    present, over which the reduction will then be applied instead of the sample dimension ``N``.

    Args:
        num_labels: Integer specifying the number of labels
        threshold: Threshold for transforming probability to binary {0,1} predictions
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes a weighted average using their support
            - ``"none"`` or ``None``: Calculates the statistic for each label and applies no reduction

        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torch import tensor
        >>> from torchmetrics.classification import MultilabelAccuracy
        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = tensor([[0, 0, 1], [1, 0, 1]])
        >>> metric = MultilabelAccuracy(num_labels=3)
        >>> metric(preds, target)
        tensor(0.6667)
        >>> mla = MultilabelAccuracy(num_labels=3, average=None)
        >>> mla(preds, target)
        tensor([1.0000, 0.5000, 0.5000])

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MultilabelAccuracy
        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> metric = MultilabelAccuracy(num_labels=3)
        >>> metric(preds, target)
        tensor(0.6667)
        >>> mla = MultilabelAccuracy(num_labels=3, average=None)
        >>> mla(preds, target)
        tensor([1.0000, 0.5000, 0.5000])

    Example (multidim tensors):
        >>> from torchmetrics.classification import MultilabelAccuracy
        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> mla = MultilabelAccuracy(num_labels=3, multidim_average='samplewise')
        >>> mla(preds, target)
        tensor([0.3333, 0.1667])
        >>> mla = MultilabelAccuracy(num_labels=3, multidim_average='samplewise', average=None)
        >>> mla(preds, target)
        tensor([[0.5000, 0.5000, 0.0000],
                [0.0000, 0.0000, 0.5000]])

    """

    is_differentiable: bool = False
    higher_is_better: bool = True
    full_state_update: bool = False
    plot_lower_bound: float = 0.0
    plot_upper_bound: float = 1.0
    plot_legend_name: str = "Label"

    def compute(self) -> Tensor:
        """Compute accuracy based on inputs passed in to ``update`` previously."""
        tp, fp, tn, fn = self._final_state()
        return _accuracy_reduce(
            tp, fp, tn, fn, average=self.average, multidim_average=self.multidim_average, multilabel=True
        )

    def plot(
        self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
    ) -> _PLOT_OUT_TYPE:
        """Plot a single or multiple values from the metric.

        Args:
            val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
                If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, will add the plot to that axis.

        Returns:
            Figure object and Axes object

        Raises:
            ModuleNotFoundError:
                If `matplotlib` is not installed

        .. plot::
            :scale: 75

            >>> from torch import rand, randint
            >>> # Example plotting a single value
            >>> from torchmetrics.classification import MultilabelAccuracy
            >>> metric = MultilabelAccuracy(num_labels=3)
            >>> metric.update(randint(2, (20, 3)), randint(2, (20, 3)))
            >>> fig_, ax_ = metric.plot()

        .. plot::
            :scale: 75

            >>> from torch import rand, randint
            >>> # Example plotting multiple values
            >>> from torchmetrics.classification import MultilabelAccuracy
            >>> metric = MultilabelAccuracy(num_labels=3)
            >>> values = [ ]
            >>> for _ in range(10):
            ...     values.append(metric(randint(2, (20, 3)), randint(2, (20, 3))))
            >>> fig_, ax_ = metric.plot(values)

        """
        return self._plot(val, ax)
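

# As in the multiclass case, the default ``average="macro"`` result shown in the
# ``MultilabelAccuracy`` examples above is the mean of the per-label scores
# obtained with ``average=None``; a minimal doctest-style sketch:
#
#   >>> from torch import tensor
#   >>> target = tensor([[0, 1, 0], [1, 0, 1]])
#   >>> preds = tensor([[0, 0, 1], [1, 0, 1]])
#   >>> per_label = MultilabelAccuracy(num_labels=3, average=None)(preds, target)
#   >>> per_label.mean()  # equals MultilabelAccuracy(num_labels=3)(preds, target)
#   tensor(0.6667)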


class Accuracy(_ClassificationTaskWrapper):
    r"""Compute `Accuracy`_.

    .. math::
        \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)

    Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.

    This module is a simple wrapper that selects the task-specific version of this metric based on the ``task``
    argument, which can be set to ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
    :class:`~torchmetrics.classification.BinaryAccuracy`, :class:`~torchmetrics.classification.MulticlassAccuracy` and
    :class:`~torchmetrics.classification.MultilabelAccuracy` for the specific influence of each argument and for
    examples.

    Legacy Example:
        >>> from torch import tensor
        >>> target = tensor([0, 1, 2, 3])
        >>> preds = tensor([0, 2, 1, 3])
        >>> accuracy = Accuracy(task="multiclass", num_classes=4)
        >>> accuracy(preds, target)
        tensor(0.5000)

        >>> target = tensor([0, 1, 2])
        >>> preds = tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]])
        >>> accuracy = Accuracy(task="multiclass", num_classes=3, top_k=2)
        >>> accuracy(preds, target)
        tensor(0.6667)

          ?Nmicroglobal   Tclstask)r   
multiclassrF   	thresholdnum_classes
num_labelsr    )rI   macroweightednoner!   )rJ   
samplewiser@   ignore_indexvalidate_argskwargsr   c
                 K   s   t |}|
|||	d |t jkrt|fi |
S |t jkrLt|ts1td| dt	| t|tsBtd| dt	| t
|||fi |
S |t jkrlt|tsbtd| dt	| t|||fi |
S td| )zInitialize task metric.)r!   rV   rW   z;Optional arg `num_classes` must be type `int` when task is z. Got z5Optional arg `top_k` must be type `int` when task is z:Optional arg `num_labels` must be type `int` when task is zNot handled value: )r   from_strupdateBINARYr   
MULTICLASS
isinstanceint
ValueErrortyper=   
MULTILABELrD   )rL   rM   rO   rP   rQ   r    r!   r@   rV   rW   rX   r)   r)   r*   __new__  s.   






zAccuracy.__new__)rH   NNrI   rJ   rK   NT)r5   r6   r7   r8   r`   r   r;   r   r^   r9   r   r   rb   r)   r)   r)   r*   rG     sF     
	
rG   N) collections.abcr   typingr   r   r   torchr   typing_extensionsr    torchmetrics.classification.baser   'torchmetrics.classification.stat_scoresr	   r
   r   /torchmetrics.functional.classification.accuracyr   torchmetrics.metricr   torchmetrics.utilities.enumsr   torchmetrics.utilities.importsr   torchmetrics.utilities.plotr   r   __doctest_skip__r   r=   rD   rG   r)   r)   r)   r*   <module>   s&   x  
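

# A short usage sketch of the ``Accuracy`` wrapper: ``task`` selects the concrete
# metric class at construction time, so the wrapper returns an instance of the
# corresponding class rather than of ``Accuracy`` itself.
#
#   >>> from torchmetrics import Accuracy
#   >>> from torchmetrics.classification import BinaryAccuracy
#   >>> metric = Accuracy(task="binary", threshold=0.5)
#   >>> isinstance(metric, BinaryAccuracy)
#   True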