from collections.abc import Sequence
from typing import Any, Optional, Union

from torch import Tensor
from typing_extensions import Literal

from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.stat_scores import BinaryStatScores, MulticlassStatScores, MultilabelStatScores
from torchmetrics.functional.classification.f_beta import (
    _binary_fbeta_score_arg_validation,
    _fbeta_reduce,
    _multiclass_fbeta_score_arg_validation,
    _multilabel_fbeta_score_arg_validation,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import ClassificationTask
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE

if not _MATPLOTLIB_AVAILABLE:
    __doctest_skip__ = [
        "BinaryFBetaScore.plot",
        "MulticlassFBetaScore.plot",
        "MultilabelFBetaScore.plot",
        "BinaryF1Score.plot",
        "MulticlassF1Score.plot",
        "MultilabelF1Score.plot",
    ]


class BinaryFBetaScore(BinaryStatScores):
    r"""Compute `F-score`_ metric for binary tasks.

    .. math::
        F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
        {(\beta^2 * \text{precision}) + \text{recall}}

    The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
    where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
    positives and false negatives respectively. If this case is encountered, a score of `zero_division`
    (0 or 1, default is 0) is returned.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, ...)``. If preds is a floating
      point tensor with values outside the [0,1] range, we consider the input to be logits and will auto-apply sigmoid
      per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``bfbs`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``multidim_average`` argument:

        - If ``multidim_average`` is set to ``global`` the output will be a scalar tensor
        - If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)`` consisting of
          a scalar value per sample.

    If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
    which the reduction will then be applied over instead of the sample dimension ``N``.

    Args:
        beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight
        threshold: Threshold for transforming probability to binary {0,1} predictions
        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        zero_division: Should be `0` or `1`. The value returned when
            :math:`\text{TP} + \text{FP} = 0 \wedge \text{TP} + \text{FN} = 0`.

    Example (preds is int tensor):
        >>> from torch import tensor
        >>> from torchmetrics.classification import BinaryFBetaScore
        >>> target = tensor([0, 1, 0, 1, 0, 1])
        >>> preds = tensor([0, 0, 1, 1, 0, 1])
        >>> metric = BinaryFBetaScore(beta=2.0)
        >>> metric(preds, target)
        tensor(0.6667)

    Example (preds is float tensor):
        >>> from torchmetrics.classification import BinaryFBetaScore
        >>> target = tensor([0, 1, 0, 1, 0, 1])
        >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
        >>> metric = BinaryFBetaScore(beta=2.0)
        >>> metric(preds, target)
        tensor(0.6667)

    Example (multidim tensors):
        >>> from torchmetrics.classification import BinaryFBetaScore
        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99],  [0.63, 0.04]],
        ...                 [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
        >>> metric = BinaryFBetaScore(beta=2.0, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.5882, 0.0000])
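
    Example (checking the first example by hand; it has ``tp=2``, ``fp=1`` and ``fn=1``):
        >>> beta, tp, fp, fn = 2.0, 2, 1, 1
        >>> round((1 + beta**2) * tp / ((1 + beta**2) * tp + beta**2 * fn + fp), 4)
        0.6667

    Example (no positive predictions or targets at all, so the score falls back to ``zero_division``):
        >>> BinaryFBetaScore(beta=2.0)(tensor([0, 0, 0]), tensor([0, 0, 0]))
        tensor(0.)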

    """

    is_differentiable: bool = False
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False
    plot_lower_bound: float = 0.0
    plot_upper_bound: float = 1.0

    def __init__(
        self,
        beta: float,
        threshold: float = 0.5,
        multidim_average: Literal["global", "samplewise"] = "global",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        zero_division: float = 0,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            threshold=threshold,
            multidim_average=multidim_average,
            ignore_index=ignore_index,
            validate_args=False,
            **kwargs,
        )
        if validate_args:
            _binary_fbeta_score_arg_validation(beta, threshold, multidim_average, ignore_index, zero_division)
        self.validate_args = validate_args
        self.zero_division = zero_division
        self.beta = beta

    def compute(self) -> Tensor:
        """Compute metric."""
        tp, fp, tn, fn = self._final_state()
        return _fbeta_reduce(
            tp,
            fp,
            tn,
            fn,
            self.beta,
            average="binary",
            multidim_average=self.multidim_average,
            zero_division=self.zero_division,
        )

    def plot(
        self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
    ) -> _PLOT_OUT_TYPE:
        """Plot a single or multiple values from the metric.

        Args:
            val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
                If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, will add the plot to that axis

        Returns:
            Figure object and Axes object

        Raises:
            ModuleNotFoundError:
                If `matplotlib` is not installed

        .. plot::
            :scale: 75

            >>> from torch import rand, randint
            >>> # Example plotting a single value
            >>> from torchmetrics.classification import BinaryFBetaScore
            >>> metric = BinaryFBetaScore(beta=2.0)
            >>> metric.update(rand(10), randint(2,(10,)))
            >>> fig_, ax_ = metric.plot()

        .. plot::
            :scale: 75

            >>> from torch import rand, randint
            >>> # Example plotting multiple values
            >>> from torchmetrics.classification import BinaryFBetaScore
            >>> metric = BinaryFBetaScore(beta=2.0)
            >>> values = [ ]
            >>> for _ in range(10):
            ...     values.append(metric(rand(10), randint(2,(10,))))
            >>> fig_, ax_ = metric.plot(values)

        """
        return self._plot(val, ax)


class MulticlassFBetaScore(MulticlassStatScores):
    r"""Compute `F-score`_ metric for multiclass tasks.

    .. math::
        F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
        {(\beta^2 * \text{precision}) + \text{recall}}

    The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
    where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
    positives and false negatives respectively. If this case is encountered for any class, the metric for that class
    will be set to `zero_division` (0 or 1, default is 0) and the overall metric may therefore be affected in turn.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ..)``.
      If preds is a floating point tensor, we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
      probabilities/logits into an int tensor.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mcfbs`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
      ``multidim_average`` arguments:

        - If ``multidim_average`` is set to ``global``:

          - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
          - If ``average=None/'none'``, the shape will be ``(C,)``

        - If ``multidim_average`` is set to ``samplewise``:

          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
          - If ``average=None/'none'``, the shape will be ``(N, C)``

    If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
    which the reduction will then be applied over instead of the sample dimension ``N``.

    Args:
        beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight
        num_classes: Integer specifying the number of classes
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes a weighted average using their support
            - ``"none"`` or ``None``: Calculates the statistic for each label and applies no reduction
        top_k:
            Number of highest probability or logit score predictions considered to find the correct label.
            Only works when ``preds`` contain probabilities/logits.
        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        zero_division: Should be `0` or `1`. The value returned when
            :math:`\text{TP} + \text{FP} = 0 \wedge \text{TP} + \text{FN} = 0`.

    Example (preds is int tensor):
        >>> from torch import tensor
        >>> from torchmetrics.classification import MulticlassFBetaScore
        >>> target = tensor([2, 1, 0, 0])
        >>> preds = tensor([2, 1, 0, 1])
        >>> metric = MulticlassFBetaScore(beta=2.0, num_classes=3)
        >>> metric(preds, target)
        tensor(0.7963)
        >>> mcfbs = MulticlassFBetaScore(beta=2.0, num_classes=3, average=None)
        >>> mcfbs(preds, target)
        tensor([0.5556, 0.8333, 1.0000])

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MulticlassFBetaScore
        >>> target = tensor([2, 1, 0, 0])
        >>> preds = tensor([[0.16, 0.26, 0.58],
        ...                 [0.22, 0.61, 0.17],
        ...                 [0.71, 0.09, 0.20],
        ...                 [0.05, 0.82, 0.13]])
        >>> metric = MulticlassFBetaScore(beta=2.0, num_classes=3)
        >>> metric(preds, target)
        tensor(0.7963)
        >>> mcfbs = MulticlassFBetaScore(beta=2.0, num_classes=3, average=None)
        >>> mcfbs(preds, target)
        tensor([0.5556, 0.8333, 1.0000])

    Example (multidim tensors):
        >>> from torchmetrics.classification import MulticlassFBetaScore
        >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
        >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
        >>> metric = MulticlassFBetaScore(beta=2.0, num_classes=3, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.4697, 0.2706])
        >>> mcfbs = MulticlassFBetaScore(beta=2.0, num_classes=3, multidim_average='samplewise', average=None)
        >>> mcfbs(preds, target)
        tensor([[0.9091, 0.0000, 0.5000],
                [0.0000, 0.3571, 0.4545]])
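
    Example (checking the default ``macro`` reduction by hand; it is the unweighted mean of the
    per-class scores ``[0.5556, 0.8333, 1.0000]``, i.e. ``5/9``, ``5/6`` and ``1.0``, shown above):
        >>> round((5 / 9 + 5 / 6 + 1.0) / 3, 4)
        0.7963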

    """

    is_differentiable: bool = False
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False
    plot_lower_bound: float = 0.0
    plot_upper_bound: float = 1.0
    plot_legend_name: str = "Class"

    def __init__(
        self,
        beta: float,
        num_classes: int,
        top_k: int = 1,
        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
        multidim_average: Literal["global", "samplewise"] = "global",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        zero_division: float = 0,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            num_classes=num_classes,
            top_k=top_k,
            average=average,
            multidim_average=multidim_average,
            ignore_index=ignore_index,
            validate_args=False,
            **kwargs,
        )
        if validate_args:
            _multiclass_fbeta_score_arg_validation(
                beta, num_classes, top_k, average, multidim_average, ignore_index, zero_division
            )
        self.validate_args = validate_args
        self.zero_division = zero_division
        self.beta = beta

    def compute(self) -> Tensor:
        """Compute metric."""
        tp, fp, tn, fn = self._final_state()
        return _fbeta_reduce(
            tp,
            fp,
            tn,
            fn,
            self.beta,
            average=self.average,
            multidim_average=self.multidim_average,
            zero_division=self.zero_division,
        )

    def plot(
        self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
    ) -> _PLOT_OUT_TYPE:
        """Plot a single or multiple values from the metric.

        Args:
            val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
                If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, will add the plot to that axis

        Returns:
            Figure object and Axes object

        Raises:
            ModuleNotFoundError:
                If `matplotlib` is not installed

        .. plot::
            :scale: 75

            >>> from torch import randint
            >>> # Example plotting a single value per class
            >>> from torchmetrics.classification import MulticlassFBetaScore
            >>> metric = MulticlassFBetaScore(num_classes=3, beta=2.0, average=None)
            >>> metric.update(randint(3, (20,)), randint(3, (20,)))
            >>> fig_, ax_ = metric.plot()

        .. plot::
            :scale: 75

            >>> from torch import randint
            >>> # Example plotting multiple values per class
            >>> from torchmetrics.classification import MulticlassFBetaScore
            >>> metric = MulticlassFBetaScore(num_classes=3, beta=2.0, average=None)
            >>> values = []
            >>> for _ in range(20):
            ...     values.append(metric(randint(3, (20,)), randint(3, (20,))))
            >>> fig_, ax_ = metric.plot(values)

        """
        return self._plot(val, ax)


class MultilabelFBetaScore(MultilabelStatScores):
    r"""Compute `F-score`_ metric for multilabel tasks.

    .. math::
        F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
        {(\beta^2 * \text{precision}) + \text{recall}}

    The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
    where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
    positives and false negatives respectively. If this case is encountered for any label, the metric for that label
    will be set to `zero_division` (0 or 1, default is 0) and the overall metric may therefore be affected in turn.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating
      point tensor with values outside the [0,1] range, we consider the input to be logits and will auto-apply sigmoid
      per element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``.

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mlfbs`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
      ``multidim_average`` arguments:

        - If ``multidim_average`` is set to ``global``:

          - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
          - If ``average=None/'none'``, the shape will be ``(C,)``

        - If ``multidim_average`` is set to ``samplewise``:

          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
          - If ``average=None/'none'``, the shape will be ``(N, C)``

    If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
    which the reduction will then be applied over instead of the sample dimension ``N``.

    Args:
        beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight
        num_labels: Integer specifying the number of labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes a weighted average using their support
            - ``"none"`` or ``None``: Calculates the statistic for each label and applies no reduction

        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        zero_division: Should be `0` or `1`. The value returned when
            :math:`\text{TP} + \text{FP} = 0 \wedge \text{TP} + \text{FN} = 0`.

    Example (preds is int tensor):
        >>> from torch import tensor
        >>> from torchmetrics.classification import MultilabelFBetaScore
        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = tensor([[0, 0, 1], [1, 0, 1]])
        >>> metric = MultilabelFBetaScore(beta=2.0, num_labels=3)
        >>> metric(preds, target)
        tensor(0.6111)
        >>> mlfbs = MultilabelFBetaScore(beta=2.0, num_labels=3, average=None)
        >>> mlfbs(preds, target)
        tensor([1.0000, 0.0000, 0.8333])

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MultilabelFBetaScore
        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> metric = MultilabelFBetaScore(beta=2.0, num_labels=3)
        >>> metric(preds, target)
        tensor(0.6111)
        >>> mlfbs = MultilabelFBetaScore(beta=2.0, num_labels=3, average=None)
        >>> mlfbs(preds, target)
        tensor([1.0000, 0.0000, 0.8333])

    Example (multidim tensors):
        >>> from torchmetrics.classification import MultilabelFBetaScore
        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99],  [0.63, 0.04]],
        ...                 [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
        >>> metric = MultilabelFBetaScore(num_labels=3, beta=2.0, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.5556, 0.0000])
        >>> mlfbs = MultilabelFBetaScore(num_labels=3, beta=2.0, multidim_average='samplewise', average=None)
        >>> mlfbs(preds, target)
        tensor([[0.8333, 0.8333, 0.0000],
                [0.0000, 0.0000, 0.0000]])
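
    Example (checking the ``macro`` reduction by hand; label 1 is never predicted, so it contributes
    the ``zero_division`` value, here ``0.0``, while the other labels score ``1.0`` and ``5/6``):
        >>> round((1.0 + 0.0 + 5 / 6) / 3, 4)
        0.6111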

    """

    is_differentiable: bool = False
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False
    plot_lower_bound: float = 0.0
    plot_upper_bound: float = 1.0
    plot_legend_name: str = "Label"

    def __init__(
        self,
        beta: float,
        num_labels: int,
        threshold: float = 0.5,
        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
        multidim_average: Literal["global", "samplewise"] = "global",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        zero_division: float = 0,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            num_labels=num_labels,
            threshold=threshold,
            average=average,
            multidim_average=multidim_average,
            ignore_index=ignore_index,
            validate_args=False,
            **kwargs,
        )
        if validate_args:
            _multilabel_fbeta_score_arg_validation(
                beta, num_labels, threshold, average, multidim_average, ignore_index, zero_division
            )
        self.validate_args = validate_args
        self.zero_division = zero_division
        self.beta = beta

    def compute(self) -> Tensor:
        """Compute metric."""
        tp, fp, tn, fn = self._final_state()
        return _fbeta_reduce(
            tp,
            fp,
            tn,
            fn,
            self.beta,
            average=self.average,
            multidim_average=self.multidim_average,
            multilabel=True,
            zero_division=self.zero_division,
        )

    def plot(
        self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
    ) -> _PLOT_OUT_TYPE:
        """Plot a single or multiple values from the metric.

        Args:
            val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
                If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, will add the plot to that axis

        Returns:
            Figure and Axes object

        Raises:
            ModuleNotFoundError:
                If `matplotlib` is not installed

        .. plot::
            :scale: 75

            >>> from torch import rand, randint
            >>> # Example plotting a single value
            >>> from torchmetrics.classification import MultilabelFBetaScore
            >>> metric = MultilabelFBetaScore(num_labels=3, beta=2.0)
            >>> metric.update(randint(2, (20, 3)), randint(2, (20, 3)))
            >>> fig_, ax_ = metric.plot()

        .. plot::
            :scale: 75

            >>> from torch import rand, randint
            >>> # Example plotting multiple values
            >>> from torchmetrics.classification import MultilabelFBetaScore
            >>> metric = MultilabelFBetaScore(num_labels=3, beta=2.0)
            >>> values = [ ]
            >>> for _ in range(10):
            ...     values.append(metric(randint(2, (20, 3)), randint(2, (20, 3))))
            >>> fig_, ax_ = metric.plot(values)

        """
        return self._plot(val, ax)


class BinaryF1Score(BinaryFBetaScore):
    r"""Compute F-1 score for binary tasks.

    .. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{\text{precision} + \text{recall}}

    The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
    where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
    positives and false negatives respectively. If this case is encountered, a score of `zero_division`
    (0 or 1, default is 0) is returned.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point
      tensor with values outside the [0,1] range, we consider the input to be logits and will auto-apply sigmoid per
      element. Additionally, we convert to int tensor with thresholding using the value in ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``bf1s`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``multidim_average`` argument:

        - If ``multidim_average`` is set to ``global``, the metric returns a scalar value.
        - If ``multidim_average`` is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar
          value per sample.

    If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
    which the reduction will then be applied over instead of the sample dimension ``N``.

    Args:
        threshold: Threshold for transforming probability to binary {0,1} predictions
        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        zero_division: Should be `0` or `1`. The value returned when
            :math:`\text{TP} + \text{FP} = 0 \wedge \text{TP} + \text{FN} = 0`.

    Example (preds is int tensor):
        >>> from torch import tensor
        >>> from torchmetrics.classification import BinaryF1Score
        >>> target = tensor([0, 1, 0, 1, 0, 1])
        >>> preds = tensor([0, 0, 1, 1, 0, 1])
        >>> metric = BinaryF1Score()
        >>> metric(preds, target)
        tensor(0.6667)

    Example (preds is float tensor):
        >>> from torchmetrics.classification import BinaryF1Score
        >>> target = tensor([0, 1, 0, 1, 0, 1])
        >>> preds = tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
        >>> metric = BinaryF1Score()
        >>> metric(preds, target)
        tensor(0.6667)

    Example (multidim tensors):
        >>> from torchmetrics.classification import BinaryF1Score
        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99],  [0.63, 0.04]],
        ...                 [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
        >>> metric = BinaryF1Score(multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.5000, 0.0000])


    """

    is_differentiable: bool = False
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False
    plot_lower_bound: float = 0.0
    plot_upper_bound: float = 1.0

    def __init__(
        self,
        threshold: float = 0.5,
        multidim_average: Literal["global", "samplewise"] = "global",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        zero_division: float = 0,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            beta=1.0,
            threshold=threshold,
            multidim_average=multidim_average,
            ignore_index=ignore_index,
            validate_args=validate_args,
            zero_division=zero_division,
            **kwargs,
        )

    def plot(
        self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
    ) -> _PLOT_OUT_TYPE:
        """Plot a single or multiple values from the metric.

        Args:
            val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
                If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, will add the plot to that axis

        Returns:
            Figure object and Axes object

        Raises:
            ModuleNotFoundError:
                If `matplotlib` is not installed

        .. plot::
            :scale: 75

            >>> from torch import rand, randint
            >>> # Example plotting a single value
            >>> from torchmetrics.classification import BinaryF1Score
            >>> metric = BinaryF1Score()
            >>> metric.update(rand(10), randint(2,(10,)))
            >>> fig_, ax_ = metric.plot()

        .. plot::
            :scale: 75

            >>> from torch import rand, randint
            >>> # Example plotting multiple values
            >>> from torchmetrics.classification import BinaryF1Score
            >>> metric = BinaryF1Score()
            >>> values = [ ]
            >>> for _ in range(10):
            ...     values.append(metric(rand(10), randint(2,(10,))))
            >>> fig_, ax_ = metric.plot(values)

        """
        return self._plot(val, ax)


class MulticlassF1Score(MulticlassFBetaScore):
    r"""Compute F-1 score for multiclass tasks.

    .. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{\text{precision} + \text{recall}}

    The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
    where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
    positives and false negatives respectively. If this case is encountered for any class, the metric for that class
    will be set to `zero_division` (0 or 1, default is 0) and the overall metric may therefore be affected in turn.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ..)``.
      If preds is a floating point tensor, we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
      probabilities/logits into an int tensor.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mcf1s`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
      ``multidim_average`` arguments:

        - If ``multidim_average`` is set to ``global``:

          - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
          - If ``average=None/'none'``, the shape will be ``(C,)``

        - If ``multidim_average`` is set to ``samplewise``:

          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
          - If ``average=None/'none'``, the shape will be ``(N, C)``

    If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
    which the reduction will then be applied over instead of the sample dimension ``N``.

    Args:
        num_classes: Integer specifying the number of classes
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes a weighted average using their support
            - ``"none"`` or ``None``: Calculates the statistic for each label and applies no reduction
        top_k:
            Number of highest probability or logit score predictions considered to find the correct label.
            Only works when ``preds`` contain probabilities/logits.
        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        zero_division: Should be `0` or `1`. The value returned when
            :math:`\text{TP} + \text{FP} = 0 \wedge \text{TP} + \text{FN} = 0`.

    Example (preds is int tensor):
        >>> from torch import tensor
        >>> from torchmetrics.classification import MulticlassF1Score
        >>> target = tensor([2, 1, 0, 0])
        >>> preds = tensor([2, 1, 0, 1])
        >>> metric = MulticlassF1Score(num_classes=3)
        >>> metric(preds, target)
        tensor(0.7778)
        >>> mcf1s = MulticlassF1Score(num_classes=3, average=None)
        >>> mcf1s(preds, target)
        tensor([0.6667, 0.6667, 1.0000])

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MulticlassF1Score
        >>> target = tensor([2, 1, 0, 0])
        >>> preds = tensor([[0.16, 0.26, 0.58],
        ...                 [0.22, 0.61, 0.17],
        ...                 [0.71, 0.09, 0.20],
        ...                 [0.05, 0.82, 0.13]])
        >>> metric = MulticlassF1Score(num_classes=3)
        >>> metric(preds, target)
        tensor(0.7778)
        >>> mcf1s = MulticlassF1Score(num_classes=3, average=None)
        >>> mcf1s(preds, target)
        tensor([0.6667, 0.6667, 1.0000])

    Example (multidim tensors):
        >>> from torchmetrics.classification import MulticlassF1Score
        >>> target = tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
        >>> preds = tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
        >>> metric = MulticlassF1Score(num_classes=3, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.4333, 0.2667])
        >>> mcf1s = MulticlassF1Score(num_classes=3, multidim_average='samplewise', average=None)
        >>> mcf1s(preds, target)
        tensor([[0.8000, 0.0000, 0.5000],
                [0.0000, 0.4000, 0.4000]])
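
    Example (checking the default ``macro`` reduction by hand from the per-class scores
    ``[0.6667, 0.6667, 1.0000]``, i.e. ``2/3``, ``2/3`` and ``1.0``, shown above):
        >>> round((2 / 3 + 2 / 3 + 1.0) / 3, 4)
        0.7778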

    """

    is_differentiable: bool = False
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False
    plot_lower_bound: float = 0.0
    plot_upper_bound: float = 1.0
    plot_legend_name: str = "Class"

    def __init__(
        self,
        num_classes: int,
        top_k: int = 1,
        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
        multidim_average: Literal["global", "samplewise"] = "global",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        zero_division: float = 0,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            beta=1.0,
            num_classes=num_classes,
            top_k=top_k,
            average=average,
            multidim_average=multidim_average,
            ignore_index=ignore_index,
            validate_args=validate_args,
            zero_division=zero_division,
            **kwargs,
        )

    def plot(
        self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
    ) -> _PLOT_OUT_TYPE:
        """Plot a single or multiple values from the metric.

        Args:
            val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
                If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, will add the plot to that axis

        Returns:
            Figure object and Axes object

        Raises:
            ModuleNotFoundError:
                If `matplotlib` is not installed

        .. plot::
            :scale: 75

            >>> from torch import randint
            >>> # Example plotting a single value per class
            >>> from torchmetrics.classification import MulticlassF1Score
            >>> metric = MulticlassF1Score(num_classes=3, average=None)
            >>> metric.update(randint(3, (20,)), randint(3, (20,)))
            >>> fig_, ax_ = metric.plot()

        .. plot::
            :scale: 75

            >>> from torch import randint
            >>> # Example plotting multiple values per class
            >>> from torchmetrics.classification import MulticlassF1Score
            >>> metric = MulticlassF1Score(num_classes=3, average=None)
            >>> values = []
            >>> for _ in range(20):
            ...     values.append(metric(randint(3, (20,)), randint(3, (20,))))
            >>> fig_, ax_ = metric.plot(values)

        """
        return self._plot(val, ax)


class MultilabelF1Score(MultilabelFBetaScore):
    r"""Compute F-1 score for multilabel tasks.

    .. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{\text{precision} + \text{recall}}

    The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
    where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
    positives and false negatives respectively. If this case is encountered for any label, the metric for that label
    will be set to `zero_division` (0 or 1, default is 0) and the overall metric may therefore be affected in turn.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``.
      If preds is a floating point tensor with values outside the [0,1] range, we consider the input to be logits and
      will auto apply sigmoid per element. Additionally, we convert to int tensor with thresholding using the value
      in ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``.

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mlf1s`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
      ``multidim_average`` arguments:

        - If ``multidim_average`` is set to ``global``:

          - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
          - If ``average=None/'none'``, the shape will be ``(C,)``

        - If ``multidim_average`` is set to ``samplewise``:

          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
          - If ``average=None/'none'``, the shape will be ``(N, C)``

    If ``multidim_average`` is set to ``samplewise`` we expect at least one additional dimension ``...`` to be present,
    which the reduction will then be applied over instead of the sample dimension ``N``.

    Args:
        num_labels: Integer specifying the number of labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes a weighted average using their support
            - ``"none"`` or ``None``: Calculates the statistic for each label and applies no reduction

        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        zero_division: Should be `0` or `1`. The value returned when
            :math:`\text{TP} + \text{FP} = 0 \wedge \text{TP} + \text{FN} = 0`.

    Example (preds is int tensor):
        >>> from torch import tensor
        >>> from torchmetrics.classification import MultilabelF1Score
        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = tensor([[0, 0, 1], [1, 0, 1]])
        >>> metric = MultilabelF1Score(num_labels=3)
        >>> metric(preds, target)
        tensor(0.5556)
        >>> mlf1s = MultilabelF1Score(num_labels=3, average=None)
        >>> mlf1s(preds, target)
        tensor([1.0000, 0.0000, 0.6667])

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MultilabelF1Score
        >>> target = tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> metric = MultilabelF1Score(num_labels=3)
        >>> metric(preds, target)
        tensor(0.5556)
        >>> mlf1s = MultilabelF1Score(num_labels=3, average=None)
        >>> mlf1s(preds, target)
        tensor([1.0000, 0.0000, 0.6667])

    Example (multidim tensors):
        >>> from torchmetrics.classification import MultilabelF1Score
        >>> target = tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = tensor([[[0.59, 0.91], [0.91, 0.99],  [0.63, 0.04]],
        ...                 [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]]])
        >>> metric = MultilabelF1Score(num_labels=3, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.4444, 0.0000])
        >>> mlf1s = MultilabelF1Score(num_labels=3, multidim_average='samplewise', average=None)
        >>> mlf1s(preds, target)
        tensor([[0.6667, 0.6667, 0.0000],
                [0.0000, 0.0000, 0.0000]])
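
    Example (checking the ``macro`` reduction by hand; label 1 is never predicted, so it contributes
    the ``zero_division`` value, here ``0.0``, while the other labels score ``1.0`` and ``2/3``):
        >>> round((1.0 + 0.0 + 2 / 3) / 3, 4)
        0.5556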

    """

    is_differentiable: bool = False
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False
    plot_lower_bound: float = 0.0
    plot_upper_bound: float = 1.0
    plot_legend_name: str = "Label"

    def __init__(
        self,
        num_labels: int,
        threshold: float = 0.5,
        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
        multidim_average: Literal["global", "samplewise"] = "global",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        zero_division: float = 0,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            beta=1.0,
            num_labels=num_labels,
            threshold=threshold,
            average=average,
            multidim_average=multidim_average,
            ignore_index=ignore_index,
            validate_args=validate_args,
            zero_division=zero_division,
            **kwargs,
        )

    def plot(
        self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
    ) -> _PLOT_OUT_TYPE:
        """Plot a single or multiple values from the metric.

        Args:
            val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
                If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, will add the plot to that axis

        Returns:
            Figure and Axes object

        Raises:
            ModuleNotFoundError:
                If `matplotlib` is not installed

        .. plot::
            :scale: 75

            >>> from torch import rand, randint
            >>> # Example plotting a single value
            >>> from torchmetrics.classification import MultilabelF1Score
            >>> metric = MultilabelF1Score(num_labels=3)
            >>> metric.update(randint(2, (20, 3)), randint(2, (20, 3)))
            >>> fig_, ax_ = metric.plot()

        .. plot::
            :scale: 75

            >>> from torch import rand, randint
            >>> # Example plotting multiple values
            >>> from torchmetrics.classification import MultilabelF1Score
            >>> metric = MultilabelF1Score(num_labels=3)
            >>> values = [ ]
            >>> for _ in range(10):
            ...     values.append(metric(randint(2, (20, 3)), randint(2, (20, 3))))
            >>> fig_, ax_ = metric.plot(values)

        """
        return self._plot(val, ax)


class FBetaScore(_ClassificationTaskWrapper):
    r"""Compute `F-score`_ metric.

    .. math::
        F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
        {(\beta^2 * \text{precision}) + \text{recall}}

    The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
    where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
    positives and false negatives respectively. If this case is encountered for any class/label, the metric for that
    class/label will be set to `zero_division` (0 or 1, default is 0) and the overall metric may therefore be
    affected in turn.

    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
    :class:`~torchmetrics.classification.BinaryFBetaScore`,
    :class:`~torchmetrics.classification.MulticlassFBetaScore` and
    :class:`~torchmetrics.classification.MultilabelFBetaScore` for the specific details of each argument influence
    and examples.

    Legacy Example:
        >>> from torch import tensor
        >>> target = tensor([0, 1, 2, 0, 1, 2])
        >>> preds = tensor([0, 2, 1, 0, 0, 1])
        >>> f_beta = FBetaScore(task="multiclass", num_classes=3, beta=0.5)
        >>> f_beta(preds, target)
        tensor(0.3333)
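        >>> # The wrapper simply dispatches to the task-specific class:
        >>> isinstance(f_beta, MulticlassFBetaScore)
        True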

    """

    def __new__(  # type: ignore[misc]
        cls: type,
        task: Literal["binary", "multiclass", "multilabel"],
        beta: float = 1.0,
        threshold: float = 0.5,
        num_classes: Optional[int] = None,
        num_labels: Optional[int] = None,
        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
        multidim_average: Optional[Literal["global", "samplewise"]] = "global",
        top_k: Optional[int] = 1,
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        zero_division: float = 0,
        **kwargs: Any,
    ) -> Metric:
        """Initialize task metric."""
        task = ClassificationTask.from_str(task)
        assert multidim_average is not None
        kwargs.update({
            "multidim_average": multidim_average,
            "ignore_index": ignore_index,
            "validate_args": validate_args,
            "zero_division": zero_division,
        })
        if task == ClassificationTask.BINARY:
            return BinaryFBetaScore(beta, threshold, **kwargs)
        if task == ClassificationTask.MULTICLASS:
            if not isinstance(num_classes, int):
                raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
            if not isinstance(top_k, int):
                raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
            return MulticlassFBetaScore(beta, num_classes, top_k, average, **kwargs)
        if task == ClassificationTask.MULTILABEL:
            if not isinstance(num_labels, int):
                raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
            return MultilabelFBetaScore(beta, num_labels, threshold, average, **kwargs)
        raise ValueError(f"Task {task} not supported!")


class F1Score(_ClassificationTaskWrapper):
    r"""Compute F-1 score.

    .. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{\text{precision} + \text{recall}}

    The metric is only properly defined when :math:`\text{TP} + \text{FP} \neq 0 \wedge \text{TP} + \text{FN} \neq 0`
    where :math:`\text{TP}`, :math:`\text{FP}` and :math:`\text{FN}` represent the number of true positives, false
    positives and false negatives respectively. If this case is encountered for any class/label, the metric for that
    class/label will be set to `zero_division` (0 or 1, default is 0) and the overall metric may therefore be
    affected in turn.

    This function is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
    :class:`~torchmetrics.classification.BinaryF1Score`, :class:`~torchmetrics.classification.MulticlassF1Score` and
    :class:`~torchmetrics.classification.MultilabelF1Score` for the specific details of each argument influence and
    examples.

    Legacy Example:
        >>> from torch import tensor
        >>> target = tensor([0, 1, 2, 0, 1, 2])
        >>> preds = tensor([0, 2, 1, 0, 0, 1])
        >>> f1 = F1Score(task="multiclass", num_classes=3)
        >>> f1(preds, target)
        tensor(0.3333)
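        >>> # The wrapper simply dispatches to the task-specific class:
        >>> isinstance(f1, MulticlassF1Score)
        True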

    """

    def __new__(  # type: ignore[misc]
        cls: type,
        task: Literal["binary", "multiclass", "multilabel"],
        threshold: float = 0.5,
        num_classes: Optional[int] = None,
        num_labels: Optional[int] = None,
        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
        multidim_average: Optional[Literal["global", "samplewise"]] = "global",
        top_k: Optional[int] = 1,
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        zero_division: float = 0,
        **kwargs: Any,
    ) -> Metric:
        """Initialize task metric."""
        task = ClassificationTask.from_str(task)
        assert multidim_average is not None
        kwargs.update({
            "multidim_average": multidim_average,
            "ignore_index": ignore_index,
            "validate_args": validate_args,
            "zero_division": zero_division,
        })
        if task == ClassificationTask.BINARY:
            return BinaryF1Score(threshold, **kwargs)
        if task == ClassificationTask.MULTICLASS:
            if not isinstance(num_classes, int):
                raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
            if not isinstance(top_k, int):
                raise ValueError(f"`top_k` is expected to be `int` but `{type(top_k)} was passed.`")
            return MulticlassF1Score(num_classes, top_k, average, **kwargs)
        if task == ClassificationTask.MULTILABEL:
            if not isinstance(num_labels, int):
                raise ValueError(f"`num_labels` is expected to be `int` but `{type(num_labels)} was passed.`")
            return MultilabelF1Score(num_labels, threshold, average, **kwargs)
        raise ValueError(f"Task {task} not supported!")