from typing import Any, Optional

import torch
from torch import Tensor
from typing_extensions import Literal

from torchmetrics.classification.stat_scores import BinaryStatScores, MulticlassStatScores, MultilabelStatScores
from torchmetrics.functional.classification.f_beta import (
    _binary_fbeta_score_arg_validation,
    _fbeta_reduce,
    _multiclass_fbeta_score_arg_validation,
    _multilabel_fbeta_score_arg_validation,
)
from torchmetrics.metric import Metric


class BinaryFBetaScore(BinaryStatScores):
    r"""Computes `F-score`_ metric for binary tasks:

    .. math::
        F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
        {(\beta^2 * \text{precision}) + \text{recall}}

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int tensor or float tensor of shape ``(N, ...)``. If preds is a floating
      point tensor with values outside the [0,1] range we consider the input to be logits and will auto apply sigmoid
      per element. Additionally, we convert to an int tensor with thresholding using the value in ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.


    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``bfbs`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``multidim_average`` argument:

        - If ``multidim_average`` is set to ``global`` the output will be a scalar tensor
        - If ``multidim_average`` is set to ``samplewise`` the output will be a tensor of shape ``(N,)`` consisting of
          a scalar value per sample.

    Args:
        beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight
        threshold: Threshold for transforming probability to binary {0,1} predictions
        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torchmetrics.classification import BinaryFBetaScore
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0, 0, 1, 1, 0, 1])
        >>> metric = BinaryFBetaScore(beta=2.0)
        >>> metric(preds, target)
        tensor(0.6667)

    Example (preds is float tensor):
        >>> from torchmetrics.classification import BinaryFBetaScore
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
        >>> metric = BinaryFBetaScore(beta=2.0)
        >>> metric(preds, target)
        tensor(0.6667)

    Example (multidim tensors):
        >>> from torchmetrics.classification import BinaryFBetaScore
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> metric = BinaryFBetaScore(beta=2.0, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.5882, 0.0000])
    """
    is_differentiable: bool = False
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False

    def __init__(
        self,
        beta: float,
        threshold: float = 0.5,
        multidim_average: Literal["global", "samplewise"] = "global",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            threshold=threshold,
            multidim_average=multidim_average,
            ignore_index=ignore_index,
            validate_args=False,
            **kwargs,
        )
        if validate_args:
            _binary_fbeta_score_arg_validation(beta, threshold, multidim_average, ignore_index)
        self.validate_args = validate_args
        self.beta = beta

    def compute(self) -> Tensor:
        tp, fp, tn, fn = self._final_state()
        return _fbeta_reduce(tp, fp, tn, fn, self.beta, average="binary", multidim_average=self.multidim_average)
class MulticlassFBetaScore(MulticlassStatScores):
    r"""Computes `F-score`_ metric for multiclass tasks:

    .. math::
        F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
        {(\beta^2 * \text{precision}) + \text{recall}}

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ...)``.
      If preds is a floating point tensor we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
      probabilities/logits into an int tensor.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.


    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mcfbs`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
      ``multidim_average`` arguments:

        - If ``multidim_average`` is set to ``global``:

          - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
          - If ``average=None/'none'``, the shape will be ``(C,)``

        - If ``multidim_average`` is set to ``samplewise``:

          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
          - If ``average=None/'none'``, the shape will be ``(N, C)``

    Args:
        beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight
        num_classes: Integer specifying the number of classes
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction
        top_k:
            Number of highest probability or logit score predictions considered to find the correct label.
            Only works when ``preds`` contain probabilities/logits.
        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torchmetrics.classification import MulticlassFBetaScore
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([2, 1, 0, 1])
        >>> metric = MulticlassFBetaScore(beta=2.0, num_classes=3)
        >>> metric(preds, target)
        tensor(0.7963)
        >>> mcfbs = MulticlassFBetaScore(beta=2.0, num_classes=3, average=None)
        >>> mcfbs(preds, target)
        tensor([0.5556, 0.8333, 1.0000])

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MulticlassFBetaScore
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([
        ...   [0.16, 0.26, 0.58],
        ...   [0.22, 0.61, 0.17],
        ...   [0.71, 0.09, 0.20],
        ...   [0.05, 0.82, 0.13],
        ... ])
        >>> metric = MulticlassFBetaScore(beta=2.0, num_classes=3)
        >>> metric(preds, target)
        tensor(0.7963)
        >>> mcfbs = MulticlassFBetaScore(beta=2.0, num_classes=3, average=None)
        >>> mcfbs(preds, target)
        tensor([0.5556, 0.8333, 1.0000])

    Example (multidim tensors):
        >>> from torchmetrics.classification import MulticlassFBetaScore
        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
        >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
        >>> metric = MulticlassFBetaScore(beta=2.0, num_classes=3, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.4697, 0.2706])
        >>> mcfbs = MulticlassFBetaScore(beta=2.0, num_classes=3, multidim_average='samplewise', average=None)
        >>> mcfbs(preds, target)
        tensor([[0.9091, 0.0000, 0.5000],
                [0.0000, 0.3571, 0.4545]])
    """
    is_differentiable: bool = False
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False

    def __init__(
        self,
        beta: float,
        num_classes: int,
        top_k: int = 1,
        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
        multidim_average: Literal["global", "samplewise"] = "global",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            num_classes=num_classes,
            top_k=top_k,
            average=average,
            multidim_average=multidim_average,
            ignore_index=ignore_index,
            validate_args=False,
            **kwargs,
        )
        if validate_args:
            _multiclass_fbeta_score_arg_validation(beta, num_classes, top_k, average, multidim_average, ignore_index)
        self.validate_args = validate_args
        self.beta = beta

    def compute(self) -> Tensor:
        tp, fp, tn, fn = self._final_state()
        return _fbeta_reduce(tp, fp, tn, fn, self.beta, average=self.average, multidim_average=self.multidim_average)
f fddZdefddZ  ZS )MultilabelFBetaScorea  Computes `F-score`_ metric for multilabel tasks:

    .. math::
        F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
        {(\beta^2 * \text{precision}) + \text{recall}}

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``. If preds is a floating
      point tensor with values outside the [0,1] range we consider the input to be logits and will auto apply sigmoid
      per element. Additionally, we convert to an int tensor with thresholding using the value in ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``.


    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mlfbs`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
      ``multidim_average`` arguments:

        - If ``multidim_average`` is set to ``global``:

          - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
          - If ``average=None/'none'``, the shape will be ``(C,)``

        - If ``multidim_average`` is set to ``samplewise``:

          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
          - If ``average=None/'none'``, the shape will be ``(N, C)``

    Args:
        beta: Weighting between precision and recall in calculation. Setting to 1 corresponds to equal weight
        num_labels: Integer specifying the number of labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction

        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torchmetrics.classification import MultilabelFBetaScore
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
        >>> metric = MultilabelFBetaScore(beta=2.0, num_labels=3)
        >>> metric(preds, target)
        tensor(0.6111)
        >>> mlfbs = MultilabelFBetaScore(beta=2.0, num_labels=3, average=None)
        >>> mlfbs(preds, target)
        tensor([1.0000, 0.0000, 0.8333])

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MultilabelFBetaScore
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> metric = MultilabelFBetaScore(beta=2.0, num_labels=3)
        >>> metric(preds, target)
        tensor(0.6111)
        >>> mlfbs = MultilabelFBetaScore(beta=2.0, num_labels=3, average=None)
        >>> mlfbs(preds, target)
        tensor([1.0000, 0.0000, 0.8333])

    Example (multidim tensors):
        >>> from torchmetrics.classification import MultilabelFBetaScore
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> metric = MultilabelFBetaScore(num_labels=3, beta=2.0, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.5556, 0.0000])
        >>> mlfbs = MultilabelFBetaScore(num_labels=3, beta=2.0, multidim_average='samplewise', average=None)
        >>> mlfbs(preds, target)
        tensor([[0.8333, 0.8333, 0.0000],
                [0.0000, 0.0000, 0.0000]])
    """
    is_differentiable: bool = False
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False

    def __init__(
        self,
        beta: float,
        num_labels: int,
        threshold: float = 0.5,
        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
        multidim_average: Literal["global", "samplewise"] = "global",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            num_labels=num_labels,
            threshold=threshold,
            average=average,
            multidim_average=multidim_average,
            ignore_index=ignore_index,
            validate_args=False,
            **kwargs,
        )
        if validate_args:
            _multilabel_fbeta_score_arg_validation(beta, num_labels, threshold, average, multidim_average, ignore_index)
        self.validate_args = validate_args
        self.beta = beta

    def compute(self) -> Tensor:
        tp, fp, tn, fn = self._final_state()
        return _fbeta_reduce(tp, fp, tn, fn, self.beta, average=self.average, multidim_average=self.multidim_average)
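
# The F-1 classes below are thin subclasses of the F-beta classes above with
# ``beta`` pinned to 1.0, i.e. the harmonic mean of precision and recall:
#
#     f1 = 2 * tp / (2 * tp + fn + fp)
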
ded dee dededd	f fddZ  ZS )BinaryF1Scorea  Computes F-1 score for binary tasks:

    .. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{(\text{precision}) + \text{recall}}

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point
      tensor with values outside the [0,1] range we consider the input to be logits and will auto apply sigmoid per
      element. Additionally, we convert to an int tensor with thresholding using the value in ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``


    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``bf1s`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``multidim_average`` argument:

        - If ``multidim_average`` is set to ``global``, the metric returns a scalar value.
        - If ``multidim_average`` is set to ``samplewise``, the metric returns ``(N,)`` vector consisting of a scalar
          value per sample.

    Args:
        threshold: Threshold for transforming probability to binary {0,1} predictions
        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torchmetrics.classification import BinaryF1Score
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0, 0, 1, 1, 0, 1])
        >>> metric = BinaryF1Score()
        >>> metric(preds, target)
        tensor(0.6667)

    Example (preds is float tensor):
        >>> from torchmetrics.classification import BinaryF1Score
        >>> target = torch.tensor([0, 1, 0, 1, 0, 1])
        >>> preds = torch.tensor([0.11, 0.22, 0.84, 0.73, 0.33, 0.92])
        >>> metric = BinaryF1Score()
        >>> metric(preds, target)
        tensor(0.6667)

    Example (multidim tensors):
        >>> from torchmetrics.classification import BinaryF1Score
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> metric = BinaryF1Score(multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.5000, 0.0000])
    """
    is_differentiable: bool = False
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False

    def __init__(
        self,
        threshold: float = 0.5,
        multidim_average: Literal["global", "samplewise"] = "global",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            beta=1.0,
            threshold=threshold,
            multidim_average=multidim_average,
            ignore_index=ignore_index,
            validate_args=validate_args,
            **kwargs,
        )
f fddZ  ZS )MulticlassF1Scoread  Computes F-1 score for multiclass tasks:

    .. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{(\text{precision}) + \text{recall}}

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` or float tensor of shape ``(N, C, ...)``.
      If preds is a floating point tensor we apply ``torch.argmax`` along the ``C`` dimension to automatically convert
      probabilities/logits into an int tensor.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``


    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mcf1s`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
      ``multidim_average`` arguments:

        - If ``multidim_average`` is set to ``global``:

          - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
          - If ``average=None/'none'``, the shape will be ``(C,)``

        - If ``multidim_average`` is set to ``samplewise``:

          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
          - If ``average=None/'none'``, the shape will be ``(N, C)``

    Args:
        num_classes: Integer specifying the number of classes
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction
        top_k:
            Number of highest probability or logit score predictions considered to find the correct label.
            Only works when ``preds`` contain probabilities/logits.
        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torchmetrics.classification import MulticlassF1Score
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([2, 1, 0, 1])
        >>> metric = MulticlassF1Score(num_classes=3)
        >>> metric(preds, target)
        tensor(0.7778)
        >>> mcf1s = MulticlassF1Score(num_classes=3, average=None)
        >>> mcf1s(preds, target)
        tensor([0.6667, 0.6667, 1.0000])

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MulticlassF1Score
        >>> target = torch.tensor([2, 1, 0, 0])
        >>> preds = torch.tensor([
        ...   [0.16, 0.26, 0.58],
        ...   [0.22, 0.61, 0.17],
        ...   [0.71, 0.09, 0.20],
        ...   [0.05, 0.82, 0.13],
        ... ])
        >>> metric = MulticlassF1Score(num_classes=3)
        >>> metric(preds, target)
        tensor(0.7778)
        >>> mcf1s = MulticlassF1Score(num_classes=3, average=None)
        >>> mcf1s(preds, target)
        tensor([0.6667, 0.6667, 1.0000])

    Example (multidim tensors):
        >>> from torchmetrics.classification import MulticlassF1Score
        >>> target = torch.tensor([[[0, 1], [2, 1], [0, 2]], [[1, 1], [2, 0], [1, 2]]])
        >>> preds = torch.tensor([[[0, 2], [2, 0], [0, 1]], [[2, 2], [2, 1], [1, 0]]])
        >>> metric = MulticlassF1Score(num_classes=3, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.4333, 0.2667])
        >>> mcf1s = MulticlassF1Score(num_classes=3, multidim_average='samplewise', average=None)
        >>> mcf1s(preds, target)
        tensor([[0.8000, 0.0000, 0.5000],
                [0.0000, 0.4000, 0.4000]])
    """
    is_differentiable: bool = False
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False

    def __init__(
        self,
        num_classes: int,
        top_k: int = 1,
        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
        multidim_average: Literal["global", "samplewise"] = "global",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            beta=1.0,
            num_classes=num_classes,
            top_k=top_k,
            average=average,
            multidim_average=multidim_average,
            ignore_index=ignore_index,
            validate_args=validate_args,
            **kwargs,
        )
f fddZ  ZS )MultilabelF1Scorea  Computes F-1 score for multilabel tasks:

    .. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{(\text{precision}) + \text{recall}}

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, C, ...)``.
      If preds is a floating point tensor with values outside [0,1] range we consider the input to be logits and
      will auto apply sigmoid per element. Addtionally, we convert to int tensor with thresholding using the value
      in ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)``.


    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mlf1s`` (:class:`~torch.Tensor`): A tensor whose returned shape depends on the ``average`` and
      ``multidim_average`` arguments:

        - If ``multidim_average`` is set to ``global``:

          - If ``average='micro'/'macro'/'weighted'``, the output will be a scalar tensor
          - If ``average=None/'none'``, the shape will be ``(C,)``

        - If ``multidim_average`` is set to ``samplewise``:

          - If ``average='micro'/'macro'/'weighted'``, the shape will be ``(N,)``
          - If ``average=None/'none'``, the shape will be ``(N, C)``

    Args:
        num_labels: Integer specifying the number of labels
        threshold: Threshold for transforming probability to binary (0,1) predictions
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum statistics over all labels
            - ``macro``: Calculate statistics for each label and average them
            - ``weighted``: Calculates statistics for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates statistic for each label and applies no reduction

        multidim_average:
            Defines how additional dimensions ``...`` should be handled. Should be one of the following:

            - ``global``: Additional dimensions are flattened along the batch dimension
            - ``samplewise``: Statistic will be calculated independently for each sample on the ``N`` axis.
              The statistics in this case are calculated over the additional dimensions.

        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.

    Example (preds is int tensor):
        >>> from torchmetrics.classification import MultilabelF1Score
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0, 0, 1], [1, 0, 1]])
        >>> metric = MultilabelF1Score(num_labels=3)
        >>> metric(preds, target)
        tensor(0.5556)
        >>> mlf1s = MultilabelF1Score(num_labels=3, average=None)
        >>> mlf1s(preds, target)
        tensor([1.0000, 0.0000, 0.6667])

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MultilabelF1Score
        >>> target = torch.tensor([[0, 1, 0], [1, 0, 1]])
        >>> preds = torch.tensor([[0.11, 0.22, 0.84], [0.73, 0.33, 0.92]])
        >>> metric = MultilabelF1Score(num_labels=3)
        >>> metric(preds, target)
        tensor(0.5556)
        >>> mlf1s = MultilabelF1Score(num_labels=3, average=None)
        >>> mlf1s(preds, target)
        tensor([1.0000, 0.0000, 0.6667])

    Example (multidim tensors):
        >>> from torchmetrics.classification import MultilabelF1Score
        >>> target = torch.tensor([[[0, 1], [1, 0], [0, 1]], [[1, 1], [0, 0], [1, 0]]])
        >>> preds = torch.tensor(
        ...     [
        ...         [[0.59, 0.91], [0.91, 0.99], [0.63, 0.04]],
        ...         [[0.38, 0.04], [0.86, 0.780], [0.45, 0.37]],
        ...     ]
        ... )
        >>> metric = MultilabelF1Score(num_labels=3, multidim_average='samplewise')
        >>> metric(preds, target)
        tensor([0.4444, 0.0000])
        >>> mlf1s = MultilabelF1Score(num_labels=3, multidim_average='samplewise', average=None)
        >>> mlf1s(preds, target)
        tensor([[0.6667, 0.6667, 0.0000],
                [0.0000, 0.0000, 0.0000]])
    """
    is_differentiable: bool = False
    higher_is_better: Optional[bool] = True
    full_state_update: bool = False

    def __init__(
        self,
        num_labels: int,
        threshold: float = 0.5,
        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "macro",
        multidim_average: Literal["global", "samplewise"] = "global",
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            beta=1.0,
            num_labels=num_labels,
            threshold=threshold,
            average=average,
            multidim_average=multidim_average,
            ignore_index=ignore_index,
            validate_args=validate_args,
            **kwargs,
        )
class FBetaScore:
    r"""Computes `F-score`_ metric:

    .. math::
        F_{\beta} = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
        {(\beta^2 * \text{precision}) + \text{recall}}

    This function is a simple wrapper to get the task-specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
    :func:`binary_fbeta_score`, :func:`multiclass_fbeta_score` and :func:`multilabel_fbeta_score` for the specific
    details of each argument's influence and examples.

    Legacy Example:
        >>> import torch
        >>> target = torch.tensor([0, 1, 2, 0, 1, 2])
        >>> preds = torch.tensor([0, 2, 1, 0, 0, 1])
        >>> f_beta = FBetaScore(task="multiclass", num_classes=3, beta=0.5)
        >>> f_beta(preds, target)
        tensor(0.3333)
    """

    def __new__(
        cls,
        task: Literal["binary", "multiclass", "multilabel"],
        beta: float = 1.0,
        threshold: float = 0.5,
        num_classes: Optional[int] = None,
        num_labels: Optional[int] = None,
        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
        multidim_average: Optional[Literal["global", "samplewise"]] = "global",
        top_k: Optional[int] = 1,
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> Metric:
        assert multidim_average is not None
        kwargs.update(dict(multidim_average=multidim_average, ignore_index=ignore_index, validate_args=validate_args))
        if task == "binary":
            return BinaryFBetaScore(beta, threshold, **kwargs)
        if task == "multiclass":
            assert isinstance(num_classes, int)
            assert isinstance(top_k, int)
            return MulticlassFBetaScore(beta, num_classes, top_k, average, **kwargs)
        if task == "multilabel":
            assert isinstance(num_labels, int)
            return MultilabelFBetaScore(beta, num_labels, threshold, average, **kwargs)
        raise ValueError(
            f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
        )
fddZdS )F1Scorea  Computes F-1 score:

    .. math::
        F_{1} = 2\frac{\text{precision} * \text{recall}}{(\text{precision}) + \text{recall}}

    This function is a simple wrapper to get the task-specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``'multilabel'``. See the documentation of
    :class:`BinaryF1Score`, :class:`MulticlassF1Score` and :class:`MultilabelF1Score` for the specific
    details of each argument's influence and examples.

    Legacy Example:
        >>> import torch
        >>> target = torch.tensor([0, 1, 2, 0, 1, 2])
        >>> preds = torch.tensor([0, 2, 1, 0, 0, 1])
        >>> f1 = F1Score(task="multiclass", num_classes=3)
        >>> f1(preds, target)
        tensor(0.3333)
    """

    def __new__(
        cls,
        task: Literal["binary", "multiclass", "multilabel"],
        threshold: float = 0.5,
        num_classes: Optional[int] = None,
        num_labels: Optional[int] = None,
        average: Optional[Literal["micro", "macro", "weighted", "none"]] = "micro",
        multidim_average: Optional[Literal["global", "samplewise"]] = "global",
        top_k: Optional[int] = 1,
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> Metric:
        assert multidim_average is not None
        kwargs.update(dict(multidim_average=multidim_average, ignore_index=ignore_index, validate_args=validate_args))
        if task == "binary":
            return BinaryF1Score(threshold, **kwargs)
        if task == "multiclass":
            assert isinstance(num_classes, int)
            assert isinstance(top_k, int)
            return MulticlassF1Score(num_classes, top_k, average, **kwargs)
        if task == "multilabel":
            assert isinstance(num_labels, int)
            return MultilabelF1Score(num_labels, threshold, average, **kwargs)
        raise ValueError(
            f"Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got {task}"
        )