o
    yiYI                     @   s   d dl mZmZmZmZ d dlZd dlmZ d dlmZ d dl	m
Z
mZmZ d dlmZmZmZmZmZmZ d dlmZ d dlmZ G d	d
 d
e
ZG dd deZG dd deZG dd dZdS )    )AnyListOptionalUnionN)Tensor)Literal)BinaryPrecisionRecallCurveMulticlassPrecisionRecallCurveMultilabelPrecisionRecallCurve)_binary_auroc_arg_validation_binary_auroc_compute _multiclass_auroc_arg_validation_multiclass_auroc_compute _multilabel_auroc_arg_validation_multilabel_auroc_compute)Metric)dim_zero_catc                       s   e Zd ZU dZdZeed< dZee ed< dZ	eed< 				ddee
 d	eeeee
 ef  d
ee dededdf fddZdefddZ  ZS )BinaryAUROCaB  Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for binary tasks. The AUROC
    score summarizes the ROC curve into an single number that describes the performance of a model for multiple
    thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
    corresponds to random guessing.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, ...)`` containing probabilities or logits for
      each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto apply
      sigmoid per element.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and
      therefore only contain {0,1} values (except if `ignore_index` is specified). The value 1 always encodes the
      positive class.

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``b_auroc`` (:class:`~torch.Tensor`): A single scalar with the auroc score.

    Additional dimension ``...`` will be flattened into the batch dimension.

    The implementation both supports calculating the metric in a non-binned but accurate version and a
    binned version that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will
    activate the non-binned  version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the
    `thresholds` argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
    size :math:`\mathcal{O}(n_{thresholds})` (constant memory).

    Args:
        max_fpr: If not ``None``, calculates standardized partial AUC over the range ``[0, max_fpr]``.
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
              all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
              0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
              bins for the calculation.

        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Example:
        >>> from torchmetrics.classification import BinaryAUROC
        >>> preds = torch.tensor([0, 0.5, 0.7, 0.8])
        >>> target = torch.tensor([0, 1, 1, 0])
        >>> metric = BinaryAUROC(thresholds=None)
        >>> metric(preds, target)
        tensor(0.5000)
        >>> b_auroc = BinaryAUROC(thresholds=5)
        >>> b_auroc(preds, target)
        tensor(0.5000)
    Fis_differentiableNhigher_is_betterfull_state_updateTmax_fpr
thresholdsignore_indexvalidate_argskwargsreturnc                    s4   t  jd||dd| |rt||| || _d S )NFr   r   r    )super__init__r   r   )selfr   r   r   r   r   	__class__r   U/home/ubuntu/.local/lib/python3.10/site-packages/torchmetrics/classification/auroc.pyr    `   s   
zBinaryAUROC.__init__c                 C   s6   | j d u rt| jt| jg}n| j}t|| j | jS N)r   r   predstargetconfmatr   r   r!   stater   r   r$   computem   s   
zBinaryAUROC.compute)NNNT)__name__
__module____qualname____doc__r   bool__annotations__r   r   r   floatr   intr   r   r   r    r+   __classcell__r   r   r"   r$   r   %   s.   
 6r   c                          e Zd ZU dZdZeed< dZee ed< dZ	eed< 				dd	e
d
eed  deee
ee ef  dee
 dededdf fddZdefddZ  ZS )MulticlassAUROCa  Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for multiclass tasks. The AUROC
    score summarizes the ROC curve into an single number that describes the performance of a model for multiple
    thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
    corresponds to random guessing.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits
      for each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto
      apply softmax per sample.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)`` containing ground truth labels, and
      therefore only contain values in the [0, n_classes-1] range (except if `ignore_index` is specified).

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mc_auroc`` (:class:`~torch.Tensor`): If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will
      be returned with auroc score per class. If `average="macro"|"weighted"` then a single scalar is returned.

    Additional dimension ``...`` will be flattened into the batch dimension.

    The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
    that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
    non-binned  version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
    size :math:`\mathcal{O}(n_{thresholds} \times n_{classes})` (constant memory).

    Args:
        num_classes: Integer specifing the number of classes
        average:
            Defines the reduction that is applied over classes. Should be one of the following:

            - ``macro``: Calculate score for each class and average them
            - ``weighted``: Calculates score for each class and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates score for each class and applies no reduction

        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
              all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
              0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
              bins for the calculation.

        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Example:
        >>> from torchmetrics.classification import MulticlassAUROC
        >>> preds = torch.tensor([[0.75, 0.05, 0.05, 0.05, 0.05],
        ...                       [0.05, 0.75, 0.05, 0.05, 0.05],
        ...                       [0.05, 0.05, 0.75, 0.05, 0.05],
        ...                       [0.05, 0.05, 0.05, 0.75, 0.05]])
        >>> target = torch.tensor([0, 1, 3, 2])
        >>> metric = MulticlassAUROC(num_classes=5, average="macro", thresholds=None)
        >>> metric(preds, target)
        tensor(0.5333)
        >>> mc_auroc = MulticlassAUROC(num_classes=5, average=None, thresholds=None)
        >>> mc_auroc(preds, target)
        tensor([1.0000, 1.0000, 0.3333, 0.3333, 0.0000])
        >>> mc_auroc = MulticlassAUROC(num_classes=5, average="macro", thresholds=5)
        >>> mc_auroc(preds, target)
        tensor(0.5333)
        >>> mc_auroc = MulticlassAUROC(num_classes=5, average=None, thresholds=5)
        >>> mc_auroc(preds, target)
        tensor([1.0000, 1.0000, 0.3333, 0.3333, 0.0000])
    Fr   Nr   r   macroTnum_classesaverager7   weightednoner   r   r   r   r   c                    >   t  jd|||dd| |rt|||| || _|| _d S )NF)r8   r   r   r   r   )r   r    r   r9   r   )r!   r8   r9   r   r   r   r   r"   r   r$   r          	
zMulticlassAUROC.__init__c                 C   s:   | j d u rt| jt| jg}n| j}t|| j| j| j S r%   )r   r   r&   r'   r(   r   r8   r9   r)   r   r   r$   r+      s   
zMulticlassAUROC.computer7   NNTr,   r-   r.   r/   r   r0   r1   r   r   r   r3   r   r   r   r2   r   r   r    r+   r4   r   r   r"   r$   r6   u   s2   
 G
r6   c                       r5   )MultilabelAUROCa
  Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_) for multilabel tasks. The AUROC
    score summarizes the ROC curve into an single number that describes the performance of a model for multiple
    thresholds at the same time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5
    corresponds to random guessing.

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): A float tensor of shape ``(N, C, ...)`` containing probabilities or logits
      for each observation. If preds has values outside [0,1] range we consider the input to be logits and will auto
      apply sigmoid per element.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, C, ...)`` containing ground truth labels, and
      therefore only contain {0,1} values (except if `ignore_index` is specified).

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``ml_auroc`` (:class:`~torch.Tensor`): If `average=None|"none"` then a 1d tensor of shape (n_classes, ) will
      be returned with auroc score per class. If `average="micro|macro"|"weighted"` then a single scalar is returned.

    Additional dimension ``...`` will be flattened into the batch dimension.

    The implementation both supports calculating the metric in a non-binned but accurate version and a binned version
    that is less accurate but more memory efficient. Setting the `thresholds` argument to `None` will activate the
    non-binned  version that uses memory of size :math:`\mathcal{O}(n_{samples})` whereas setting the `thresholds`
    argument to either an integer, list or a 1d tensor will use a binned version that uses memory of
    size :math:`\mathcal{O}(n_{thresholds} \times n_{labels})` (constant memory).

    Args:
        num_labels: Integer specifing the number of labels
        average:
            Defines the reduction that is applied over labels. Should be one of the following:

            - ``micro``: Sum score over all labels
            - ``macro``: Calculate score for each label and average them
            - ``weighted``: Calculates score for each label and computes weighted average using their support
            - ``"none"`` or ``None``: Calculates score for each label and applies no reduction
        thresholds:
            Can be one of:

            - If set to `None`, will use a non-binned approach where thresholds are dynamically calculated from
              all the data. Most accurate but also most memory consuming approach.
            - If set to an `int` (larger than 1), will use that number of thresholds linearly spaced from
              0 to 1 as bins for the calculation.
            - If set to an `list` of floats, will use the indicated thresholds in the list as bins for the calculation
            - If set to an 1d `tensor` of floats, will use the indicated thresholds in the tensor as
              bins for the calculation.

        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Example:
        >>> from torchmetrics.classification import MultilabelAUROC
        >>> preds = torch.tensor([[0.75, 0.05, 0.35],
        ...                       [0.45, 0.75, 0.05],
        ...                       [0.05, 0.55, 0.75],
        ...                       [0.05, 0.65, 0.05]])
        >>> target = torch.tensor([[1, 0, 1],
        ...                        [0, 0, 0],
        ...                        [0, 1, 1],
        ...                        [1, 1, 1]])
        >>> ml_auroc = MultilabelAUROC(num_labels=3, average="macro", thresholds=None)
        >>> ml_auroc(preds, target)
        tensor(0.6528)
        >>> ml_auroc = MultilabelAUROC(num_labels=3, average=None, thresholds=None)
        >>> ml_auroc(preds, target)
        tensor([0.6250, 0.5000, 0.8333])
        >>> ml_auroc = MultilabelAUROC(num_labels=3, average="macro", thresholds=5)
        >>> ml_auroc(preds, target)
        tensor(0.6528)
        >>> ml_auroc = MultilabelAUROC(num_labels=3, average=None, thresholds=5)
        >>> ml_auroc(preds, target)
        tensor([0.6250, 0.5000, 0.8333])
    Fr   Nr   r   r7   T
num_labelsr9   )micror7   r;   r<   r   r   r   r   r   c                    r=   )NF)rB   r   r   r   r   )r   r    r   r9   r   )r!   rB   r9   r   r   r   r   r"   r   r$   r    (  r>   zMultilabelAUROC.__init__c                 C   s>   | j d u rt| jt| jg}n| j}t|| j| j| j | jS r%   )	r   r   r&   r'   r(   r   rB   r9   r   r)   r   r   r$   r+   9  s   
zMultilabelAUROC.computer?   r@   r   r   r"   r$   rA      s2   
 I
rA   c                   @   s   e Zd ZdZ							dded deeeee	 e
f  dee d	ee d
eed  dee	 dee dededefddZdS )AUROCaf  Compute Area Under the Receiver Operating Characteristic Curve (`ROC AUC`_). The AUROC score summarizes the
    ROC curve into an single number that describes the performance of a model for multiple thresholds at the same
    time. Notably, an AUROC score of 1 is a perfect score and an AUROC score of 0.5 corresponds to random guessing.

    This module is a simple wrapper to get the task specific versions of this metric, which is done by setting the
    ``task`` argument to either ``'binary'``, ``'multiclass'`` or ``multilabel``. See the documentation of
    :mod:`BinaryAUROC`, :mod:`MulticlassAUROC` and :mod:`MultilabelAUROC` for the specific details of
    each argument influence and examples.

    Legacy Example:
        >>> preds = torch.tensor([0.13, 0.26, 0.08, 0.19, 0.34])
        >>> target = torch.tensor([0, 0, 1, 1, 1])
        >>> auroc = AUROC(task="binary")
        >>> auroc(preds, target)
        tensor(0.5000)

        >>> preds = torch.tensor([[0.90, 0.05, 0.05],
        ...                       [0.05, 0.90, 0.05],
        ...                       [0.05, 0.05, 0.90],
        ...                       [0.85, 0.05, 0.10],
        ...                       [0.10, 0.10, 0.80]])
        >>> target = torch.tensor([0, 1, 1, 2, 2])
        >>> auroc = AUROC(task="multiclass", num_classes=3)
        >>> auroc(preds, target)
        tensor(0.7778)
    Nr7   Ttask)binary
multiclass
multilabelr   r8   rB   r9   r:   r   r   r   r   r   c	           
      K   s   |	 t|||d |dkrt|fi |	S |dkr*t|ts!J t||fi |	S |dkr>t|ts5J t||fi |	S td| )Nr   rF   rG   rH   z[Expected argument `task` to either be `'binary'`, `'multiclass'` or `'multilabel'` but got )updatedictr   
isinstancer3   r6   rA   
ValueError)
clsrE   r   r8   rB   r9   r   r   r   r   r   r   r$   __new__]  s   zAUROC.__new__)NNNr7   NNT)r,   r-   r.   r/   r   r   r   r3   r   r2   r   r0   r   r   rN   r   r   r   r$   rD   A  s<    
	
rD   )typingr   r   r   r   torchr   typing_extensionsr   2torchmetrics.classification.precision_recall_curver   r	   r
   ,torchmetrics.functional.classification.aurocr   r   r   r   r   r   torchmetrics.metricr   torchmetrics.utilities.datar   r   r6   rA   rD   r   r   r   r$   <module>   s    Peg