from collections.abc import Sequence
from typing import Any, Optional, Union

from torch import Tensor
from typing_extensions import Literal

from torchmetrics.classification.base import _ClassificationTaskWrapper
from torchmetrics.classification.confusion_matrix import BinaryConfusionMatrix, MulticlassConfusionMatrix
from torchmetrics.functional.classification.cohen_kappa import (
    _binary_cohen_kappa_arg_validation,
    _cohen_kappa_reduce,
    _multiclass_cohen_kappa_arg_validation,
)
from torchmetrics.metric import Metric
from torchmetrics.utilities.enums import ClassificationTaskNoMultilabel
from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE

if not _MATPLOTLIB_AVAILABLE:
    __doctest_skip__ = ["BinaryCohenKappa.plot", "MulticlassCohenKappa.plot"]


class BinaryCohenKappa(BinaryConfusionMatrix):
    r"""Calculate `Cohen's kappa score`_ that measures inter-annotator agreement for binary tasks.

    .. math::
        \kappa = (p_o - p_e) / (1 - p_e)

    where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
    the expected agreement when both annotators assign labels randomly. Note that
    :math:`p_e` is estimated using a per-annotator empirical prior over the
    class labels.
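
    For instance, with ``target = [1, 1, 0, 0]`` and ``preds = [0, 1, 0, 0]`` (the first example
    below) the annotators agree on 3 of 4 samples, so :math:`p_o = 0.75`; the empirical priors are
    :math:`(0.5, 0.5)` and :math:`(0.75, 0.25)`, giving
    :math:`p_e = 0.5 \cdot 0.75 + 0.5 \cdot 0.25 = 0.5` and thus
    :math:`\kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5`.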

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): An int or float tensor of shape ``(N, ...)``. If preds is a floating point
      tensor with values outside the ``[0,1]`` range, we consider the input to be logits and auto-apply a sigmoid
      per element. Additionally, the tensor is converted to an int tensor by thresholding with the value in
      ``threshold``.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.

    .. tip::
       Additional dimension ``...`` will be flattened into the batch dimension.

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``bc_kappa`` (:class:`~torch.Tensor`): A tensor containing the Cohen's kappa score

    Args:
        threshold: Threshold for transforming probability to binary (0,1) predictions
        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        weights: Weighting type to calculate the score. Choose from:

            - ``None`` or ``'none'``: no weighting
            - ``'linear'``: linear weighting
            - ``'quadratic'``: quadratic weighting
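
            For binary targets all three choices yield the same score, since :math:`|i - j|` and
            :math:`(i - j)^2` coincide when the class indices are 0 and 1.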

        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Example (preds is int tensor):
        >>> from torch import tensor
        >>> from torchmetrics.classification import BinaryCohenKappa
        >>> target = tensor([1, 1, 0, 0])
        >>> preds = tensor([0, 1, 0, 0])
        >>> metric = BinaryCohenKappa()
        >>> metric(preds, target)
        tensor(0.5000)

    Example (preds is float tensor):
        >>> from torchmetrics.classification import BinaryCohenKappa
        >>> target = tensor([1, 1, 0, 0])
        >>> preds = tensor([0.35, 0.85, 0.48, 0.01])
        >>> metric = BinaryCohenKappa()
        >>> metric(preds, target)
        tensor(0.5000)
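
    Values outside the ``[0,1]`` range are treated as logits and passed through a sigmoid before
    thresholding, so the following yields the same hard predictions as the float example above:

    Example (preds is logit tensor):
        >>> from torchmetrics.classification import BinaryCohenKappa
        >>> target = tensor([1, 1, 0, 0])
        >>> preds = tensor([-1.0, 2.0, -0.5, -5.0])
        >>> metric = BinaryCohenKappa()
        >>> metric(preds, target)
        tensor(0.5000)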

    """

    is_differentiable: bool = False
    higher_is_better: bool = True
    full_state_update: bool = False
    plot_lower_bound: float = 0.0
    plot_upper_bound: float = 1.0

    def __init__(
        self,
        threshold: float = 0.5,
        ignore_index: Optional[int] = None,
        weights: Optional[Literal["linear", "quadratic", "none"]] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        # Reuse the binary confusion-matrix state; normalization stays off because
        # the kappa reduction operates on raw counts.
        super().__init__(threshold, ignore_index, normalize=None, validate_args=False, **kwargs)
        if validate_args:
            _binary_cohen_kappa_arg_validation(threshold, ignore_index, weights)
        self.weights = weights
        self.validate_args = validate_args

    def compute(self) -> Tensor:
        """Compute metric."""
        return _cohen_kappa_reduce(self.confmat, self.weights)

    def plot(
        self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
    ) -> _PLOT_OUT_TYPE:
        """Plot a single or multiple values from the metric.

        Args:
            val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
                If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, the plot will be added to that axis.

        Returns:
            Figure object and Axes object

        Raises:
            ModuleNotFoundError:
                If `matplotlib` is not installed

        .. plot::
            :scale: 75

            >>> from torch import rand, randint
            >>> # Example plotting a single value
            >>> from torchmetrics.classification import BinaryCohenKappa
            >>> metric = BinaryCohenKappa()
            >>> metric.update(rand(10), randint(2,(10,)))
            >>> fig_, ax_ = metric.plot()

        .. plot::
            :scale: 75

            >>> from torch import rand, randint
            >>> # Example plotting multiple values
            >>> from torchmetrics.classification import BinaryCohenKappa
            >>> metric = BinaryCohenKappa()
            >>> values = []
            >>> for _ in range(10):
            ...     values.append(metric(rand(10), randint(2,(10,))))
            >>> fig_, ax_ = metric.plot(values)

        """
        return self._plot(val, ax)


class MulticlassCohenKappa(MulticlassConfusionMatrix):
    r"""Calculate `Cohen's kappa score`_ that measures inter-annotator agreement for multiclass tasks.

    .. math::
        \kappa = (p_o - p_e) / (1 - p_e)

    where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
    the expected agreement when both annotators assign labels randomly. Note that
    :math:`p_e` is estimated using a per-annotator empirical prior over the
    class labels.
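
    For instance, with ``target = [2, 1, 0, 0]`` and ``preds = [2, 1, 0, 1]`` (the first example
    below) the annotators agree on 3 of 4 samples, so :math:`p_o = 0.75`; the per-class empirical
    priors are :math:`(0.5, 0.25, 0.25)` and :math:`(0.25, 0.5, 0.25)`, giving
    :math:`p_e = 0.5 \cdot 0.25 + 0.25 \cdot 0.5 + 0.25 \cdot 0.25 = 0.3125` and thus
    :math:`\kappa = (0.75 - 0.3125) / (1 - 0.3125) \approx 0.6364`.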

    As input to ``forward`` and ``update`` the metric accepts the following input:

    - ``preds`` (:class:`~torch.Tensor`): Either an int tensor of shape ``(N, ...)`` or a float tensor of shape
      ``(N, C, ...)``. If preds is a floating point tensor, we apply ``torch.argmax`` along the ``C`` dimension to
      automatically convert probabilities/logits into an int tensor.
    - ``target`` (:class:`~torch.Tensor`): An int tensor of shape ``(N, ...)``.

    .. tip::
       Additional dimension ``...`` will be flattened into the batch dimension.

    As output to ``forward`` and ``compute`` the metric returns the following output:

    - ``mcck`` (:class:`~torch.Tensor`): A tensor containing the Cohen's kappa score

    Args:
        num_classes: Integer specifying the number of classes
        ignore_index:
            Specifies a target value that is ignored and does not contribute to the metric calculation
        weights: Weighting type to calculate the score. Choose from:

            - ``None`` or ``'none'``: no weighting
            - ``'linear'``: linear weighting
            - ``'quadratic'``: quadratic weighting
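
            With ``'linear'`` or ``'quadratic'`` weighting the score follows the standard weighted
            kappa :math:`\kappa_w = 1 - \sum_{ij} w_{ij} O_{ij} / \sum_{ij} w_{ij} E_{ij}`, where
            :math:`O` and :math:`E` are the observed and expected confusion matrices and
            :math:`w_{ij}` grows with :math:`|i - j|` (linearly or quadratically), so disagreements
            between distant classes are penalized more heavily.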

        validate_args: bool indicating if input arguments and tensors should be validated for correctness.
            Set to ``False`` for faster computations.
        kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.

    Example (preds is int tensor):
        >>> from torch import tensor
        >>> from torchmetrics.classification import MulticlassCohenKappa
        >>> target = tensor([2, 1, 0, 0])
        >>> preds = tensor([2, 1, 0, 1])
        >>> metric = MulticlassCohenKappa(num_classes=3)
        >>> metric(preds, target)
        tensor(0.6364)

    Example (preds is float tensor):
        >>> from torchmetrics.classification import MulticlassCohenKappa
        >>> target = tensor([2, 1, 0, 0])
        >>> preds = tensor([[0.16, 0.26, 0.58],
        ...                 [0.22, 0.61, 0.17],
        ...                 [0.71, 0.09, 0.20],
        ...                 [0.05, 0.82, 0.13]])
        >>> metric = MulticlassCohenKappa(num_classes=3)
        >>> metric(preds, target)
        tensor(0.6364)

    """

    is_differentiable: bool = False
    higher_is_better: bool = True
    full_state_update: bool = False
    plot_lower_bound: float = 0.0
    plot_upper_bound: float = 1.0
    plot_legend_name: str = "Class"

    def __init__(
        self,
        num_classes: int,
        ignore_index: Optional[int] = None,
        weights: Optional[Literal["linear", "quadratic", "none"]] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> None:
        # Reuse the multiclass confusion-matrix state; normalization stays off because
        # the kappa reduction operates on raw counts.
        super().__init__(num_classes, ignore_index, normalize=None, validate_args=False, **kwargs)
        if validate_args:
            _multiclass_cohen_kappa_arg_validation(num_classes, ignore_index, weights)
        self.weights = weights
        self.validate_args = validate_args

    def compute(self) -> Tensor:
        """Compute metric."""
        return _cohen_kappa_reduce(self.confmat, self.weights)

    def plot(
        self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
    ) -> _PLOT_OUT_TYPE:
        """Plot a single or multiple values from the metric.

        Args:
            val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
                If no value is provided, will automatically call `metric.compute` and plot that result.
            ax: A matplotlib axis object. If provided, the plot will be added to that axis.

        Returns:
            Figure object and Axes object

        Raises:
            ModuleNotFoundError:
                If `matplotlib` is not installed

        .. plot::
            :scale: 75

            >>> from torch import randn, randint
            >>> # Example plotting a single value
            >>> from torchmetrics.classification import MulticlassCohenKappa
            >>> metric = MulticlassCohenKappa(num_classes=3)
            >>> metric.update(randn(20,3).softmax(dim=-1), randint(3, (20,)))
            >>> fig_, ax_ = metric.plot()

        .. plot::
            :scale: 75

            >>> from torch import randn, randint
            >>> # Example plotting multiple values
            >>> from torchmetrics.classification import MulticlassCohenKappa
            >>> metric = MulticlassCohenKappa(num_classes=3)
            >>> values = []
            >>> for _ in range(20):
            ...     values.append(metric(randn(20,3).softmax(dim=-1), randint(3, (20,))))
            >>> fig_, ax_ = metric.plot(values)

        """
        return self._plot(val, ax)


class CohenKappa(_ClassificationTaskWrapper):
    r"""Calculate `Cohen's kappa score`_ that measures inter-annotator agreement.

    .. math::
        \kappa = (p_o - p_e) / (1 - p_e)

    where :math:`p_o` is the empirical probability of agreement and :math:`p_e` is
    the expected agreement when both annotators assign labels randomly. Note that
    :math:`p_e` is estimated using a per-annotator empirical prior over the
    class labels.

    This class is a simple wrapper that dispatches to the task-specific version of this metric, selected by setting
    the ``task`` argument to either ``'binary'`` or ``'multiclass'``. See the documentation of
    :class:`~torchmetrics.classification.BinaryCohenKappa` and
    :class:`~torchmetrics.classification.MulticlassCohenKappa` for the specific details of each argument's influence
    and for examples.
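
    The wrapper simply instantiates and returns the task-specific metric:

        >>> from torchmetrics.classification import BinaryCohenKappa, CohenKappa
        >>> type(CohenKappa(task="binary")) is BinaryCohenKappa
        True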

    Legacy Example:
        >>> from torch import tensor
        >>> target = tensor([1, 1, 0, 0])
        >>> preds = tensor([0, 1, 0, 0])
        >>> cohenkappa = CohenKappa(task="multiclass", num_classes=2)
        >>> cohenkappa(preds, target)
        tensor(0.5000)

    """

    def __new__(  # type: ignore[misc]
        cls: type,
        task: Literal["binary", "multiclass"],
        threshold: float = 0.5,
        num_classes: Optional[int] = None,
        weights: Optional[Literal["linear", "quadratic", "none"]] = None,
        ignore_index: Optional[int] = None,
        validate_args: bool = True,
        **kwargs: Any,
    ) -> Metric:
        """Initialize task metric."""
        # Dispatch to the task-specific implementation based on the ``task`` argument.
        task = ClassificationTaskNoMultilabel.from_str(task)
        kwargs.update({"weights": weights, "ignore_index": ignore_index, "validate_args": validate_args})
        if task == ClassificationTaskNoMultilabel.BINARY:
            return BinaryCohenKappa(threshold, **kwargs)
        if task == ClassificationTaskNoMultilabel.MULTICLASS:
            if not isinstance(num_classes, int):
                raise ValueError(f"`num_classes` is expected to be `int` but `{type(num_classes)} was passed.`")
            return MulticlassCohenKappa(num_classes, **kwargs)
        raise ValueError(f"Task {task} not supported!")