from collections.abc import Generator
from contextlib import contextmanager
from typing import Any, Callable, Literal, Optional, Union

import torch
from torch import Tensor
from torch.optim import LBFGS, Optimizer
from typing_extensions import override

import pytorch_lightning as pl
from lightning_fabric.plugins.precision.amp import _optimizer_handles_unscaling
from lightning_fabric.utilities.imports import _TORCH_GREATER_EQUAL_2_4
from lightning_fabric.utilities.types import Optimizable
from pytorch_lightning.plugins.precision.precision import Precision
from pytorch_lightning.utilities import GradClipAlgorithmType
from pytorch_lightning.utilities.exceptions import MisconfigurationException


class MixedPrecision(Precision):
    """Plugin for Automatic Mixed Precision (AMP) training with ``torch.autocast``.

    Args:
        precision: Whether to use ``torch.float16`` (``'16-mixed'``) or ``torch.bfloat16`` (``'bf16-mixed'``).
        device: The device for ``torch.autocast``.
        scaler: An optional :class:`torch.cuda.amp.GradScaler` to use.

    """

    def __init__(
        self,
        precision: Literal["16-mixed", "bf16-mixed"],
        device: str,
        scaler: Optional["torch.amp.GradScaler"] = None,
    ) -> None:
        if precision not in ("16-mixed", "bf16-mixed"):
            raise ValueError(
                f"Passed `{type(self).__name__}(precision={precision!r})`."
                " Precision must be '16-mixed' or 'bf16-mixed'."
            )

        self.precision = precision
        if scaler is None and self.precision == "16-mixed":
            scaler = torch.amp.GradScaler(device=device) if _TORCH_GREATER_EQUAL_2_4 else torch.cuda.amp.GradScaler()
        if scaler is not None and self.precision == "bf16-mixed":
            raise MisconfigurationException(f"`precision='bf16-mixed'` does not use a scaler, found {scaler}.")
        self.device = device
        self.scaler = scaler

    @override
    def pre_backward(self, tensor: Tensor, module: "pl.LightningModule") -> Tensor:
        if self.scaler is not None:
            tensor = self.scaler.scale(tensor)
        return super().pre_backward(tensor, module)

    @override
    def optimizer_step(
        self,
        optimizer: Optimizable,
        model: "pl.LightningModule",
        closure: Callable[[], Any],
        **kwargs: Any,
    ) -> Any:
        if self.scaler is None:
            # skip the scaler logic, as bfloat16 does not require a scaler
            return super().optimizer_step(optimizer, model=model, closure=closure, **kwargs)
        if isinstance(optimizer, LBFGS):
            raise MisconfigurationException("AMP and the LBFGS optimizer are not compatible.")
        closure_result = closure()

        # if backward was skipped in automatic optimization (the closure returned None), unscaling is not needed
        skip_unscaling = closure_result is None and model.automatic_optimization

        if not _optimizer_handles_unscaling(optimizer) and not skip_unscaling:
            # unscale here so that gradient clipping in the subsequent hooks sees the true gradients
            self.scaler.unscale_(optimizer)

        self._after_closure(model, optimizer)

        if not skip_unscaling:
            # note: the scaler will skip the `optimizer.step` if nonfinite gradients are found
            step_output = self.scaler.step(optimizer, **kwargs)
            self.scaler.update()
            return step_output
        return closure_result

    @override
    def clip_gradients(
        self,
        optimizer: Optimizer,
        clip_val: Union[int, float] = 0.0,
        gradient_clip_algorithm: GradClipAlgorithmType = GradClipAlgorithmType.NORM,
    ) -> None:
        if clip_val > 0 and _optimizer_handles_unscaling(optimizer):
            raise RuntimeError(
                f"The current optimizer, {type(optimizer).__qualname__}, does not allow for gradient clipping"
                " because it performs unscaling of gradients internally."
                " HINT: Are you using a 'fused' optimizer?"
            )
        super().clip_gradients(optimizer=optimizer, clip_val=clip_val, gradient_clip_algorithm=gradient_clip_algorithm)

    def autocast_context_manager(self) -> torch.autocast:
        return torch.autocast(self.device, dtype=torch.bfloat16 if self.precision == "bf16-mixed" else torch.half)

    @override
    @contextmanager
    def forward_context(self) -> Generator[None, None, None]:
        """Enable autocast context."""
        with self.autocast_context_manager():
            yield

    @override
    def state_dict(self) -> dict[str, Any]:
        if self.scaler is not None:
            return self.scaler.state_dict()
        return {}

    @override
    def load_state_dict(self, state_dict: dict[str, Any]) -> None:
        if self.scaler is not None:
            self.scaler.load_state_dict(state_dict)
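

# Usage sketch (illustrative, not part of the module): the plugin is normally
# created by the Trainer from a flag such as `Trainer(precision="16-mixed")`,
# but it can be constructed directly. The guarded demo below uses bf16 on CPU,
# which needs no GradScaler and therefore runs without a GPU.
if __name__ == "__main__":
    plugin = MixedPrecision(precision="bf16-mixed", device="cpu")

    with plugin.forward_context():
        x = torch.randn(4, 4)
        y = x @ x  # runs under autocast; the matmul is computed in bfloat16
    print(y.dtype)  # torch.bfloat16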