from typing import cast, List, Optional, Tuple, Union

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _device_dtype_check_for_fused,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _foreach_doc,
    _fused_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _get_value,
    _maximize_doc,
    _params_doc,
    _stack_if_compiling,
    _use_grad_for_differentiable,
    _view_as_real,
    DeviceDict,
    DeviceDtypeDict,
    Optimizer,
    ParamsT,
)

__all__ = ["AdamW", "adamw"]


class AdamW(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-3,
        betas: Tuple[Union[float, Tensor], Union[float, Tensor]] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 1e-2,
        amsgrad: bool = False,
        *,
        maximize: bool = False,
        foreach: Optional[bool] = None,
        capturable: bool = False,
        differentiable: bool = False,
        fused: Optional[bool] = None,
    ):
        if isinstance(lr, Tensor):
            if foreach and not capturable:
                raise ValueError(
                    "lr as a Tensor is not supported for capturable=False and foreach=True"
                )
            if lr.numel() != 1:
                raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        if not (
            (isinstance(betas[0], float) and isinstance(betas[1], float))
            or (isinstance(betas[0], Tensor) and isinstance(betas[1], Tensor))
        ):
            raise ValueError("betas must be either both floats or both Tensors")
        if isinstance(betas[0], Tensor):
            if not capturable and foreach:
                raise ValueError(
                    "betas[0] as a Tensor is not supported for capturable=False and foreach=True"
                )
            if betas[0].numel() != 1:
                raise ValueError("Tensor betas[0] must be 1-element")
        if isinstance(betas[1], Tensor):
            if not capturable and foreach:
                raise ValueError(
                    "betas[1] as a Tensor is not supported for capturable=False and foreach=True"
                )
            if betas[1].numel() != 1:
                raise ValueError("Tensor betas[1] must be 1-element")

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            amsgrad=amsgrad,
            foreach=foreach,
            maximize=maximize,
            capturable=capturable,
            differentiable=differentiable,
            fused=fused,
        )
        super().__init__(params, defaults)

        if fused:
            if differentiable:
                raise RuntimeError("`fused` does not support `differentiable`")
            self._step_supports_amp_scaling = True
            if foreach:
                raise RuntimeError("`fused` and `foreach` cannot be `True` together.")

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("amsgrad", False)
            group.setdefault("maximize", False)
            group.setdefault("foreach", None)
            group.setdefault("capturable", False)
            group.setdefault("differentiable", False)
            fused = group.setdefault("fused", None)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(
                            step_val,
                            dtype=_get_scalar_dtype(is_fused=fused),
                            device=p.device,
                        )
                        if group["capturable"] or group["fused"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(
        self,
        group,
        params_with_grad,
        grads,
        amsgrad,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
    ):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)
            if p.grad.is_sparse:
                raise RuntimeError("AdamW does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                if group["fused"]:
                    _device_dtype_check_for_fused(p)
                # Host `step` on CPU if both capturable and fused are off,
                # since kernel launches are costly on CUDA and XLA.
                state["step"] = (
                    torch.zeros(
                        (),
                        dtype=_get_scalar_dtype(is_fused=group["fused"]),
                        device=p.device,
                    )
                    if group["capturable"] or group["fused"]
                    else torch.tensor(0.0, dtype=_get_scalar_dtype())
                )
                # Exponential moving average of gradient values
                state["exp_avg"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                # Exponential moving average of squared gradient values
                state["exp_avg_sq"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                if amsgrad:
                    # Maintains max of all exp. moving avg. of sq. grad. values
                    state["max_exp_avg_sq"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )

            exp_avgs.append(state["exp_avg"])
            exp_avg_sqs.append(state["exp_avg_sq"])

            if amsgrad:
                max_exp_avg_sqs.append(state["max_exp_avg_sq"])
            if group["differentiable"] and state["step"].requires_grad:
                raise RuntimeError(
                    "`requires_grad` is not supported for `step` in differentiable mode"
                )
            # Foreach without capturable does not support a tensor lr
            if (
                group["foreach"]
                and isinstance(group["lr"], Tensor)
                and not group["capturable"]
            ):
                raise ValueError(
                    "lr as a Tensor is not supported for capturable=False and foreach=True"
                )
            state_steps.append(state["step"])
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            exp_avgs: List[Tensor] = []
            exp_avg_sqs: List[Tensor] = []
            max_exp_avg_sqs: List[Tensor] = []
            state_steps: List[Tensor] = []
            amsgrad: bool = group["amsgrad"]
            beta1, beta2 = cast(Tuple[float, float], group["betas"])

            has_complex = self._init_group(
                group,
                params_with_grad,
                grads,
                amsgrad,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
            )

            adamw(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
                amsgrad=amsgrad,
                beta1=beta1,
                beta2=beta2,
                lr=group["lr"],
                weight_decay=group["weight_decay"],
                eps=group["eps"],
                maximize=group["maximize"],
                foreach=group["foreach"],
                capturable=group["capturable"],
                differentiable=group["differentiable"],
                fused=group["fused"],
                grad_scale=getattr(self, "grad_scale", None),
                found_inf=getattr(self, "found_inf", None),
                has_complex=has_complex,
            )

        return loss


AdamW.__doc__ = (
    r"""Implements AdamW algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{(lr)}, \: \beta_1, \beta_2
                \text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},
                \: \epsilon \text{ (epsilon)}                                                    \\
            &\hspace{13mm}      \lambda \text{(weight decay)},  \: \textit{amsgrad},
                \: \textit{maximize}                                                             \\
            &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0
                \text{ (second moment)}, \: \widehat{v_0}^{max}\leftarrow 0              \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\

            &\hspace{5mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{10mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1}         \\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{5mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                   \\
            &\hspace{5mm}\textbf{if} \: amsgrad                                                  \\
            &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_{t-1}}^{max},
                \widehat{v_t})                                                                   \\
            &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big)                                 \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_.
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
            is not yet supported for all our implementations. Please use a float
            LR if you are not also specifying fused=True or capturable=True.
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
        {_maximize_doc}
        {_foreach_doc}
        {_capturable_doc}
        {_differentiable_doc}
        {_fused_doc}
    .. Note::
        A prototype implementation of Adam and AdamW for MPS supports `torch.float32` and `torch.float16`.
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ

    """
)


def _single_tensor_adamw(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    beta1: Union[float, Tensor],
    beta2: Union[float, Tensor],
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
    has_complex: bool,
):
    assert grad_scale is None and found_inf is None

    if torch.jit.is_scripting():
        # Under TorchScript, lr and the betas must be plain floats.
        assert isinstance(lr, float)
        assert isinstance(beta1, float)
        assert isinstance(beta2, float)

    # Only shuffle beta1 across devices/dtypes when it is a Tensor; otherwise
    # treat it as a scalar.
    if isinstance(beta1, Tensor):
        beta1_dict: Optional[DeviceDtypeDict] = {(beta1.device, beta1.dtype): beta1}
    else:
        beta1_dict = None

    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]

        # When compiling, the compiler handles the cudagraph checks.
        if not torch.compiler.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)
            if amsgrad:
                max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i])
            param = torch.view_as_real(param)

        # update step
        step_t += 1

        # Perform stepweight (decoupled) decay
        param.mul_(1 - lr * weight_decay)

        if beta1_dict is not None:
            dtype = param.dtype
            key = (param.device, dtype)
            if key not in beta1_dict:
                beta1_dict[key] = beta1.to(
                    device=param.device, dtype=dtype, non_blocking=True
                )
            device_beta1: Union[float, Tensor] = beta1_dict[key]
        else:
            device_beta1 = beta1

        # Decay the first and second moment running average coefficient
        exp_avg.lerp_(grad, 1 - device_beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

        if capturable or differentiable:
            step = step_t

            bias_correction1 = 1 - beta1**step
            bias_correction2 = 1 - beta2**step

            step_size = lr / bias_correction1
            step_size_neg = step_size.neg()

            bias_correction2_sqrt = bias_correction2.sqrt()

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                if differentiable:
                    max_exp_avg_sq = max_exp_avg_sqs[i].clone()
                else:
                    max_exp_avg_sq = max_exp_avg_sqs[i]

                max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq))

                # Fold the 1-elem step_size math into the denominator to avoid
                # allocating extra param-sized buffers.
                denom = (
                    max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)
                ).add_(eps / step_size_neg)
            else:
                denom = (
                    exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)
                ).add_(eps / step_size_neg)

            param.addcdiv_(exp_avg, denom)
        else:
            step = _get_value(step_t)

            bias_correction1 = 1 - beta1**step
            bias_correction2 = 1 - beta2**step

            step_size = lr / bias_correction1

            bias_correction2_sqrt = bias_correction2**0.5

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])

                # Use the max. for normalizing running avg. of gradient
                denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps)
            else:
                denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)

            param.addcdiv_(exp_avg, denom, value=-step_size)

        # Lastly, switch back to complex view
        if amsgrad and torch.is_complex(params[i]):
            max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i])
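
# Illustrative aside (an editorial addition, not part of the upstream module):
# a minimal sketch of the update that ``_single_tensor_adamw`` above performs
# for one real-valued parameter, with the in-place/capturable/complex plumbing
# stripped away. The helper name and its out-of-place style are hypothetical,
# for exposition only; it mirrors the math in ``AdamW.__doc__``.
def _adamw_update_sketch(
    p: Tensor,
    g: Tensor,
    m: Tensor,
    v: Tensor,
    t: int,
    lr: float = 1e-3,
    beta1: float = 0.9,
    beta2: float = 0.999,
    eps: float = 1e-8,
    weight_decay: float = 1e-2,
) -> Tuple[Tensor, Tensor, Tensor]:
    p = p * (1 - lr * weight_decay)  # decoupled (stepweight) decay
    m = beta1 * m + (1 - beta1) * g  # first moment EMA
    v = beta2 * v + (1 - beta2) * g * g  # second moment EMA
    m_hat = m / (1 - beta1**t)  # bias corrections
    v_hat = v / (1 - beta2**t)
    return p - lr * m_hat / (v_hat.sqrt() + eps), m, v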

def _multi_tensor_adamw(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    beta1: Union[float, Tensor],
    beta2: Union[float, Tensor],
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
    has_complex: bool,
):
    if len(params) == 0:
        return

    if isinstance(lr, Tensor) and not capturable:
        raise RuntimeError(
            "lr as a Tensor is not supported for capturable=False and foreach=True"
        )
    if isinstance(beta1, Tensor):
        if not capturable:
            raise ValueError(
                "beta1 as a Tensor is not supported for capturable=False and foreach=True"
            )
        if beta1.numel() != 1:
            raise ValueError("Tensor beta1 must be 1-element")
    if isinstance(beta2, Tensor):
        if not capturable:
            raise ValueError(
                "beta2 as a Tensor is not supported for capturable=False and foreach=True"
            )
        if beta2.numel() != 1:
            raise ValueError("Tensor beta2 must be 1-element")

    # When compiling, the compiler handles the cudagraph checks.
    if not torch.compiler.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    assert not differentiable, "_foreach ops don't support autograd"
    assert grad_scale is None and found_inf is None

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
    )

    # Only shuffle beta1 across devices when it is a non-CPU Tensor; otherwise
    # treat it as a scalar.
    beta1_dict: Optional[DeviceDict] = (
        {beta1.device: beta1}
        if isinstance(beta1, Tensor) and str(beta1.device) != "cpu"
        else None
    )

    for (
        device_params_,
        device_grads_,
        device_exp_avgs_,
        device_exp_avg_sqs_,
        device_max_exp_avg_sqs_,
        device_state_steps_,
    ), _ in grouped_tensors.values():
        device_params = cast(List[Tensor], device_params_)
        device_grads = cast(List[Tensor], device_grads_)
        device_exp_avgs = cast(List[Tensor], device_exp_avgs_)
        device_exp_avg_sqs = cast(List[Tensor], device_exp_avg_sqs_)
        device_state_steps = cast(List[Tensor], device_state_steps_)

        device = device_params[0].device
        if beta1_dict is not None and device not in beta1_dict:
            beta1_dict[device] = beta1.to(device=device, non_blocking=True)
        device_beta1 = beta1_dict[device] if beta1_dict else beta1

        if has_complex:
            if amsgrad:
                device_max_exp_avg_sqs = cast(List[Tensor], device_max_exp_avg_sqs_)
                _view_as_real(
                    device_params,
                    device_grads,
                    device_exp_avgs,
                    device_exp_avg_sqs,
                    device_max_exp_avg_sqs,
                )
            else:
                _view_as_real(
                    device_params, device_grads, device_exp_avgs, device_exp_avg_sqs
                )

        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        # Update steps. If steps are on CPU, foreach falls back to a slow
        # per-tensor loop that would wrap `1` into a Tensor over and over, so
        # wrap it once here; `alpha` selects the right overload.
        if not torch.compiler.is_compiling() and device_state_steps[0].is_cpu:
            torch._foreach_add_(
                device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(device_state_steps, 1)

        # Perform stepweight (decoupled) decay
        if weight_decay != 0:
            torch._foreach_mul_(device_params, 1 - lr * weight_decay)

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - device_beta1)
        torch._foreach_mul_(device_exp_avg_sqs, beta2)

        # _foreach_addcmul_ only accepts a python number as its scalar, so when
        # beta2 is a Tensor the (1 - beta2) factor is applied by a separate mul.
        if isinstance(beta2, torch.Tensor):
            scaled_device_grads = torch._foreach_mul(device_grads, 1 - beta2)
            value = 1.0
        else:
            scaled_device_grads = device_grads
            value = 1 - beta2

        torch._foreach_addcmul_(
            device_exp_avg_sqs, scaled_device_grads, device_grads, value
        )

        # Free the local intermediates early to save on peak memory.
        del device_grads
        del scaled_device_grads

        if capturable:
            bias_correction1 = torch._foreach_pow(beta1, device_state_steps)
            bias_correction2 = torch._foreach_pow(beta2, device_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_correction1, 1)
            torch._foreach_sub_(bias_correction2, 1)
            # bias_correction1 is not negated here since it is negated below
            torch._foreach_neg_(bias_correction2)

            # foreach_div doesn't allow a scalar as the first arg
            torch._foreach_div_(bias_correction1, lr)
            torch._foreach_reciprocal_(bias_correction1)
            torch._foreach_sqrt_(bias_correction2)

            # Re-assign for clarity; with t = num_steps:
            # step_size = - lr / (1 - beta1 ^ t)
            # bias_correction2_sqrt = sqrt(1 - beta2 ^ t)
            step_size = bias_correction1
            bias_correction2_sqrt = bias_correction2

            if amsgrad:
                device_max_exp_avg_sqs = cast(List[Tensor], device_max_exp_avg_sqs_)
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_div_(exp_avg_sq_sqrt, step_size)

            torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt)
        else:
            bias_correction1 = [
                1 - beta1 ** _get_value(step) for step in device_state_steps
            ]
            bias_correction2 = [
                1 - beta2 ** _get_value(step) for step in device_state_steps
            ]

            step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])
            bias_correction2_sqrt = [bc**0.5 for bc in bias_correction2]

            if amsgrad:
                device_max_exp_avg_sqs = cast(List[Tensor], device_max_exp_avg_sqs_)
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_addcdiv_(
                device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size
            )


def _fused_adamw(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,  # Needed for consistency.
    differentiable: bool,
    has_complex: bool,
) -> None:
    if not params:
        return
    if differentiable:
        raise RuntimeError("Adam with fused=True does not support differentiable=True")

    grad_scale_dict: DeviceDict = (
        {grad_scale.device: grad_scale} if grad_scale is not None else {}
    )
    found_inf_dict: DeviceDict = (
        {found_inf.device: found_inf} if found_inf is not None else {}
    )

    # Only shuffle the lr across devices when it is a non-CPU Tensor;
    # otherwise treat it as a scalar.
    lr_dict: Optional[DeviceDict] = (
        {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None
    )
    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
    )
    for (device, _), (
        (
            device_params_,
            device_grads_,
            device_exp_avgs_,
            device_exp_avg_sqs_,
            device_max_exp_avg_sqs,
            device_state_steps_,
        ),
        _,
    ) in grouped_tensors.items():
        device_params = cast(List[Tensor], device_params_)
        device_grads = cast(List[Tensor], device_grads_)
        device_exp_avgs = cast(List[Tensor], device_exp_avgs_)
        device_exp_avg_sqs = cast(List[Tensor], device_exp_avg_sqs_)
        device_state_steps = cast(List[Tensor], device_state_steps_)

        if device.type == "mps":
            assert found_inf is None and grad_scale is None

        device_grad_scale, device_found_inf = None, None
        if grad_scale is not None:
            device_grad_scale = grad_scale_dict.setdefault(
                device, grad_scale.to(device, non_blocking=True)
            )
        if found_inf is not None:
            device_found_inf = found_inf_dict.setdefault(
                device, found_inf.to(device, non_blocking=True)
            )
        if lr_dict is not None and device not in lr_dict:
            lr = lr_dict.setdefault(
                device, lr.to(device=device, non_blocking=True)
            )
        torch._foreach_add_(device_state_steps, 1)
        torch._fused_adamw_(
            device_params,
            device_grads,
            device_exp_avgs,
            device_exp_avg_sqs,
            device_max_exp_avg_sqs,
            device_state_steps,
            amsgrad=amsgrad,
            lr=lr,
            beta1=beta1,
            beta2=beta2,
            weight_decay=weight_decay,
            eps=eps,
            maximize=maximize,
            grad_scale=device_grad_scale,
            found_inf=device_found_inf,
        )
        if device_found_inf is not None:
            torch._foreach_sub_(
                device_state_steps, [device_found_inf] * len(device_state_steps)
            )


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adamw)
def adamw(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with
    # torchscript, so these stay positional-with-default for now; the
    # functional API is compiled by torch/distributed/optim.
    foreach: Optional[bool] = None,
    capturable: bool = False,
    differentiable: bool = False,
    fused: Optional[bool] = None,
    grad_scale: Optional[Tensor] = None,
    found_inf: Optional[Tensor] = None,
    has_complex: bool = False,
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
):
    r"""Functional API that performs AdamW algorithm computation.

    See :class:`~torch.optim.AdamW` for details.
    """
    if not torch.compiler.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )
    if fused is None and foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )
        # Do not flip on foreach for the unsupported case where lr is a Tensor
        # and capturable=False.
        if foreach and isinstance(lr, Tensor) and not capturable:
            foreach = False
    if fused is None:
        fused = False
    if foreach is None:
        foreach = False

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    if fused and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with fused optimizers")

    if fused and not torch.jit.is_scripting():
        func = _fused_adamw
    elif foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adamw
    else:
        func = _single_tensor_adamw

    func(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
        amsgrad=amsgrad,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
        capturable=capturable,
        differentiable=differentiable,
        grad_scale=grad_scale,
        found_inf=found_inf,
        has_complex=has_complex,
    )
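
# Illustrative usage (an editorial addition, not part of the upstream file):
# a minimal smoke test of the AdamW class defined above, runnable via
# ``python -m torch.optim.adamw``. The toy model and data are hypothetical
# placeholders.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 1)
    opt = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2, amsgrad=True)
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    for _ in range(3):
        opt.zero_grad()
        loss = torch.nn.functional.mse_loss(model(x), y)
        loss.backward()
        opt.step()  # dispatches through adamw() to one of the three kernels
        print(float(loss))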