from typing import List, Optional, Tuple, Union

import torch
from torch import Tensor
from torch.utils._foreach_utils import _get_fused_kernels_supported_devices

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _dispatch_sqrt,
    _foreach_doc,
    _fused_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _get_value,
    _maximize_doc,
    _stack_if_compiling,
    _use_grad_for_differentiable,
    _view_as_real,
    DeviceDict,
    Optimizer,
    ParamsT,
)

__all__ = ["Adam", "adam"]


class Adam(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0,
        amsgrad: bool = False,
        *,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        capturable: bool = False,
        differentiable: bool = False,
        fused: Optional[bool] = None,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if isinstance(lr, Tensor) and foreach and not capturable:
            raise ValueError(
                "lr as a Tensor is not supported for capturable=False and foreach=True"
            )
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            amsgrad=amsgrad,
            maximize=maximize,
            foreach=foreach,
            capturable=capturable,
            differentiable=differentiable,
            fused=fused,
        )
        super().__init__(params, defaults)

        if fused:
            if differentiable:
                raise RuntimeError("`fused` does not support `differentiable`")
            # The fused kernels understand inf-checks and gradient scaling, so
            # AMP can hand those tensors straight through to `step`.
            self._step_supports_amp_scaling = True
            fused_supported_devices = _get_fused_kernels_supported_devices()
            if not all(
                p.device.type in fused_supported_devices and torch.is_floating_point(p)
                for pg in self.param_groups
                for p in pg["params"]
            ):
                raise RuntimeError(
                    "`fused=True` requires all the params to be floating point Tensors "
                    f"of supported devices: {fused_supported_devices}."
                )
            if foreach:
                raise RuntimeError("`fused` and `foreach` cannot be `True` together.")

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            # Checkpoints from older versions may lack newer group options.
            group.setdefault("amsgrad", False)
            group.setdefault("maximize", False)
            group.setdefault("foreach", None)
            group.setdefault("capturable", False)
            group.setdefault("differentiable", False)
            fused = group.setdefault("fused", None)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                # Promote plain-number step counts from old checkpoints to tensors.
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(
                            step_val,
                            dtype=_get_scalar_dtype(is_fused=fused),
                            device=p.device,
                        )
                        if group["capturable"] or group["fused"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )
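
    # Sketch of the flag interplay enforced above (illustrative only; `model`
    # is a hypothetical module):
    #
    #     Adam(model.parameters(), lr=1e-3, fused=True)                  # ok
    #     Adam(model.parameters(), fused=True, foreach=True)             # RuntimeError
    #     Adam(model.parameters(), lr=torch.tensor(1e-3), foreach=True)  # needs capturable=True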
zAdam.__setstate__c                 C   sn  d}|d D ]}	|	j d ur|t|	O }||	 |	j jr!td||	j  | j|	 }
t|
dkrt|d s:|d rHtjdt	|d d|	j
d	ntjd
t	 d|
d< tj|	tjd|
d< tj|	tjd|
d< |d rttj|	tjd|
d< ||
d  ||
d  |d r||
d  |d r|
d jrtd|d rt|d r|d std||
d  q|S )NFr$   zJAdam does not support sparse gradients, please consider SparseAdam insteadr   r!   r#   r6   rH   rJ   r*   rL   rG   )memory_formatexp_avg
exp_avg_sqr)   max_exp_avg_sqr"   zB`requires_grad` is not supported for `step` in differentiable moder   r%   r+   )gradr/   
is_complexappend	is_sparser?   rO   rQ   zerosr   r-   rT   
zeros_likepreserve_formatrequires_gradrR   )rC   rU   params_with_gradgradsexp_avgsexp_avg_sqsmax_exp_avg_sqsstate_stepshas_complexr3   rO   r6   r6   r7   _init_groupw   sh   








zAdam._init_groupc                 C   s   |    d}|dur!t  | }W d   n1 sw   Y  | jD ]S}g }g }g }g }g }g }	|d \}
}| |||||||	}t||||||	f|d ||
||d |d |d |d |d |d	 |d
 |d t| ddt| ddd q$|S )zPerform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
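
        Example (an illustrative sketch; ``optimizer``, ``model``, ``loss_fn``,
        ``input``, and ``target`` are assumed to already exist)::

            >>> def closure():
            ...     optimizer.zero_grad()
            ...     loss = loss_fn(model(input), target)
            ...     loss.backward()
            ...     return loss
            >>> optimizer.step(closure)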
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            exp_avgs: List[Tensor] = []
            exp_avg_sqs: List[Tensor] = []
            max_exp_avg_sqs: List[Tensor] = []
            state_steps: List[Tensor] = []
            beta1, beta2 = group["betas"]

            has_complex = self._init_group(
                group,
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
            )

            adam(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
                amsgrad=group["amsgrad"],
                has_complex=has_complex,
                beta1=beta1,
                beta2=beta2,
                lr=group["lr"],
                weight_decay=group["weight_decay"],
                eps=group["eps"],
                maximize=group["maximize"],
                foreach=group["foreach"],
                capturable=group["capturable"],
                differentiable=group["differentiable"],
                fused=group["fused"],
                grad_scale=getattr(self, "grad_scale", None),
                found_inf=getattr(self, "found_inf", None),
            )

        return loss


Adam.__doc__ = (
    r"""Implements Adam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \beta_1, \beta_2
                \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)}          \\
            &\hspace{13mm}      \lambda \text{ (weight decay)},  \: \textit{amsgrad},
                \:\textit{maximize}                                                              \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ (first moment)},
                v_0\leftarrow 0 \text{ (second moment)},\: \widehat{v_0}^{max}\leftarrow 0\\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\

            &\hspace{5mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{10mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})         \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{5mm}\textbf{if} \: \lambda \neq 0                                           \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{5mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                   \\
            &\hspace{5mm}\textbf{if} \: amsgrad                                                  \\
            &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
                \widehat{v_t})                                                                   \\
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big)                                 \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
    """
    + rf"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
            is not yet supported for all our implementations. Please use a float
            LR if you are not also specifying fused=True or capturable=True.
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
        {_foreach_doc}
        {_maximize_doc}
        {_capturable_doc}
        {_differentiable_doc}
        {_fused_doc}
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
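
    Example (an illustrative sketch; the linear model and random data are toy
    stand-ins)::

        >>> model = torch.nn.Linear(10, 1)
        >>> optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
        >>> loss = model(torch.randn(4, 10)).pow(2).mean()
        >>> optimizer.zero_grad()
        >>> loss.backward()
        >>> optimizer.step()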

    """
)


def _single_tensor_adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
):
    assert grad_scale is None and found_inf is None

    if torch.jit.is_scripting():
        # TorchScript cannot see that the ops below have overloads for both
        # float and Tensor lrs, so pin lr down to the common float case.
        assert isinstance(lr, float)

    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]

        # When compiling, the compiler handles the cudagraph check.
        if not torch._utils.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), (
                "If capturable=True, params and state_steps must be on supported "
                f"devices: {capturable_supported_devices}."
            )

        # update step
        step_t += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)
            if amsgrad:
                max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i])
            param = torch.view_as_real(param)

        # Decay the first and second moment running average coefficient
        exp_avg.lerp_(grad, 1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)

        if capturable or differentiable:
            step = step_t

            bias_correction1 = 1 - beta1**step
            bias_correction2 = 1 - beta2**step

            step_size = lr / bias_correction1
            step_size_neg = step_size.neg()

            bias_correction2_sqrt = bias_correction2.sqrt()

            if amsgrad:
                # Maintain the maximum of all 2nd-moment running averages so far
                if differentiable:
                    max_exp_avg_sq = max_exp_avg_sqs[i].clone()
                else:
                    max_exp_avg_sq = max_exp_avg_sqs[i]

                max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq))

                # Fold the one-element step-size math into the denominator to
                # avoid an extra param-sized intermediate tensor.
                denom = (
                    max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)
                ).add_(eps / step_size_neg)
            else:
                denom = (
                    exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)
                ).add_(eps / step_size_neg)

            param.addcdiv_(exp_avg, denom)
        else:
            step = _get_value(step_t)

            bias_correction1 = 1 - beta1**step
            bias_correction2 = 1 - beta2**step

            step_size = lr / bias_correction1

            bias_correction2_sqrt = _dispatch_sqrt(bias_correction2)

            if amsgrad:
                # Maintain the maximum of all 2nd-moment running averages so far
                torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])

                # Use the max. for normalizing the running avg. of the gradient
                denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps)
            else:
                denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)

            param.addcdiv_(exp_avg, denom, value=-step_size)

        # Lastly, switch back to the complex view
        if amsgrad and torch.is_complex(params[i]):
            max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i])
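
# For orientation, the non-capturable update above is, in plain tensor terms
# (an illustrative sketch, not part of this module's API; p, g, m, v are the
# param, grad, and the two moment buffers, t the step count):
#
#     m.lerp_(g, 1 - beta1)                          # m_t
#     v.mul_(beta2).addcmul_(g, g, value=1 - beta2)  # v_t
#     m_hat = m / (1 - beta1**t)                     # bias-corrected m_t
#     v_hat = v / (1 - beta2**t)                     # bias-corrected v_t
#     p -= lr * m_hat / (v_hat.sqrt() + eps)
#
# The capturable/differentiable branch folds lr and the bias corrections into
# the denominator instead, so every op stays on-device and graph-capturable.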

def _multi_tensor_adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
):
    if len(params) == 0:
        return

    if isinstance(lr, Tensor) and not capturable:
        raise RuntimeError(
            "lr as a Tensor is not supported for capturable=False and foreach=True"
        )

    # When compiling, the compiler handles the cudagraph check.
    if not torch._utils.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), (
            "If capturable=True, params and state_steps must be on supported "
            f"devices: {capturable_supported_devices}."
        )

    assert grad_scale is None and found_inf is None

    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
    )
    for (
        device_params,
        device_grads,
        device_exp_avgs,
        device_exp_avg_sqs,
        device_max_exp_avg_sqs,
        device_state_steps,
    ), _ in grouped_tensors.values():
        # Handle complex parameters as pairs of real views
        if has_complex:
            if amsgrad:
                _view_as_real(
                    device_params,
                    device_grads,
                    device_exp_avgs,
                    device_exp_avg_sqs,
                    device_max_exp_avg_sqs,
                )
            else:
                _view_as_real(
                    device_params, device_grads, device_exp_avgs, device_exp_avg_sqs
                )

        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        # Update steps. If steps live on CPU, foreach falls back to a slow
        # per-tensor loop that would wrap the scalar 1 into a Tensor on every
        # iteration, so wrap it once here; alpha selects the right overload.
        if not torch._utils.is_compiling() and device_state_steps[0].is_cpu:
            torch._foreach_add_(
                device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(device_state_steps, 1)

        if weight_decay != 0:
            if maximize:
                # Re-use the intermediate memory already allocated for maximize
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(
                    device_grads, device_params, alpha=weight_decay
                )

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1)

        torch._foreach_mul_(device_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(
            device_exp_avg_sqs, device_grads, device_grads, 1 - beta2
        )

        # Free the intermediate since it is no longer needed, saving peak memory
        del device_grads

        if capturable:
            bias_correction1 = torch._foreach_pow(beta1, device_state_steps)
            bias_correction2 = torch._foreach_pow(beta2, device_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_correction1, 1)
            torch._foreach_sub_(bias_correction2, 1)
            # bias_correction1 would need to be negated later anyway, so leave it
            torch._foreach_neg_(bias_correction2)

            # foreach_div doesn't allow a scalar as the first arg
            torch._foreach_div_(bias_correction1, lr)
            torch._foreach_reciprocal_(bias_correction1)

            torch._foreach_sqrt_(bias_correction2)

            # Re-assign for clarity; only minimal intermediates are kept:
            #   step_size = - lr / (1 - beta1 ^ t)
            #   bias_correction2_sqrt = sqrt(1 - beta2 ^ t)
            step_size = bias_correction1
            bias_correction2_sqrt = bias_correction2

            if amsgrad:
                # Maintain the maximum of all 2nd-moment running averages so far
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_div_(exp_avg_sq_sqrt, step_size)

            torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt)
        else:
            bias_correction1 = [
                1 - beta1 ** _get_value(step) for step in device_state_steps
            ]
            bias_correction2 = [
                1 - beta2 ** _get_value(step) for step in device_state_steps
            ]

            step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])

            bias_correction2_sqrt = [_dispatch_sqrt(bc) for bc in bias_correction2]

            if amsgrad:
                # Maintain the maximum of all 2nd-moment running averages so far
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_addcdiv_(
                device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size
            )
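
# The foreach path relies on the ``torch._foreach_*`` horizontal-fusion
# primitives, which apply one op across a whole list of same-device tensors
# with far fewer kernel launches than a Python loop. A minimal illustrative
# sketch (toy values):
#
#     avgs = [torch.zeros(3), torch.zeros(3)]
#     grads = [torch.ones(3), torch.full((3,), 2.0)]
#     torch._foreach_lerp_(avgs, grads, 0.1)  # avgs[i] += 0.1 * (grads[i] - avgs[i])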

def _fused_adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,  # Needed for consistency.
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,  # Needed for consistency.
    differentiable: bool,
) -> None:
    if not params:
        return
    if differentiable:
        raise RuntimeError("Adam with fused=True does not support differentiable=True")

    grad_scale_dict: DeviceDict = (
        {grad_scale.device: grad_scale} if grad_scale is not None else {}
    )
    found_inf_dict: DeviceDict = (
        {found_inf.device: found_inf} if found_inf is not None else {}
    )

    # Only shuffle the lr across devices when it is a Tensor off the CPU;
    # otherwise prefer treating it as a plain scalar.
    lr_dict: Optional[DeviceDict] = (
        {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None
    )
    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
    )
    for (device, _), (
        (
            device_params,
            device_grads,
            device_exp_avgs,
            device_exp_avg_sqs,
            device_max_exp_avg_sqs,
            device_state_steps,
        ),
        _,
    ) in grouped_tensors.items():
        device_grad_scale, device_found_inf = None, None
        if grad_scale is not None:
            device_grad_scale = grad_scale_dict.setdefault(
                device, grad_scale.to(device, non_blocking=True)
            )
        if found_inf is not None:
            device_found_inf = found_inf_dict.setdefault(
                device, found_inf.to(device, non_blocking=True)
            )
        if lr_dict is not None and device not in lr_dict:
            lr_dict[device] = lr.to(device=device, non_blocking=True)
            lr = lr_dict[device]
        torch._foreach_add_(device_state_steps, 1)
        torch._fused_adam_(
            device_params,
            device_grads,
            device_exp_avgs,
            device_exp_avg_sqs,
            device_max_exp_avg_sqs,
            device_state_steps,
            amsgrad=amsgrad,
            lr=lr,
            beta1=beta1,
            beta2=beta2,
            weight_decay=weight_decay,
            eps=eps,
            maximize=maximize,
            grad_scale=device_grad_scale,
            found_inf=device_found_inf,
        )
        if device_found_inf is not None:
            # A found inf cancels this step: roll the step counters back.
            torch._foreach_sub_(
                device_state_steps, [device_found_inf] * len(device_state_steps)
            )

@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adam)
def adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with
    # torchscript (issue #70627); kept as kwargs with defaults for now since the
    # functional API is compiled by torch/distributed/optim.
    foreach: Optional[bool] = None,
    capturable: bool = False,
    differentiable: bool = False,
    fused: Optional[bool] = None,
    grad_scale: Optional[Tensor] = None,
    found_inf: Optional[Tensor] = None,
    has_complex: bool = False,
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
):
    r"""Functional API that performs Adam algorithm computation.

    See :class:`~torch.optim.Adam` for details.
    """
    # Respect a user-supplied False/True for foreach or fused; only pick a
    # default when neither was specified. We default to foreach rather than
    # fused to give the fused implementation more bake-in time, even though it
    # is typically faster.
    if fused is None and foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )
        # Do not flip on foreach for the unsupported case of a Tensor lr with
        # capturable=False.
        if foreach and isinstance(lr, Tensor) and not capturable:
            foreach = False
    if fused is None:
        fused = False
    if foreach is None:
        foreach = False

    # This check is slow during compilation, so it is skipped there; if it is
    # strictly needed it can be added back in dynamo.
    if not torch._utils.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    if fused and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with fused optimizers")

    if fused and not torch.jit.is_scripting():
        func = _fused_adam
    elif foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adam
    else:
        func = _single_tensor_adam

    func(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
        amsgrad=amsgrad,
        has_complex=has_complex,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
        capturable=capturable,
        differentiable=differentiable,
        grad_scale=grad_scale,
        found_inf=found_inf,
    )
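
# A minimal sketch of driving the functional ``adam`` API directly
# (illustrative only; most callers should use the ``Adam`` class above, which
# allocates and tracks this state automatically):
#
#     p, g = torch.randn(3), torch.randn(3)
#     m, v = torch.zeros(3), torch.zeros(3)
#     step = torch.tensor(0.0)
#     adam([p], [g], [m], [v], [], [step], amsgrad=False, beta1=0.9,
#          beta2=0.999, lr=1e-3, weight_decay=0.0, eps=1e-8, maximize=False)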