"""
Copyright NVIDIA/apex
This file is adapted from fused adam in NVIDIA/apex, commit 6bd01c4
"""

import torch

from .multi_tensor_apply import MultiTensorApply

multi_tensor_applier = MultiTensorApply(2048 * 32)

from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import FusedAdamBuilder


class FusedAdam(torch.optim.Optimizer):
    """Implements Adam algorithm.

    Currently GPU-only.  Requires Apex to be installed via
    ``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.

    This version of fused Adam implements 2 fusions.

      * Fusion of the Adam update's elementwise operations
      * A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
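
    Conceptually (a sketch, not the exact kernel signature), each optimizer step
    issues one batched call per parameter dtype instead of one kernel launch per
    parameter::

        multi_tensor_applier(fused_adam_op, overflow_buf,
                             [grads, params, exp_avgs, exp_avg_sqs],
                             lr, beta1, beta2, eps, step, adam_w_mode,
                             bias_correction, weight_decay)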

    :class:`apex.optimizers.FusedAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,
    or ``torch.optim.Adam`` with ``adam_w_mode=False``::

        opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
        ...
        opt.step()

    :class:`apex.optimizers.FusedAdam` may be used with or without Amp.  If you wish to use :class:`FusedAdam` with Amp,
    you may choose any ``opt_level``::

        opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
        model, opt = amp.initialize(model, opt, opt_level="O0" or "O1" or "O2")
        ...
        opt.step()

    In general, ``opt_level="O1"`` is recommended.


    .. warning::
        A previous version of :class:`FusedAdam` allowed a number of additional arguments to ``step``.  These additional arguments
        are now deprecated and unnecessary.

    Adam was proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False) NOT SUPPORTED in FusedAdam!
        adam_w_mode (boolean, optional): Apply L2 regularization or weight decay.
            True for decoupled weight decay (also known as AdamW) (default: True)
        set_grad_none (bool, optional): whether to set grad to None when the
            zero_grad() method is called. (default: True)
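
    A minimal construction sketch for the DeepSpeed build of this optimizer
    (illustrative only; ``model`` is assumed to be an existing ``torch.nn.Module``)::

        from deepspeed.ops.adam import FusedAdam
        opt = FusedAdam(model.parameters(), lr=1e-3, adam_w_mode=True, weight_decay=0.01)
        ...
        opt.step()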

    .. _Adam - A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    MbP?Tg?g+?:0yE>        Fc
                    sj   |rt dt|||||d}
tt| ||
 |rdnd| _|	| _t  }t	 
dg| _|j| _d S )Nz/FusedAdam does not support the AMSGrad variant.)lrbias_correctionbetasepsweight_decayr   r   )RuntimeErrordictsuperr   __init__adam_w_modeset_grad_noner   loadr   	IntTensor_dummy_overflow_bufmulti_tensor_adam)selfparamsr   r   r   r   r   r   amsgradr   defaultsfused_adam_cuda	__class__ Q/home/ubuntu/.local/lib/python3.10/site-packages/deepspeed/ops/adam/fused_adam.pyr   L   s   
zFusedAdam.__init__c                    s<   | j r| jD ]}|d D ]}d |_qqd S tt|   d S )Nr   )r   param_groupsgradr   r   	zero_grad)r   grouppr   r!   r"   r%   c   s   
zFusedAdam.zero_gradNc                 C   s  t dd ||||fD rtdd}|dur| }| jD ]c}t|d dkr)q|d r/dnd}	|d	 \}
}d
|vr?d|d
< g g g g f\}}}}g g g g f\}}}}g g g g f\}}}}|d D ]}|jdu riqa|jjjrrtd| j| }t|dkr|d
d|d
< t	
|j|d< t	
|j|d< |jt	jkr||jj ||j ||d  ||d  qa|jt	jkr||j || ||d  ||d  qa|jt	jkr||jj ||j ||d  ||d  qatdt|dkr)|d
  d7  < t| j| j||||g|d |
||d |d
 | j|	|d  t|dkrU|d
  d7  < t| j| j||||g|d |
||d |d
 | j|	|d  t|dkr|d
  d7  < t| j| j||||g|d |
||d |d
 | j|	|d  q|S )a+  Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
        """
        if any(p is not None for p in [grads, output_params, scale, grad_norms]):
            raise RuntimeError(
                'FusedAdam has been updated.  Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.'
            )
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            if len(group['params']) == 0:
                continue
            bias_correction = 1 if group['bias_correction'] else 0
            beta1, beta2 = group['betas']

            # Assume the same step across the group for now; the group-level counter
            # is kept for backward compatibility with existing checkpoints.
            if 'step' not in group:
                group['step'] = 0

            # Create lists for multi-tensor apply, one set per supported dtype.
            g_16, p_16, m_16, v_16 = [], [], [], []
            g_bf, p_bf, m_bf, v_bf = [], [], [], []
            g_32, p_32, m_32, v_32 = [], [], [], []

            for p in group['params']:
                if p.grad is None:
                    continue
                if p.grad.data.is_sparse:
                    raise RuntimeError(
                        'FusedAdam does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]
                if len(state) == 0:
                    # Per-tensor step count, initialized from the group counter for
                    # backward compatibility with existing checkpoints.
                    state['step'] = group.get('step', 0)
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                if p.dtype == torch.float16:
                    g_16.append(p.grad.data)
                    p_16.append(p.data)
                    m_16.append(state['exp_avg'])
                    v_16.append(state['exp_avg_sq'])
                elif p.dtype == torch.bfloat16:
                    g_bf.append(p.grad)
                    p_bf.append(p)
                    m_bf.append(state['exp_avg'])
                    v_bf.append(state['exp_avg_sq'])
                elif p.dtype == torch.float32:
                    g_32.append(p.grad.data)
                    p_32.append(p.data)
                    m_32.append(state['exp_avg'])
                    v_32.append(state['exp_avg_sq'])
                else:
                    raise RuntimeError('FusedAdam only support fp16, bf16 and fp32.')

            if len(g_16) > 0:
                state['step'] += 1
                multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16],
                                     group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
                                     bias_correction, group['weight_decay'])

            if len(g_bf) > 0:
                state['step'] += 1
                multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_bf, p_bf, m_bf, v_bf],
                                     group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
                                     bias_correction, group['weight_decay'])

            if len(g_32) > 0:
                state['step'] += 1
                multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32],
                                     group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
                                     bias_correction, group['weight_decay'])

        return loss
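

# ---------------------------------------------------------------------------
# Illustrative reference (not part of the original module and never called by
# FusedAdam): a plain-PyTorch sketch of the decoupled-weight-decay (AdamW-style)
# update that the fused multi_tensor_adam kernel batches across parameters when
# adam_w_mode is enabled.  The name `_reference_adamw_update` is hypothetical,
# and details such as the exact placement of `eps` may differ from the kernel.
# ---------------------------------------------------------------------------
def _reference_adamw_update(param, grad, exp_avg, exp_avg_sq, lr, beta1, beta2, eps, step, weight_decay):
    # Decoupled weight decay: shrink the weights directly instead of adding
    # weight_decay * param to the gradient.
    param.mul_(1 - lr * weight_decay)
    # Update biased first and second moment estimates in place.
    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
    # Bias corrections for the moment estimates.
    bias_correction1 = 1 - beta1**step
    bias_correction2 = 1 - beta2**step
    denom = (exp_avg_sq / bias_correction2).sqrt().add_(eps)
    # Final parameter update.
    param.addcdiv_(exp_avg, denom, value=-lr / bias_correction1)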