"""Noam learning rate scheduler module."""
from typing import Union

import warnings

import torch
from torch.optim.lr_scheduler import _LRScheduler

from funasr.schedulers.abs_scheduler import AbsBatchStepScheduler


class NoamLR(_LRScheduler, AbsBatchStepScheduler):
    """The LR scheduler proposed by Noam

    Ref:
        "Attention Is All You Need", https://arxiv.org/pdf/1706.03762.pdf

    FIXME(kamo): PyTorch doesn't provide _LRScheduler as public class,
     thus the behaviour isn't guaranteed at forward PyTorch version.

    NOTE(kamo): The "model_size" in original implementation is derived from
     the model, but in this implementation, this parameter is a constant value.
     You need to change it if the model is changed.

    """

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        model_size: Union[int, float] = 320,
        warmup_steps: Union[int, float] = 25000,
        last_epoch: int = -1,
    ):
        self.model_size = model_size
        self.warmup_steps = warmup_steps

        lr = list(optimizer.param_groups)[0]["lr"]
        new_lr = self.lr_for_WarmupLR(lr)
        warnings.warn(
            f"NoamLR is deprecated. "
            f"Use WarmupLR(warmup_steps={warmup_steps}) with Optimizer(lr={new_lr})",
        )

        # super().__init__() must be invoked last
        # because step() is also invoked in __init__()
        super().__init__(optimizer, last_epoch)

    def lr_for_WarmupLR(self, lr: float) -> float:
        return lr / self.model_size**0.5 / self.warmup_steps**0.5

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(model_size={self.model_size}, "
            f"warmup_steps={self.warmup_steps})"
        )

    def get_lr(self):
        step_num = self.last_epoch + 1
        return [
            lr
            * self.model_size ** (-0.5)
            * min(step_num ** (-0.5), step_num * self.warmup_steps ** (-1.5))
            for lr in self.base_lrs
        ]