"""Warm up learning rate scheduler module."""

from typing import Union

import torch
from torch.optim.lr_scheduler import _LRScheduler

from funasr.schedulers.abs_scheduler import AbsBatchStepScheduler


class WarmupLR(_LRScheduler, AbsBatchStepScheduler):
    """The WarmupLR scheduler.

    This scheduler is almost the same as the NoamLR scheduler, except for the
    following difference:

    NoamLR:
        lr = optimizer.lr * model_size ** -0.5
             * min(step ** -0.5, step * warmup_step ** -1.5)
    WarmupLR:
        lr = optimizer.lr * warmup_step ** 0.5
             * min(step ** -0.5, step * warmup_step ** -1.5)

    Note that the maximum lr equals optimizer.lr in this scheduler.

    """

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        warmup_steps: Union[int, float] = 25000,
        last_epoch: int = -1,
    ):
        self.warmup_steps = warmup_steps

        # super().__init__() must be called after setting self.warmup_steps,
        # because step() (which uses it via get_lr()) is invoked inside __init__().
        super().__init__(optimizer, last_epoch)

    def __repr__(self):
        return f"{self.__class__.__name__}(warmup_steps={self.warmup_steps})"

    def get_lr(self):
        step_num = self.last_epoch + 1
        return [
            lr
            * self.warmup_steps**0.5
            * min(step_num**-0.5, step_num * self.warmup_steps**-1.5)
            for lr in self.base_lrs
        ]