import torch
from torch.optim.optimizer import Optimizer

from .types import OptFloat, OptLossClosure, Params


class SGDLRD(Optimizer):
    r"""Implements stochastic gradient descent with learning rate dropout
    (optionally with momentum).

    Momentum is based on the formula from
    `On the importance of initialization and momentum in deep learning`__.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        dropout (float, optional): probability of keeping each coordinate's
            learning rate in the per-step Bernoulli mask; with the default
            of 0 the mask is all zeros and no update is applied (default: 0)

    Example:
        >>> optimizer = SGDLRD(model.parameters(), lr=0.1, momentum=0.9,
        ...                    dropout=0.5)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
    """

    def __init__(
        self,
        params: Params,
        lr: float = 1e-2,
        momentum: float = 0.0,
        dampening: float = 0.0,
        weight_decay: float = 0.0,
        dropout: float = 0.0,
    ) -> None:
        if lr <= 0.0:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if momentum < 0.0:
            raise ValueError('Invalid momentum value: {}'.format(momentum))
        if dampening < 0.0:
            raise ValueError('Invalid dampening value: {}'.format(dampening))
        if weight_decay < 0.0:
            raise ValueError(
                'Invalid weight_decay value: {}'.format(weight_decay)
            )
        defaults = dict(
            lr=lr,
            momentum=momentum,
            dampening=dampening,
            weight_decay=weight_decay,
            dropout=dropout,
        )
        super(SGDLRD, self).__init__(params, defaults)

    def step(self, closure: OptLossClosure = None) -> OptFloat:
        """Performs a single optimization step.

        Arguments:
            closure: A closure that reevaluates the model and returns the loss.
        Nr   r   r   r
   r   r   momentum_bufferr   r   )param_groupsgraddatatorch	ones_like	bernoulliadd_stateclonedetachmul_)r   r   lossgroupr   r   r   pd_pmmaskparam_statebuf
lr_dropoutI_bufr   r   r   step;   s<   



zSGDLRD.step)r   r   r   r   r	   )N)__name__
__module____qualname____doc__r   floatr   r   r   r4   __classcell__r   r   r   r   r      s.    r   )r"   torch.optim.optimizerr   typesr   r   r   r   r   r   r   r   <module>   s    
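
# Usage sketch (illustrative only; assumes this module is importable as
# torch_optimizer.sgdlrd and that `data_loader` yields (input, target)
# batches -- both names are placeholders, not part of this module):
#
#     import torch
#     import torch.nn.functional as F
#     from torch_optimizer.sgdlrd import SGDLRD
#
#     model = torch.nn.Linear(10, 1)
#     optimizer = SGDLRD(
#         model.parameters(), lr=0.1, momentum=0.9, dropout=0.5
#     )
#     for x, y in data_loader:
#         optimizer.zero_grad()
#         loss = F.mse_loss(model(x), y)
#         loss.backward()
#         optimizer.step()  # each coordinate updates with probability 0.5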