from bitsandbytes.optim.optimizer import Optimizer2State


class LAMB(Optimizer2State):
    def __init__(
        self,
        params,
        lr=1e-3,
        bias_correction=True,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
        amsgrad=False,
        adam_w_mode=True,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=False,
        max_unorm=1.0,
    ):
        """
        Base LAMB optimizer.

        Arguments:
            params (`torch.tensor`):
                The input parameters to optimize.
            lr (`float`, defaults to 1e-3):
                The learning rate.
            bias_correction (`bool`, defaults to `True`):
                Whether to apply bias correction to the first and second-order moments.
            betas (`tuple(float, float)`, defaults to (0.9, 0.999)):
                The beta values are the decay rates of the first and second-order moments of the optimizer.
            eps (`float`, defaults to 1e-8):
                The epsilon value prevents division by zero in the optimizer.
            weight_decay (`float`, defaults to 0):
                The weight decay value for the optimizer.
            amsgrad (`bool`, defaults to `False`):
                Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.
            adam_w_mode (`bool`, defaults to `True`):
                Whether to use the AdamW variant.
            optim_bits (`int`, defaults to 32):
                The number of bits of the optimizer state.
            args (`object`, defaults to `None`):
                An object with additional arguments.
            min_8bit_size (`int`, defaults to 4096):
                The minimum number of elements of the parameter tensors for 8-bit optimization.
            percentile_clipping (`int`, defaults to 100):
                Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.
            block_wise (`bool`, defaults to `False`):
                Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.
            max_unorm (`float`, defaults to 1.0):
                The maximum gradient norm.
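
        Example (a minimal usage sketch added for illustration, not part of the
        original docstring; it assumes a CUDA device, since the bitsandbytes
        optimizers run fused GPU kernels):

        ```python
        import torch

        import bitsandbytes as bnb

        # Toy model for illustration only; any iterable of CUDA parameters works.
        model = torch.nn.Linear(128, 10).cuda()
        optimizer = bnb.optim.LAMB(model.parameters(), lr=1e-3)

        loss = model(torch.randn(4, 128, device="cuda")).sum()
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        ```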
        """
        super().__init__(
            "lamb",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            optim_bits,
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            max_unorm=1.0,
        )


class LAMB8bit(Optimizer2State):
    def __init__(
        self,
        params,
        lr=1e-3,
        bias_correction=True,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
        amsgrad=False,
        adam_w_mode=True,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=False,
        max_unorm=1.0,
    ):
        """
        8-bit LAMB optimizer.

        Arguments:
            params (`torch.tensor`):
                The input parameters to optimize.
            lr (`float`, defaults to 1e-3):
                The learning rate.
            bias_correction (`bool`, defaults to `True`):
                Whether to apply bias correction to the first and second-order moments.
            betas (`tuple(float, float)`, defaults to (0.9, 0.999)):
                The beta values are the decay rates of the first and second-order moments of the optimizer.
            eps (`float`, defaults to 1e-8):
                The epsilon value prevents division by zero in the optimizer.
            weight_decay (`float`, defaults to 0):
                The weight decay value for the optimizer.
            amsgrad (`bool`, defaults to `False`):
                Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.
            adam_w_mode (`bool`, defaults to `True`):
                Whether to use the AdamW variant.
            args (`object`, defaults to `None`):
                An object with additional arguments.
            min_8bit_size (`int`, defaults to 4096):
                The minimum number of elements of the parameter tensors for 8-bit optimization.
            percentile_clipping (`int`, defaults to 100):
                Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.
            block_wise (`bool`, defaults to `False`):
                Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.
            max_unorm (`float`, defaults to 1.0):
                The maximum gradient norm.
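
        Example (a minimal usage sketch added for illustration, not part of the
        original docstring; it assumes a CUDA device, and note that only tensors
        with at least `min_8bit_size` elements receive 8-bit optimizer state):

        ```python
        import torch

        import bitsandbytes as bnb

        # Toy model for illustration only; the weight is large enough for 8-bit state.
        model = torch.nn.Linear(4096, 4096).cuda()
        optimizer = bnb.optim.LAMB8bit(model.parameters(), lr=1e-3)

        loss = model(torch.randn(2, 4096, device="cuda")).sum()
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        ```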
        """
        super().__init__(
            "lamb",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            8,
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            max_unorm=1.0,
        )


class LAMB32bit(Optimizer2State):
    def __init__(
        self,
        params,
        lr=1e-3,
        bias_correction=True,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
        amsgrad=False,
        adam_w_mode=True,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=False,
        max_unorm=1.0,
    ):
        """
        32-bit LAMB optimizer.

        Arguments:
            params (`torch.tensor`):
                The input parameters to optimize.
            lr (`float`, defaults to 1e-3):
                The learning rate.
            bias_correction (`bool`, defaults to `True`):
                Whether to apply bias correction to the first and second-order moments.
            betas (`tuple(float, float)`, defaults to (0.9, 0.999)):
                The beta values are the decay rates of the first and second-order moments of the optimizer.
            eps (`float`, defaults to 1e-8):
                The epsilon value prevents division by zero in the optimizer.
            weight_decay (`float`, defaults to 0):
                The weight decay value for the optimizer.
            amsgrad (`bool`, defaults to `False`):
                Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.
            adam_w_mode (`bool`, defaults to `True`):
                Whether to use the AdamW variant.
            args (`object`, defaults to `None`):
                An object with additional arguments.
            min_8bit_size (`int`, defaults to 4096):
                The minimum number of elements of the parameter tensors for 8-bit optimization.
            percentile_clipping (`int`, defaults to 100):
                Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.
            block_wise (`bool`, defaults to `False`):
                Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.
            max_unorm (`float`, defaults to 1.0):
                The maximum gradient norm.
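
        Example (a minimal usage sketch added for illustration, not part of the
        original docstring; it assumes a CUDA device, and pins the optimizer
        state to 32 bits regardless of parameter size):

        ```python
        import torch

        import bitsandbytes as bnb

        # Toy model for illustration only.
        model = torch.nn.Linear(128, 10).cuda()
        optimizer = bnb.optim.LAMB32bit(model.parameters(), lr=1e-3)

        loss = model(torch.randn(4, 128, device="cuda")).sum()
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        ```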
        """
        super().__init__(
            "lamb",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            32,
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            max_unorm=1.0,
        )