from dataclasses import dataclass, field
from typing import Optional


@dataclass
class ModelConfig:
    r"""
    Configuration class for the models.

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.
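
    For example, a minimal sketch of parsing these options from the command line (assumes only that `transformers`
    is installed and that `ModelConfig` is imported from this module):

    ```python
    from transformers import HfArgumentParser

    # Parse CLI flags such as `--model_name_or_path` or `--use_peft` into a ModelConfig instance.
    parser = HfArgumentParser(ModelConfig)
    (model_config,) = parser.parse_args_into_dataclasses()
    ```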

    Parameters:
        model_name_or_path (`str` or `None`, *optional*, defaults to `None`):
            Model checkpoint for weights initialization.
        model_revision (`str`, *optional*, defaults to `"main"`):
            Specific model version to use. It can be a branch name, a tag name, or a commit id.
        torch_dtype (`Literal["auto", "bfloat16", "float16", "float32"]` or `None`, *optional*, defaults to `None`):
            Override the default `torch.dtype` and load the model under this dtype. Possible values are

                - `"bfloat16"`: `torch.bfloat16`
                - `"float16"`: `torch.float16`
                - `"float32"`: `torch.float32`
                - `"auto"`: Automatically derive the dtype from the model's weights.

        trust_remote_code (`bool`, *optional*, defaults to `False`):
            Whether to allow for custom models defined on the Hub in their own modeling files. This option should only
            be set to `True` for repositories you trust and in which you have read the code, as it will execute code
            present on the Hub on your local machine.
        attn_implementation (`str` or `None`, *optional*, defaults to `None`):
            Which attention implementation to use. You can run `--attn_implementation=flash_attention_2`, in which case
            you must install this manually by running `pip install flash-attn --no-build-isolation`.
        use_peft (`bool`, *optional*, defaults to `False`):
            Whether to use PEFT for training (the LoRA fields below correspond to `peft.LoraConfig` arguments; see
            the sketch after this parameter list).
        lora_r (`int`, *optional*, defaults to `16`):
            LoRA R value.
        lora_alpha (`int`, *optional*, defaults to `32`):
            LoRA alpha.
        lora_dropout (`float`, *optional*, defaults to `0.05`):
            LoRA dropout.
        lora_target_modules (`Union[str, list[str]]` or `None`, *optional*, defaults to `None`):
            LoRA target modules.
        lora_target_parameters (`Union[str, list[str]]` or `None`, *optional*, defaults to `None`):
            List of target parameters for LoRA.
        lora_modules_to_save (`list[str]` or `None`, *optional*, defaults to `None`):
            Model layers to unfreeze & train.
        lora_task_type (`str`, *optional*, defaults to `"CAUSAL_LM"`):
            Task type to pass for LoRA (use `"SEQ_CLS"` for reward modeling).
        use_rslora (`bool`, *optional*, defaults to `False`):
            Whether to use Rank-Stabilized LoRA, which sets the adapter scaling factor to `lora_alpha/√r`, instead of
            the original default value of `lora_alpha/r`.
        use_dora (`bool`, *optional*, defaults to `False`):
            Enable [Weight-Decomposed Low-Rank Adaptation (DoRA)](https://huggingface.co/papers/2402.09353). This
            technique decomposes the updates of the weights into two parts, magnitude and direction. Direction is
            handled by normal LoRA, whereas the magnitude is handled by a separate learnable parameter. This can
            improve the performance of LoRA, especially at low ranks. Right now, DoRA only supports linear and Conv2D
            layers. DoRA introduces a bigger overhead than pure LoRA, so it is recommended to merge weights for
            inference.
        load_in_8bit (`bool`, *optional*, defaults to `False`):
            Whether to use 8 bit precision for the base model. Works only with LoRA.
        load_in_4bit (`bool`, *optional*, defaults to `False`):
            Whether to use 4 bit precision for the base model. Works only with LoRA.
        bnb_4bit_quant_type (`str`, *optional*, defaults to `"nf4"`):
            Quantization type (`"fp4"` or `"nf4"`).
        use_bnb_nested_quant (`bool`, *optional*, defaults to `False`):
            Whether to use nested quantization.
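
    The LoRA and quantization fields line up with the arguments of `peft.LoraConfig` and
    `transformers.BitsAndBytesConfig`. As a rough sketch of one possible mapping (the helper names below are
    illustrative, not part of this module; assumes `peft` and `bitsandbytes` are installed):

    ```python
    from peft import LoraConfig
    from transformers import BitsAndBytesConfig

    def to_peft_config(config):
        # Hypothetical helper: build a PEFT LoRA config from the matching ModelConfig fields.
        return LoraConfig(
            task_type=config.lora_task_type,
            r=config.lora_r,
            lora_alpha=config.lora_alpha,
            lora_dropout=config.lora_dropout,
            target_modules=config.lora_target_modules,
            modules_to_save=config.lora_modules_to_save,
            use_rslora=config.use_rslora,
            use_dora=config.use_dora,
        )

    def to_quantization_config(config):
        # Hypothetical helper: build a bitsandbytes config; only meaningful when
        # 8-bit or 4-bit loading is requested.
        return BitsAndBytesConfig(
            load_in_8bit=config.load_in_8bit,
            load_in_4bit=config.load_in_4bit,
            bnb_4bit_quant_type=config.bnb_4bit_quant_type,
            bnb_4bit_use_double_quant=config.use_bnb_nested_quant,
        )
    ```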
    Nhelpz,Model checkpoint for weights initialization.)defaultmetadatamodel_name_or_pathmainzSSpecific model version to use. It can be a branch name, a tag name, or a commit id.model_revisionzGOverride the default `torch.dtype` and load the model under this dtype.)autobfloat16float16float32)r   choicestorch_dtypeFzWhether to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.trust_remote_codezWhich attention implementation to use. You can run `--attn_implementation=flash_attention_2`, in which case you must install this manually by running `pip install flash-attn --no-build-isolation`.attn_implementationz!Whether to use PEFT for training.use_peft   zLoRA R value.lora_r    zLoRA alpha.
lora_alphag?zLoRA dropout.lora_dropoutzLoRA target modules.lora_target_modulesz#List of target parameters for LoRA.lora_target_parametersz!Model layers to unfreeze & train.lora_modules_to_save	CAUSAL_LMz?Task type to pass for LoRA (use 'SEQ_CLS' for reward modeling).lora_task_typeu   Whether to use Rank-Stabilized LoRA, which sets the adapter scaling factor to `lora_alpha/√r`, instead of the original default value of `lora_alpha/r`.
use_rsloraa  Enable Weight-Decomposed Low-Rank Adaptation (DoRA). This technique decomposes the updates of the weights into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the magnitude is handled by a separate learnable parameter. This can improve the performance of LoRA, especially at low ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a bigger overhead than pure LoRA, so it is recommended to merge weights for inference.use_dorazHWhether to use 8 bit precision for the base model. Works only with LoRA.load_in_8bitzHWhether to use 4 bit precision for the base model. Works only with LoRA.load_in_4bitnf4zQuantization type.fp4bnb_4bit_quant_typez#Whether to use nested quantization.use_bnb_nested_quantc                 C   sF   | j r
| jr
tdt| jdrt| jdkr!| jd | _d S d S d S )Nz8You can't use 8 bit and 4 bit precision at the same time__len__   r   )r!   r"   
ValueErrorhasattrr   len)self r-   L/home/ubuntu/.local/lib/python3.10/site-packages/trl/trainer/model_config.py__post_init__   s
   zModelConfig.__post_init__) __name__
__module____qualname____doc__r   r	   r   str__annotations__r   r   r   boolr   r   r   intr   r   floatr   listr   r   r   r   r    r!   r"   r%   r&   r/   r-   r-   r-   r.   r      s   
 ?
r   N)dataclassesr   r   typingr   r   r-   r-   r-   r.   <module>   s   