import os
import sys
from dataclasses import dataclass, field
from typing import Any, Optional

from transformers import is_bitsandbytes_available

from ..core import flatten_dict


@dataclass
class AlignPropConfig:
    r"""
    Configuration class for the [`AlignPropTrainer`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.
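
    For example, a training script can expose every field below as a command-line flag (a minimal sketch; the
    script name and flag values are illustrative):

    ```python
    # launched as: python my_script.py --num_epochs 200 --log_with wandb
    from transformers import HfArgumentParser

    from trl import AlignPropConfig

    parser = HfArgumentParser(AlignPropConfig)
    (config,) = parser.parse_args_into_dataclasses()
    ```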

    Parameters:
        exp_name (`str`, *optional*, defaults to `os.path.basename(sys.argv[0])[: -len(".py")]`):
            Name of this experiment (defaults to the file name without the extension).
        run_name (`str`, *optional*, defaults to `""`):
            Name of this run.
        seed (`int`, *optional*, defaults to `0`):
            Random seed for reproducibility.
        log_with (`str` or `None`, *optional*, defaults to `None`):
            Log with either `"wandb"` or `"tensorboard"`. Check
            [tracking](https://huggingface.co/docs/accelerate/usage_guides/tracking) for more details.
        log_image_freq (`int`, *optional*, defaults to `1`):
            Frequency for logging images.
        tracker_kwargs (`dict[str, Any]`, *optional*, defaults to `{}`):
            Keyword arguments for the tracker (e.g., `wandb_project`).
        accelerator_kwargs (`dict[str, Any]`, *optional*, defaults to `{}`):
            Keyword arguments for the accelerator.
        project_kwargs (`dict[str, Any]`, *optional*, defaults to `{}`):
            Keyword arguments for the accelerator project config (e.g., `logging_dir`).
        tracker_project_name (`str`, *optional*, defaults to `"trl"`):
            Name of project to use for tracking.
        logdir (`str`, *optional*, defaults to `"logs"`):
            Top-level logging directory for checkpoint saving.
        num_epochs (`int`, *optional*, defaults to `100`):
            Number of epochs to train.
        save_freq (`int`, *optional*, defaults to `1`):
            Number of epochs between saving model checkpoints.
        num_checkpoint_limit (`int`, *optional*, defaults to `5`):
            Number of checkpoints to keep before overwriting old ones.
        mixed_precision (`str`, *optional*, defaults to `"fp16"`):
            Mixed precision training.
        allow_tf32 (`bool`, *optional*, defaults to `True`):
            Allow `tf32` on Ampere GPUs.
        resume_from (`str`, *optional*, defaults to `""`):
            Path to resume training from a checkpoint.
        sample_num_steps (`int`, *optional*, defaults to `50`):
            Number of sampler inference steps.
        sample_eta (`float`, *optional*, defaults to `1.0`):
            Eta parameter for the DDIM sampler.
        sample_guidance_scale (`float`, *optional*, defaults to `5.0`):
            Classifier-free guidance weight.
        train_batch_size (`int`, *optional*, defaults to `1`):
            Batch size for training.
        train_use_8bit_adam (`bool`, *optional*, defaults to `False`):
            Whether to use the 8bit Adam optimizer from `bitsandbytes`.
        train_learning_rate (`float`, *optional*, defaults to `1e-3`):
            Learning rate.
        train_adam_beta1 (`float`, *optional*, defaults to `0.9`):
            Beta1 for Adam optimizer.
        train_adam_beta2 (`float`, *optional*, defaults to `0.999`):
            Beta2 for Adam optimizer.
        train_adam_weight_decay (`float`, *optional*, defaults to `1e-4`):
            Weight decay for Adam optimizer.
        train_adam_epsilon (`float`, *optional*, defaults to `1e-8`):
            Epsilon value for Adam optimizer.
        train_gradient_accumulation_steps (`int`, *optional*, defaults to `1`):
            Number of gradient accumulation steps.
        train_max_grad_norm (`float`, *optional*, defaults to `1.0`):
            Maximum gradient norm for gradient clipping.
        negative_prompts (`str` or `None`, *optional*, defaults to `None`):
            Comma-separated list of prompts to use as negative examples.
        truncated_backprop_rand (`bool`, *optional*, defaults to `True`):
            If `True`, randomized truncation to different diffusion timesteps is used.
        truncated_backprop_timestep (`int`, *optional*, defaults to `49`):
            Absolute timestep to which the gradients are backpropagated. Used only if `truncated_backprop_rand=False`.
        truncated_rand_backprop_minmax (`tuple[int, int]`, *optional*, defaults to `(0, 50)`):
            Range of diffusion timesteps for randomized truncated backpropagation (see the example below).
        push_to_hub (`bool`, *optional*, defaults to `False`):
            Whether to push the final model to the Hub.
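
    Example (a minimal sketch of how the truncated-backprop fields interact; the values shown are just the
    defaults):

    ```python
    from trl import AlignPropConfig

    # Randomized truncation: each step backpropagates to a timestep drawn from `truncated_rand_backprop_minmax`.
    config = AlignPropConfig(truncated_backprop_rand=True, truncated_rand_backprop_minmax=(0, 50))

    # Fixed truncation: always backpropagate to `truncated_backprop_timestep`.
    config = AlignPropConfig(truncated_backprop_rand=False, truncated_backprop_timestep=49)
    ```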
    """

    exp_name: str = field(
        default=os.path.basename(sys.argv[0])[: -len(".py")],
        metadata={"help": "Name of this experiment (defaults to the file name without the extension)."},
    )
    run_name: str = field(default="", metadata={"help": "Name of this run."})
    seed: int = field(default=0, metadata={"help": "Random seed for reproducibility."})
    log_with: Optional[str] = field(
        default=None,
        metadata={"help": "Log with either 'wandb' or 'tensorboard'.", "choices": ["wandb", "tensorboard"]},
    )
    log_image_freq: int = field(default=1, metadata={"help": "Frequency for logging images."})
    tracker_kwargs: dict[str, Any] = field(
        default_factory=dict, metadata={"help": "Keyword arguments for the tracker (e.g., `wandb_project`)."}
    )
    accelerator_kwargs: dict[str, Any] = field(
        default_factory=dict, metadata={"help": "Keyword arguments for the accelerator."}
    )
    project_kwargs: dict[str, Any] = field(
        default_factory=dict,
        metadata={"help": "Keyword arguments for the accelerator project config (e.g., `logging_dir`)."},
    )
    tracker_project_name: str = field(default="trl", metadata={"help": "Name of project to use for tracking."})
    logdir: str = field(default="logs", metadata={"help": "Top-level logging directory for checkpoint saving."})
    num_epochs: int = field(default=100, metadata={"help": "Number of epochs to train."})
    save_freq: int = field(default=1, metadata={"help": "Number of epochs between saving model checkpoints."})
    num_checkpoint_limit: int = field(
        default=5, metadata={"help": "Number of checkpoints to keep before overwriting old ones."}
    )
    mixed_precision: str = field(
        default="fp16",
        metadata={
            "help": "Mixed precision training. Possible values are 'fp16', 'bf16', 'none'.",
            "choices": ["fp16", "bf16", "none"],
        },
    )
    allow_tf32: bool = field(default=True, metadata={"help": "Allow `tf32` on Ampere GPUs."})
    resume_from: str = field(default="", metadata={"help": "Path to resume training from a checkpoint."})
    sample_num_steps: int = field(default=50, metadata={"help": "Number of sampler inference steps."})
    sample_eta: float = field(default=1.0, metadata={"help": "Eta parameter for the DDIM sampler."})
    sample_guidance_scale: float = field(default=5.0, metadata={"help": "Classifier-free guidance weight."})
    train_batch_size: int = field(default=1, metadata={"help": "Batch size for training."})
    train_use_8bit_adam: bool = field(
        default=False, metadata={"help": "Whether to use the 8bit Adam optimizer from `bitsandbytes`."}
    )
    train_learning_rate: float = field(default=1e-3, metadata={"help": "Learning rate."})
    train_adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for Adam optimizer."})
    train_adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for Adam optimizer."})
    train_adam_weight_decay: float = field(default=1e-4, metadata={"help": "Weight decay for Adam optimizer."})
    train_adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon value for Adam optimizer."})
    train_gradient_accumulation_steps: int = field(
        default=1, metadata={"help": "Number of gradient accumulation steps."}
    )
    train_max_grad_norm: float = field(
        default=1.0, metadata={"help": "Maximum gradient norm for gradient clipping."}
    )
    negative_prompts: Optional[str] = field(
        default=None, metadata={"help": "Comma-separated list of prompts to use as negative examples."}
    )
    truncated_backprop_rand: bool = field(
        default=True,
        metadata={"help": "If `True`, randomized truncation to different diffusion timesteps is used."},
    )
    truncated_backprop_timestep: int = field(
        default=49,
        metadata={
            "help": "Absolute timestep to which the gradients are backpropagated. Used only if "
            "`truncated_backprop_rand=False`."
        },
    )
    truncated_rand_backprop_minmax: tuple[int, int] = field(
        default=(0, 50),
        metadata={"help": "Range of diffusion timesteps for randomized truncated backpropagation."},
    )
    push_to_hub: bool = field(default=False, metadata={"help": "Whether to push the final model to the Hub."})

    def to_dict(self):
        output_dict = {}
        for key, value in self.__dict__.items():
            output_dict[key] = value
        return flatten_dict(output_dict)

    def __post_init__(self):
        if self.train_use_8bit_adam and not is_bitsandbytes_available():
            raise ImportError(
                "You need to install bitsandbytes to use 8bit Adam. You can install it with `pip install bitsandbytes`."
            )