from dataclasses import dataclass, field
from typing import Any, Optional

from transformers import TrainingArguments


@dataclass
class BCOConfig(TrainingArguments):
    r"""
    Configuration class for the [`BCOTrainer`].

    This class includes only the parameters that are specific to BCO training. For a full list of training arguments,
    please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may
    differ from those in [`~transformers.TrainingArguments`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.
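
    For instance, a minimal sketch (the script name and flag values below are illustrative, not part of this
    file):

    ```python
    from transformers import HfArgumentParser

    from trl import BCOConfig

    parser = HfArgumentParser(BCOConfig)
    # e.g. `python train.py --output_dir bco-model --beta 0.2 --max_length 2048`
    (training_args,) = parser.parse_args_into_dataclasses()
    ```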

    Parameters:
        max_length (`int` or `None`, *optional*, defaults to `1024`):
            Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
            to use the default data collator.
        max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
            Maximum length of the prompt. This argument is required if you want to use the default data collator.
        max_completion_length (`int` or `None`, *optional*, defaults to `None`):
            Maximum length of the completion. This argument is required if you want to use the default data collator
            and your model is an encoder-decoder.
        beta (`float`, *optional*, defaults to `0.1`):
            Parameter controlling the deviation from the reference model. Higher β means less deviation from the
            reference model.
        label_pad_token_id (`int`, *optional*, defaults to `-100`):
            Label pad token id. This argument is required if you want to use the default data collator.
        padding_value (`int` or `None`, *optional*, defaults to `None`):
            Padding value to use. If `None`, the padding value of the tokenizer is used.
        truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
            Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
            This argument is required if you want to use the default data collator.
        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model and reference model.
        generate_during_eval (`bool`, *optional*, defaults to `False`):
            If `True`, generates and logs completions from both the model and the reference model to W&B or Comet
            during evaluation.
        is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`):
            When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
            you need to specify if the model returned by the callable is an encoder-decoder model.
        precompute_ref_log_probs (`bool`, *optional*, defaults to `False`):
            Whether to precompute reference model log probabilities for training and evaluation datasets. This is
            useful when training without the reference model to reduce the total GPU memory needed.
        model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
            string.
        ref_model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the reference model
            from a string.
        dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
            Number of processes to use for processing the dataset.
        prompt_sample_size (`int`, *optional*, defaults to `1024`):
            Number of prompts that are fed to the density ratio classifier.
        min_density_ratio (`float`, *optional*, defaults to `0.5`):
            Minimum value of the density ratio. The estimated density ratio is clamped to this value.
        max_density_ratio (`float`, *optional*, defaults to `10.0`):
            Maximum value of the density ratio. The estimated density ratio is clamped to this value.
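
    Example:

    ```python
    # A hedged sketch: constructing the config programmatically; the output
    # directory and hyperparameter values are illustrative.
    from trl import BCOConfig

    training_args = BCOConfig(
        output_dir="bco-model",
        beta=0.1,
        max_length=1024,
        max_prompt_length=512,
    )
    # `training_args` can then be passed as `args` to `BCOTrainer`.
    ```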
    """

    _VALID_DICT_FIELDS = TrainingArguments._VALID_DICT_FIELDS + ["model_init_kwargs", "ref_model_init_kwargs"]

    # Parameters whose default values are overridden from TrainingArguments
    logging_steps: float = field(
        default=10,
        metadata={
            "help": "Log every X updates steps. Should be an integer or a float in range `[0,1)`. If smaller than "
            "1, will be interpreted as ratio of total training steps."
        },
    )
    bf16: Optional[bool] = field(
        default=None,
        metadata={
            "help": "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA "
            "architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. If not set, it defaults to `True` if "
            "`fp16` is not set."
        },
    )

    max_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": "Maximum length of the sequences (prompt + completion) in the batch. This argument is required "
            "if you want to use the default data collator."
        },
    )
    max_prompt_length: Optional[int] = field(
        default=512,
        metadata={
            "help": "Maximum length of the prompt. This argument is required if you want to use the default data "
            "collator."
        },
    )
    max_completion_length: Optional[int] = field(
        default=None,
        metadata={
            "help": "Maximum length of the completion. This argument is required if you want to use the default "
            "data collator and your model is an encoder-decoder."
        },
    )
    beta: float = field(
        default=0.1,
        metadata={
            "help": "Parameter controlling the deviation from the reference model. Higher β means less deviation "
            "from the reference model."
        },
    )
    label_pad_token_id: int = field(
        default=-100,
        metadata={
            "help": "Label pad token id. This argument is required if you want to use the default data collator."
        },
    )
    padding_value: Optional[int] = field(
        default=None,
        metadata={"help": "Padding value to use. If `None`, the padding value of the tokenizer is used."},
    )
    truncation_mode: str = field(
        default="keep_end",
        metadata={
            "help": "Truncation mode to use when the prompt is too long. Possible values are `keep_end` or "
            "`keep_start`. This argument is required if you want to use the default data collator."
        },
    )
    disable_dropout: bool = field(
        default=True,
        metadata={"help": "Whether to disable dropout in the model and reference model."},
    )
    generate_during_eval: bool = field(
        default=False,
        metadata={
            "help": "If `True`, generates and logs completions from both the model and the reference model to W&B "
            "or Comet during evaluation."
        },
    )
    is_encoder_decoder: Optional[bool] = field(
        default=None,
        metadata={
            "help": "When using the `model_init` argument (callable) to instantiate the model instead of the "
            "`model` argument, you need to specify if the model returned by the callable is an encoder-decoder "
            "model."
        },
    )
    precompute_ref_log_probs: bool = field(
        default=False,
        metadata={
            "help": "Whether to precompute reference model log probabilities for training and evaluation datasets. "
            "This is useful when training without the reference model to reduce the total GPU memory needed."
        },
    )
    model_init_kwargs: Optional[dict[str, Any]] = field(
        default=None,
        metadata={
            "help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the "
            "model from a string."
        },
    )
    ref_model_init_kwargs: Optional[dict[str, Any]] = field(
        default=None,
        metadata={
            "help": "Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the "
            "reference model from a string."
        },
    )
    dataset_num_proc: Optional[int] = field(
        default=None,
        metadata={"help": "Number of processes to use for processing the dataset."},
    )
    prompt_sample_size: int = field(
        default=1024,
        metadata={"help": "Number of prompts that are fed to the density ratio classifier."},
    )
    min_density_ratio: float = field(
        default=0.5,
        metadata={
            "help": "Minimum value of the density ratio. The estimated density ratio is clamped to this value."
        },
    )
    max_density_ratio: float = field(
        default=10.0,
        metadata={
            "help": "Maximum value of the density ratio. The estimated density ratio is clamped to this value."
        },
    )

    def __post_init__(self):
        # If `bf16` was not explicitly set, default it to the opposite of `fp16`.
        self.bf16 = not (self.fp16) if self.bf16 is None else self.bf16
        super().__post_init__()