from dataclasses import dataclass, field
from typing import Optional

from transformers import TrainingArguments


@dataclass
class RewardConfig(TrainingArguments):
    r"""
    Configuration class for the [`RewardTrainer`].

    This class includes only the parameters that are specific to Reward training. For a full list of training
    arguments, please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this
    class may differ from those in [`~transformers.TrainingArguments`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        max_length (`int` or `None`, *optional*, defaults to `1024`):
            Maximum length of the sequences (prompt + completion) in the batch, filters out entries that exceed the
            limit. This argument is required if you want to use the default data collator.
        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model.
        dataset_num_proc (`int`, *optional*, defaults to `None`):
            Number of processes to use for processing the dataset.
        center_rewards_coefficient (`float`, *optional*, defaults to `None`):
            Coefficient to incentivize the reward model to output mean-zero rewards (proposed by
            https://huggingface.co/papers/2312.09244, Eq. 2). Recommended value: `0.01`.
        remove_unused_columns (`bool`, *optional*, defaults to `False`):
            Whether to remove the columns that are not used by the model's forward pass. Can be `True` only if the
            dataset is pretokenized.
    """

    # Parameters whose default values are overridden from `TrainingArguments`
    logging_steps: float = field(
        default=10,
        metadata={
            "help": "Log every X updates steps. Should be an integer or a float in range `[0,1)`. If smaller than "
            "1, will be interpreted as ratio of total training steps."
        },
    )
    bf16: Optional[bool] = field(
        default=None,
        metadata={
            "help": "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA "
            "architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. If not set, it defaults to `True` if "
            "`fp16` is not set."
        },
    )
    average_tokens_across_devices: bool = field(
        default=True,
        metadata={
            "help": "Whether or not to average tokens across devices. If enabled, will use all_reduce to "
            "synchronize num_tokens_in_batch for precise loss calculation. Reference: "
            "https://github.com/huggingface/transformers/issues/34242"
        },
    )

    max_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": "Maximum length of the sequences (prompt + completion) in the batch, filters out entries that "
            "exceed the limit. This argument is required if you want to use the default data collator."
        },
    )
    disable_dropout: bool = field(
        default=True,
        metadata={"help": "Whether to disable dropout in the model and reference model."},
    )
    dataset_num_proc: Optional[int] = field(
        default=None,
        metadata={"help": "Number of processes to use for processing the dataset."},
    )
    center_rewards_coefficient: Optional[float] = field(
        default=None,
        metadata={
            "help": "Coefficient to incentivize the reward model to output mean-zero rewards (proposed by "
            "https://huggingface.co/papers/2312.09244, Eq. 2). Recommended value: `0.01`."
        },
    )
    remove_unused_columns: bool = field(
        default=False,
        metadata={
            "help": "Whether to remove the columns that are not used by the model's forward pass. Can be `True` "
            "only if the dataset is pretokenized."
        },
    )

    def __post_init__(self):
        # If bf16 was not set explicitly, default to bf16 unless fp16 was requested.
        self.bf16 = not self.fp16 if self.bf16 is None else self.bf16
        super().__post_init__()
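# Usage sketch (illustrative only, not part of the original module). As the
# docstring notes, `HfArgumentParser` can expose these fields as command-line
# arguments:
#
#     from transformers import HfArgumentParser
#     from trl import RewardConfig
#
#     parser = HfArgumentParser(RewardConfig)
#     (config,) = parser.parse_args_into_dataclasses()
#
# Or construct the config directly and pass it to `RewardTrainer`; the values
# below are assumptions, and `model` / `dataset` are assumed to be a
# sequence-classification model and a preference dataset:
#
#     from trl import RewardConfig, RewardTrainer
#
#     config = RewardConfig(
#         output_dir="reward-model",        # hypothetical output directory
#         max_length=1024,                  # required by the default data collator
#         center_rewards_coefficient=0.01,  # recommended value per the docstring
#     )
#     trainer = RewardTrainer(model=model, args=config, train_dataset=dataset)
#     trainer.train()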
   
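# How `center_rewards_coefficient` enters the loss — a sketch of the auxiliary
# term from https://huggingface.co/papers/2312.09244, Eq. 2; variable names
# are assumptions, not the trainer's exact internals:
#
#     import torch
#     import torch.nn.functional as F
#
#     # Bradley-Terry pairwise loss on (chosen, rejected) reward pairs
#     loss = -F.logsigmoid(rewards_chosen - rewards_rejected).mean()
#     if config.center_rewards_coefficient is not None:
#         # Penalize the mean reward magnitude so outputs stay centered at zero
#         loss += config.center_rewards_coefficient * torch.mean(
#             (rewards_chosen + rewards_rejected) ** 2
#         )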