import torch
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class GPT3Config(PretrainedConfig):
    """
    Configuration class for the GPT-3 model.

    Class attributes:

    - **model_type** (`str`) -- An identifier for the model type, serialized
      into the JSON file and used to recreate the correct object in
      [`~transformers.AutoConfig`].

    Args:
        vocab_size (`int`, *optional*, defaults to 25600):
            Vocabulary size of the GPT-3 model. Defines the number of different
            tokens that can be represented by the `input_ids` passed when
            calling [`GPT3Model`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the decoder layers and the pooler layer.
        ffn_hidden_size (`int`, *optional*, defaults to `None`):
            Dimensionality of the feed-forward (FFN) layer. If `None`, it
            defaults to four times `hidden_size`.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the
            Transformer decoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward)
            layer in the Transformer decoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the
            decoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and
            `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the
            embeddings, decoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with.
            Typically set this to something large just in case (e.g., 512 or
            1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling
            [`GPT3Model`].
        layernorm_epsilon (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        bias_gelu_fusion (`bool`, *optional*, defaults to `True`):
            Whether to fuse the bias addition and the GELU activation into a
            single kernel.
        fp32_residual_connection (`bool`, *optional*, defaults to `False`):
            Whether to compute residual connections between layers in fp32 to
            improve accuracy.
        sequence_parallel (`bool`, *optional*, defaults to `False`):
            Whether to use sequence parallelism during training.
        bf16 (`bool`, *optional*, defaults to `False`):
            Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training.
            Requires Ampere or higher NVIDIA architecture or using CPU (no_cuda).
            This is an experimental API and it may change.
        fp16 (`bool`, *optional*, defaults to `False`):
            Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training.
        apply_query_key_layer_scaling (`bool`, *optional*, defaults to `True`):
            Whether to scale the query-key attention scores by the layer
            number to improve numerical stability during training.
        init_method_std (`float`, *optional*, defaults to `0.02`):
            The standard deviation of the normal distribution used for weight
            initialization.
        eod_id (`int`, *optional*, defaults to `1`):
            The end-of-document token id of the tokenizer; it also marks the
            end of generation.
        tokens_to_generate (`int`, *optional*, defaults to 100):
            Number of tokens to generate.
        top_k (`int`, *optional*, defaults to 0):
            Number of highest probability vocabulary tokens to keep for
            top-k-filtering that will be used by default in
            the `generate` method of the model.
        top_p (`float`, *optional*, defaults to 0.9):
            Value that will be used by default in the `generate` method of the model
            for `top_p`. If set to a float < 1,
            only the most probable tokens with probabilities that add up to `top_p`
            or higher are kept for generation.
        temperature (`float`, *optional*, defaults to 1.0):
            The value used to modulate the next token probabilities that will be used
            by default in the `generate` method of the model. Must be strictly positive.
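
    Example (a minimal usage sketch; the import path below is assumed from
    this file's location inside the modelscope package):

    ```python
    >>> from modelscope.models.nlp.gpt3.configuration import GPT3Config

    >>> # Initializing a GPT-3 configuration with default values
    >>> configuration = GPT3Config()

    >>> # Overriding the default sampling settings used at generation time
    >>> configuration = GPT3Config(top_k=50, top_p=0.95, temperature=0.8)

    >>> # The parameter dtype follows the fp16/bf16 precision flags
    >>> GPT3Config(fp16=True).params_dtype
    torch.float16
    ```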
    """

    model_type = 'gpt3'

    def __init__(self,
                 vocab_size=25600,
                 hidden_size=768,
                 ffn_hidden_size=None,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act='gelu',
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 layernorm_epsilon=1e-12,
                 bias_gelu_fusion=True,
                 fp32_residual_connection=False,
                 sequence_parallel=False,
                 fp16=False,
                 bf16=False,
                 apply_query_key_layer_scaling=True,
                 attention_softmax_in_fp32=False,
                 kv_channels=None,
                 masked_softmax_fusion=True,
                 attention_dropout=0.1,
                 bias_dropout_fusion=True,
                 apply_residual_connection_post_layernorm=False,
                 hidden_dropout=0.1,
                 init_method_std=0.02,
                 eod_id=1,
                 tokens_to_generate=100,
                 top_k=0,
                 top_p=0.9,
                 temperature=1.0,
                 **kwargs):
        # Mirror layernorm_epsilon into the standard `layer_norm_eps` field
        # expected by transformers' PretrainedConfig.
        super().__init__(layer_norm_eps=layernorm_epsilon, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        # The FFN width defaults to the conventional 4 * hidden_size.
        self.ffn_hidden_size = (4 * hidden_size if ffn_hidden_size is None
                                else ffn_hidden_size)
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layernorm_epsilon = layernorm_epsilon
        self.bias_gelu_fusion = bias_gelu_fusion
        self.fp32_residual_connection = fp32_residual_connection
        self.sequence_parallel = sequence_parallel
        self.fp16 = fp16
        self.bf16 = bf16
        # fp16 and bf16 are mutually exclusive precision modes.
        assert not (fp16 and bf16)
        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        if kv_channels is None:
            # Derive the per-head key/value projection size from the hidden
            # size when it is not given explicitly.
            assert hidden_size % num_attention_heads == 0
            kv_channels = hidden_size // num_attention_heads
        self.kv_channels = kv_channels
        self.masked_softmax_fusion = masked_softmax_fusion
        self.attention_dropout = attention_dropout
        self.bias_dropout_fusion = bias_dropout_fusion
        self.apply_residual_connection_post_layernorm = \
            apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.init_method_std = init_method_std
        self.eod_id = eod_id
        self.tokens_to_generate = tokens_to_generate
        self.top_k = top_k
        self.top_p = top_p
        self.temperature = temperature

        # Persistent fused layer-norm kernels are only available in
        # torch >= 1.11; fall back to the non-persistent variant otherwise.
        TORCH_MAJOR = int(torch.__version__.split('.')[0])
        TORCH_MINOR = int(torch.__version__.split('.')[1])
        self.no_persist_layer_norm = TORCH_MAJOR < 1 or (
            TORCH_MAJOR == 1 and TORCH_MINOR < 11)

    @property
    def params_dtype(self):
        # The torch dtype of the model parameters, derived from the
        # configured precision flags.
        if self.fp16:
            return torch.half
        if self.bf16:
            return torch.bfloat16
        return torch.float