"""OpenAI GPT-2 configuration"""

from ...configuration_utils import PreTrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class GPT2Config(PreTrainedConfig):
    """
    This is the configuration class to store the configuration of a [`GPT2Model`]. It is used to
    instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the GPT-2
    [openai-community/gpt2](https://huggingface.co/openai-community/gpt2) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 50257):
            Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`GPT2Model`].
        n_positions (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_embd (`int`, *optional*, defaults to 768):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_inner (`int`, *optional*):
            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
            Activation function, to be selected from the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        summary_type (`str`, *optional*, defaults to `"cls_index"`):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] (see the
            sketch after the options below).

            Has to be one of the following options:

                - `"last"`: Take the last token hidden state (like XLNet).
                - `"first"`: Take the first token hidden state (like BERT).
                - `"mean"`: Take the mean of all tokens hidden states.
                - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
                - `"attn"`: Not implemented now, use multi-head attention.
        summary_use_proj (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`].

            Whether or not to add a projection after the vector extraction.
        summary_activation (`str`, *optional*):
            Argument used when doing sequence summary. Used for the multiple choice head in
            [`GPT2DoubleHeadsModel`].

            Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
        summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`].

            Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
        summary_first_dropout (`float`, *optional*, defaults to 0.1):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`].

            The dropout ratio to be used after the projection and activation.
        scale_attn_weights (`bool`, *optional*, defaults to `True`):
            Scale attention weights by dividing by `sqrt(hidden_size)`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        bos_token_id (`int`, *optional*, defaults to 50256):
            Id of the beginning of sentence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 50256):
            Id of the end of sentence token in the vocabulary.
        pad_token_id (`int`, *optional*):
            Padding token id.
        scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
            Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
        reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
            Whether to scale keys (K) prior to computing attention (dot-product) and to upcast the attention
            dot-product/softmax to `float32` when training with mixed precision.
        add_cross_attention (`bool`, *optional*, defaults to `False`):
            Whether cross-attention layers should be added to the model.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie the input and output word embeddings.

    Example:

    ```python
    >>> from transformers import GPT2Config, GPT2Model

    >>> # Initializing a GPT2 configuration
    >>> configuration = GPT2Config()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = GPT2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
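
    >>> # Illustrative sketch (hypothetical sizes, not from the original example):
    >>> # overriding architecture arguments to define a smaller GPT-2 variant
    >>> small_configuration = GPT2Config(n_layer=6, n_head=8, n_embd=512)
    >>> small_model = GPT2Model(small_configuration)

    >>> # `attribute_map` aliases the generic names to GPT-2-specific ones,
    >>> # so these two attributes resolve to the same value
    >>> configuration.hidden_size == configuration.n_embd
    True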
    ```Úgpt2Úpast_key_valuesÚn_embdÚn_positionsÚn_headÚn_layer)Úhidden_sizeÚmax_position_embeddingsÚnum_attention_headsÚnum_hidden_layerséQÄ  é   é   é   NÚgelu_newçš™™™™™¹?çñhãˆµøä>ç{®Gáz”?Ú	cls_indexTéPÄ  Fc                    s²   || _ || _|| _|| _|| _|| _|| _|| _|| _|| _	|	| _
|
| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _tƒ jdi |¤Ž d S )N© )Úadd_cross_attentionÚtie_word_embeddingsÚ
vocab_sizer   r   r
   r	   Ún_innerÚactivation_functionÚresid_pdropÚ
embd_pdropÚ
attn_pdropÚlayer_norm_epsilonÚinitializer_rangeÚsummary_typeÚsummary_use_projÚsummary_activationÚsummary_first_dropoutÚsummary_proj_to_labelsÚscale_attn_weightsÚ	use_cacheÚscale_attn_by_inverse_layer_idxÚreorder_and_upcast_attnÚbos_token_idÚeos_token_idÚpad_token_idÚsuperÚ__init__)Úselfr   r   r   r
   r	   r   r   r   r    r!   r"   r#   r$   r%   r&   r(   r'   r)   r*   r-   r.   r/   r+   r,   r   r   Úkwargs©Ú	__class__r   úi/home/ubuntu/transcripts/venv/lib/python3.10/site-packages/transformers/models/gpt2/configuration_gpt2.pyr1   …   s6   zGPT2Config.__init__)r   r   r   r   r   Nr   r   r   r   r   r   r   TNTr   TTr   r   NFFFT)	Ú__name__Ú
__module__Ú__qualname__Ú__doc__Ú
model_typeÚkeys_to_ignore_at_inferenceÚattribute_mapr1   Ú__classcell__r   r   r4   r6   r      sH    cü	år   N)
r:   Úconfiguration_utilsr   Úutilsr   Ú
get_loggerr7   Úloggerr   Ú__all__r   r   r   r6   Ú<module>   s   
 
+