"""OpenAI ImageGPT configuration"""

from ...configuration_utils import PreTrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class ImageGPTConfig(PreTrainedConfig):
    """
    This is the configuration class to store the configuration of an [`ImageGPTModel`]. It is
    used to instantiate an ImageGPT model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the ImageGPT
    [openai/imagegpt-small](https://huggingface.co/openai/imagegpt-small) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 513):
            Vocabulary size of the ImageGPT model: 512 pixel clusters plus one start-of-sequence (sos) token. Defines
            the number of different tokens that can be represented by the `input_ids` passed when calling
            [`ImageGPTModel`].
        n_positions (`int`, *optional*, defaults to 32*32):
            The maximum sequence length that this model might ever be used with. For ImageGPT this is the number of
            pixels per image, e.g. 32*32 = 1024 for the 32x32 input resolution.
        n_embd (`int`, *optional*, defaults to 512):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_inner (`int`, *optional*, defaults to `None`):
            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
        activation_function (`str`, *optional*, defaults to `"quick_gelu"`):
            Activation function (can be one of the activation functions defined in src/transformers/activations.py).
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_attn_weights (`bool`, *optional*, defaults to `True`):
            Scale attention weights by dividing by `sqrt(hidden_size)`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
            Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
        reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
            Whether to scale keys (K) prior to computing attention (dot-product) and to upcast the attention
            dot-product/softmax to float when training with mixed precision.

    Example:

    ```python
    >>> from transformers import ImageGPTConfig, ImageGPTModel

    >>> # Initializing an ImageGPT configuration
    >>> configuration = ImageGPTConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = ImageGPTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
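
    >>> # A minimal extra sketch (illustrative, hypothetical values): defaults can be
    >>> # overridden at construction time, and the class's `attribute_map` aliases generic
    >>> # attribute names such as `hidden_size` to their ImageGPT equivalents (`n_embd`)
    >>> custom_configuration = ImageGPTConfig(n_layer=12, n_head=4)
    >>> custom_configuration.hidden_size == custom_configuration.n_embd
    True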
    ```"""

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=512 + 1,  # 512 pixel clusters plus one start-of-sequence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        add_cross_attention=False,
        pad_token_id=None,
        bos_token_id=None,
        eos_token_id=None,
        **kwargs,
    ):
        self.add_cross_attention = add_cross_attention
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(**kwargs)


__all__ = ["ImageGPTConfig"]