from typing import Any, Optional, Union

from ...configuration_utils import PretrainedConfig, layer_type_validation


class T5GemmaModuleConfig(PretrainedConfig):
    r"""
        This is the configuration class to store the configuration of a [`T5GemmaModuleModel`]. It is used to instantiate a T5GemmaModule
        model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
        defaults will yield a similar configuration to that of the T5GemmaModule-7B.
        e.g. [google/t5_gemma_module-7b](https://huggingface.co/google/t5_gemma_module-7b)
        Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
        documentation from [`PretrainedConfig`] for more information.
        Args:
            vocab_size (`int`, *optional*, defaults to 256000):
                Vocabulary size of the T5GemmaModule model. Defines the number of different tokens that can be represented by the
                `input_ids` passed when calling [`T5GemmaModuleModel`]
            hidden_size (`int`, *optional*, defaults to 2304):
                Dimension of the hidden representations.
            intermediate_size (`int`, *optional*, defaults to 9216):
                Dimension of the MLP representations.
            num_hidden_layers (`int`, *optional*, defaults to 26):
                Number of hidden layers in the Transformer decoder.
            num_attention_heads (`int`, *optional*, defaults to 8):
                Number of attention heads for each attention layer in the Transformer decoder.
            num_key_value_heads (`int`, *optional*, defaults to 4):
                This is the number of key_value heads that should be used to implement Grouped Query Attention. If
                `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
                `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
                converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
                by meanpooling all the original heads within that group. For more details, check out [this
                paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
                `num_attention_heads`.
            head_dim (`int`, *optional*, defaults to 256):
                The attention head dimension.
            hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
                The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
                if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
            max_position_embeddings (`int`, *optional*, defaults to 8192):
                The maximum sequence length that this model might ever be used with.
            initializer_range (`float`, *optional*, defaults to 0.02):
                The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
            rms_norm_eps (`float`, *optional*, defaults to 1e-06):
                The epsilon used by the rms normalization layers.
            use_cache (`bool`, *optional*, defaults to `True`):
                Whether or not the model should return the last key/values attentions (not used by all models). Only
                relevant if `config.is_decoder=True`.
            pad_token_id (`int`, *optional*, defaults to 0):
                Padding token id.
            eos_token_id (`int`, *optional*, defaults to 1):
                End of stream token id.
            bos_token_id (`int`, *optional*, defaults to 2):
                Beginning of stream token id.
            tie_word_embeddings (`bool`, *optional*, defaults to `True`):
                Whether to tie weight embeddings
            rope_theta (`float`, *optional*, defaults to 10000.0):
                The base period of the RoPE embeddings.
            attention_bias (`bool`, *optional*, defaults to `False`):
                Whether to use a bias in the query, key, value and output projection layers during self-attention.
            attention_dropout (`float`, *optional*, defaults to 0.0):
                The dropout ratio for the attention probabilities.
            query_pre_attn_scalar (`float`, *optional*, defaults to 256):
                scaling factor used on the attention scores
            sliding_window (`int`, *optional*, defaults to 4096):
                in T5GemmaModule, every other layer uses sliding window attention. This is the size of the sliding window.
            layer_types (`list`, *optional*):
                Attention pattern for each layer.
            final_logit_softcapping (`float`, *optional*, defaults to 30.0):
                scaling factor when applying tanh softcapping on the logits.
            attn_logit_softcapping (`float`, *optional*, defaults to 50.0):
                scaling factor when applying tanh softcapping on the attention scores.

        ```python
        >>> from transformers import T5GemmaModuleModel, T5GemmaModuleConfig
        >>> # Initializing a T5GemmaModule t5_gemma_module-7b style configuration
        >>> configuration = T5GemmaModuleConfig()
        >>> # Initializing a model from the t5_gemma_module-7b style configuration
        >>> model = T5GemmaModuleModel(configuration)
        >>> # Accessing the model configuration
        >>> configuration = model.config
        ```
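        An illustrative configuration with non-default values (chosen here only for the example), showing the
        alternating attention pattern that is filled in when `layer_types` is not given:

        ```python
        >>> config = T5GemmaModuleConfig(num_hidden_layers=4, sliding_window=1024)
        >>> config.layer_types
        ['sliding_attention', 'full_attention', 'sliding_attention', 'full_attention']
        ```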
    Module config (encoder or decoder): the same as Gemma2Config."""

    model_type = "t5_gemma_module"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=256000,
        hidden_size=2304,
        intermediate_size=9216,
        num_hidden_layers=26,
        num_attention_heads=8,
        num_key_value_heads=4,
        head_dim=256,
        hidden_activation="gelu_pytorch_tanh",
        max_position_embeddings=8192,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        bos_token_id=2,
        tie_word_embeddings=True,
        rope_theta=10000.0,
        attention_bias=False,
        attention_dropout=0.0,
        query_pre_attn_scalar=256,
        sliding_window=4096,
        layer_types=None,
        final_logit_softcapping=30.0,
        attn_logit_softcapping=50.0,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.hidden_activation = hidden_activation
        self.query_pre_attn_scalar = query_pre_attn_scalar
        self.sliding_window = sliding_window
        self.final_logit_softcapping = final_logit_softcapping
        self.attn_logit_softcapping = attn_logit_softcapping
        self.layer_types = layer_types

        if self.layer_types is None:
            # Default: alternate sliding-window and full attention, starting with sliding.
            self.layer_types = [
                "sliding_attention" if bool((i + 1) % 2) else "full_attention"
                for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types)


class T5GemmaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`T5GemmaModel`]. It is used to instantiate a T5Gemma
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to a hypothetical balanced Gemma2 encoder-decoder model.
    e.g. [google/t5gemma-placeholder](https://huggingface.co/google/t5gemma-placeholder)
    ```python
    >>> from transformers import T5GemmaConfig, T5GemmaModel
    >>> t5gemma_config = T5GemmaConfig.from_pretrained("google/t5gemma-placeholder")
    >>> model = T5GemmaModel(t5gemma_config)
    ```
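    A config can also be assembled from explicit module configs; the values below are illustrative:

    ```python
    >>> from transformers import T5GemmaConfig, T5GemmaModuleConfig
    >>> module_config = T5GemmaModuleConfig(num_hidden_layers=4)
    >>> config = T5GemmaConfig(encoder=module_config, decoder=module_config)
    ```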
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        encoder (`Union[T5GemmaModuleConfig, dict]`, *optional*):
            Configuration for the encoder.
        decoder (`Union[T5GemmaModuleConfig, dict]`, *optional*):
            Configuration for the decoder.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether the model is used as an encoder/decoder or not.
        dropout_rate (`float`, *optional*, defaults to 0.0):
            The ratio for all dropout layers (following T5).
        classifier_dropout_rate (`float`, *optional*, defaults to 0.0):
            The dropout ratio for classifier (following T5).
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for attention.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie input and output embeddings.
        kwargs (additional keyword arguments, *optional*):
            Will be passed to the PretrainedConfig base class.
    """

    model_type = "t5gemma"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "encoder.layers.*.self_attn.q_proj": "colwise",
        "encoder.layers.*.self_attn.k_proj": "colwise",
        "encoder.layers.*.self_attn.v_proj": "colwise",
        "encoder.layers.*.self_attn.o_proj": "rowwise",
        "encoder.layers.*.mlp.gate_proj": "colwise",
        "encoder.layers.*.mlp.up_proj": "colwise",
        "encoder.layers.*.mlp.down_proj": "rowwise",
        "decoder.layers.*.self_attn.q_proj": "colwise",
        "decoder.layers.*.self_attn.k_proj": "colwise",
        "decoder.layers.*.self_attn.v_proj": "colwise",
        "decoder.layers.*.self_attn.o_proj": "rowwise",
        "decoder.layers.*.cross_attn.q_proj": "colwise",
        "decoder.layers.*.cross_attn.k_proj": "colwise",
        "decoder.layers.*.cross_attn.v_proj": "colwise",
        "decoder.layers.*.cross_attn.o_proj": "rowwise",
        "decoder.layers.*.mlp.gate_proj": "colwise",
        "decoder.layers.*.mlp.up_proj": "colwise",
        "decoder.layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "encoder.embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "encoder.layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "encoder.norm": (["hidden_states"], ["hidden_states"]),
        "decoder.embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "decoder.layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "decoder.norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        encoder: Optional[Union[T5GemmaModuleConfig, dict[str, Any]]] = None,
        decoder: Optional[Union[T5GemmaModuleConfig, dict[str, Any]]] = None,
        is_encoder_decoder: bool = True,
        dropout_rate: float = 0.0,
        classifier_dropout_rate: float = 0.0,
        attention_dropout: float = 0.0,
        tie_word_embeddings: bool = True,
        **kwargs,
    ):
        # Normalize the encoder config.
        if isinstance(encoder, dict):
            encoder = T5GemmaModuleConfig(**encoder)
        elif encoder is None:
            encoder = T5GemmaModuleConfig()
        else:
            assert isinstance(encoder, T5GemmaModuleConfig), f"{type(encoder)} is not supported."

        # Normalize the decoder config, falling back to the encoder config.
        if isinstance(decoder, dict):
            decoder = T5GemmaModuleConfig(**decoder)
        elif decoder is None:
            decoder = encoder
        else:
            assert isinstance(decoder, T5GemmaModuleConfig), f"{type(decoder)} is not supported."

        # Copy both configs so the caller's objects are not mutated in place.
        encoder = T5GemmaModuleConfig(**encoder.to_dict())
        decoder = T5GemmaModuleConfig(**decoder.to_dict())

        encoder.is_decoder = False
        encoder.dropout_rate = dropout_rate
        encoder.attention_dropout = attention_dropout
        self.encoder = encoder

        decoder.is_decoder = True
        decoder.use_cache = True
        decoder.dropout_rate = dropout_rate
        decoder.attention_dropout = attention_dropout
        decoder.cross_attention_hidden_size = encoder.hidden_size
        self.decoder = decoder

        # Default these to the decoder's values; some are consumed by `PretrainedConfig.__init__`.
        for special_token_key in ("eos_token_id", "pad_token_id", "bos_token_id", "use_cache", "initializer_range"):
            if special_token_key not in kwargs:
                kwargs[special_token_key] = getattr(decoder, special_token_key)

        super().__init__(**kwargs)

        self.is_encoder_decoder = is_encoder_decoder
        self.use_cache = kwargs.get("use_cache", decoder.use_cache)
        self.initializer_range = kwargs.get("initializer_range", decoder.initializer_range)
        self.dropout_rate = dropout_rate
        self.classifier_dropout_rate = classifier_dropout_rate
        self.attention_dropout = attention_dropout
        self.tie_word_embeddings = tie_word_embeddings

    def __setattr__(self, key, value):
        # Keep a handful of attributes in sync with the encoder and decoder sub-configs.
        shared_attr_with_submodules = [
            "output_hidden_states",
            "output_attentions",
            "_attn_implementation",
            "dropout_rate",
            "attention_dropout",
        ]
        if key in shared_attr_with_submodules:
            setattr(self.encoder, key, value)
            setattr(self.decoder, key, value)
        super().__setattr__(key, value)

    def get_text_config(self, decoder=False) -> "PretrainedConfig":
        # The model is encoder-decoder as a whole, so return the full config
        # regardless of the `decoder` flag.
        del decoder
        return self


__all__ = ["T5GemmaModuleConfig", "T5GemmaConfig"]
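
# Usage sketch (illustrative, assuming `transformers` is installed; not part of
# the module itself):
#
#   >>> from transformers import T5GemmaConfig, T5GemmaModuleConfig
#   >>> module = T5GemmaModuleConfig(num_hidden_layers=4, sliding_window=512)
#   >>> config = T5GemmaConfig(encoder=module, decoder=module, dropout_rate=0.1)
#   >>> config.attention_dropout = 0.2  # mirrored onto both halves by __setattr__
#   >>> (config.encoder.attention_dropout, config.decoder.attention_dropout)
#   (0.2, 0.2)
#   >>> config.decoder.cross_attention_hidden_size == config.encoder.hidden_size
#   True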