from typing import Any

from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...modeling_rope_utils import RopeParameters


class T5GemmaModuleConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`T5GemmaModuleModel`]. It is used to instantiate a T5GemmaModule
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the T5GemmaModule-7B.
    e.g. [google/t5_gemma_module-7b](https://huggingface.co/google/t5_gemma_module-7b)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the T5GemmaModule model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`T5GemmaModuleModel`].
        hidden_size (`int`, *optional*, defaults to 2304):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 9216):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 26):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
            if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie the input and output word embeddings.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        query_pre_attn_scalar (`float`, *optional*, defaults to 256):
            Scaling factor used on the attention scores.
        sliding_window (`int`, *optional*, defaults to 4096):
            In T5GemmaModule, every other layer uses sliding window attention. This is the size of the sliding window.
        layer_types (`list`, *optional*):
            Attention pattern for each layer, either `"sliding_attention"` or `"full_attention"`.
        final_logit_softcapping (`float`, *optional*, defaults to 30.0):
            Scaling factor when applying tanh softcapping on the logits.
        attn_logit_softcapping (`float`, *optional*, defaults to 50.0):
            Scaling factor when applying tanh softcapping on the attention scores.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the module is used as the decoder in an encoder-decoder architecture; it has no effect on
            decoder-only or encoder-only architectures.

    ```python
    >>> from transformers import T5GemmaModuleModel, T5GemmaModuleConfig
    >>> # Initializing a T5GemmaModule t5_gemma_module-7b style configuration
    >>> configuration = T5GemmaModuleConfig()
    >>> # Initializing a model from the t5_gemma_module-7b style configuration
    >>> model = T5GemmaModuleModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
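    >>> # Illustrative sketch (hypothetical 4-layer module): with the default alternating
    >>> # pattern, layer types interleave sliding-window and full attention.
    >>> T5GemmaModuleConfig(num_hidden_layers=4).layer_types
    ['sliding_attention', 'full_attention', 'sliding_attention', 'full_attention']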
    ```"""

    model_type = "t5_gemma_module"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size: int | None = 256000,
        hidden_size: int | None = 2304,
        intermediate_size: int | None = 9216,
        num_hidden_layers: int | None = 26,
        num_attention_heads: int | None = 8,
        num_key_value_heads: int | None = 4,
        head_dim: int | None = 256,
        hidden_activation: str | None = "gelu_pytorch_tanh",
        max_position_embeddings: int | None = 8192,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-6,
        use_cache: bool | None = True,
        pad_token_id: int | None = 0,
        eos_token_id: int | None = 1,
        bos_token_id: int | None = 2,
        tie_word_embeddings: bool | None = True,
        rope_parameters: RopeParameters | dict[str, Any] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        query_pre_attn_scalar: float | None = 256,
        sliding_window: int | None = 4096,
        layer_types: list[str] | None = None,
        final_logit_softcapping: float | None = 30.0,
        attn_logit_softcapping: float | None = 50.0,
        is_decoder: bool | None = False,
        **kwargs,
    ):
        self.is_decoder = is_decoder
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.tie_word_embeddings = tie_word_embeddings
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.hidden_activation = hidden_activation
        self.query_pre_attn_scalar = query_pre_attn_scalar
        self.sliding_window = sliding_window
        self.final_logit_softcapping = final_logit_softcapping
        self.attn_logit_softcapping = attn_logit_softcapping
        self.layer_types = layer_types

        if self.layer_types is None:
            # Alternate sliding-window and full attention, starting with sliding attention.
            self.layer_types = [
                "sliding_attention" if bool((i + 1) % 2) else "full_attention" for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types, self.num_hidden_layers)

        self.rope_parameters = rope_parameters
        super().__init__(**kwargs)


class T5GemmaConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`T5GemmaModel`]. It is used to instantiate a T5Gemma
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to a hypothetical balanced Gemma2 encoder-decoder model.
    e.g. [google/t5gemma-2b-2b-prefixlm-it](https://huggingface.co/google/t5gemma-2b-2b-prefixlm-it)
    ```python
    >>> from transformers import T5GemmaConfig, T5GemmaModel
    >>> t5gemma_config = T5GemmaConfig.from_pretrained("google/t5gemma-2b-2b-prefixlm-it")
    >>> model = T5GemmaModel(t5gemma_config)
    ```
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        encoder (`Union[T5GemmaModuleConfig, dict]`, *optional*):
            Configuration for the encoder.
        decoder (`Union[T5GemmaModuleConfig, dict]`, *optional*):
            Configuration for the decoder.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether the model is used as an encoder/decoder or not.
        dropout_rate (`float`, *optional*, defaults to 0.0):
            The dropout ratio for all dropout layers (following T5).
        classifier_dropout_rate (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the classifier (following T5).
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for attention.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie input and output word embeddings.
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the T5Gemma model (the same as Gemma 2).
        kwargs (additional keyword arguments, *optional*):
            Will be passed to the [`PreTrainedConfig`] base class.
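
    As a sketch, the encoder and decoder can also be configured explicitly; the small sizes below are
    illustrative only, not recommended defaults:

    ```python
    >>> from transformers import T5GemmaConfig, T5GemmaModuleConfig

    >>> # Hypothetical small modules, just to show the encoder/decoder arguments.
    >>> encoder = T5GemmaModuleConfig(num_hidden_layers=2)
    >>> decoder = T5GemmaModuleConfig(num_hidden_layers=2)
    >>> config = T5GemmaConfig(encoder=encoder, decoder=decoder)
    ```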
    """

    model_type = "t5gemma"
    keys_to_ignore_at_inference = ["past_key_values"]
    sub_configs = {"encoder": T5GemmaModuleConfig, "decoder": T5GemmaModuleConfig}

    def __init__(
        self,
        encoder: T5GemmaModuleConfig | dict[str, Any] | None = None,
        decoder: T5GemmaModuleConfig | dict[str, Any] | None = None,
        is_encoder_decoder: bool | None = True,
        dropout_rate: float | None = 0.0,
        classifier_dropout_rate: float | None = 0.0,
        attention_dropout: float | None = 0.0,
        tie_word_embeddings: bool | None = True,
        vocab_size: int | None = 256000,
        **kwargs,
    ):
        # Accept either ready-made module configs or plain dicts for the encoder and decoder.
        if isinstance(encoder, dict):
            encoder = T5GemmaModuleConfig(**encoder)
        elif encoder is None:
            encoder = T5GemmaModuleConfig()
        else:
            assert isinstance(encoder, T5GemmaModuleConfig), f"{type(encoder)} is not supported."

        if isinstance(decoder, dict):
            decoder = T5GemmaModuleConfig(**decoder)
        elif decoder is None:
            decoder = encoder
        else:
            assert isinstance(decoder, T5GemmaModuleConfig), f"{type(decoder)} is not supported."

        # Re-instantiate so the encoder and decoder are independent config objects.
        encoder = T5GemmaModuleConfig(**encoder.to_dict())
        decoder = T5GemmaModuleConfig(**decoder.to_dict())

        encoder.is_decoder = False
        encoder.dropout_rate = dropout_rate
        encoder.attention_dropout = attention_dropout
        self.encoder = encoder

        decoder.is_decoder = True
        decoder.use_cache = True
        decoder.dropout_rate = dropout_rate
        decoder.attention_dropout = attention_dropout
        decoder.cross_attention_hidden_size = encoder.hidden_size
        self.decoder = decoder

        # Default special tokens to the decoder's unless explicitly overridden.
        for special_token_key in ["bos_token_id", "pad_token_id", "eos_token_id"]:
            if special_token_key not in kwargs:
                kwargs[special_token_key] = getattr(decoder, special_token_key)

        super().__init__(**kwargs)
        self.is_encoder_decoder = is_encoder_decoder
        self.use_cache = kwargs.get("use_cache", decoder.use_cache)
        self.dropout_rate = dropout_rate
        self.classifier_dropout_rate = classifier_dropout_rate
        self.attention_dropout = attention_dropout
        self.tie_word_embeddings = tie_word_embeddings
        self.vocab_size = vocab_size


__all__ = ["T5GemmaModuleConfig", "T5GemmaConfig"]