from typing import Literal

from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...modeling_rope_utils import RopeParameters
from ...utils import logging


logger = logging.get_logger(__name__)


class ModernBertConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ModernBertModel`]. It is used to instantiate an ModernBert
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the ModernBERT-base.
    e.g. [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50368):
            Vocabulary size of the ModernBert model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`ModernBertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 1152):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 22):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder. Will default to `"gelu"`
            if not specified.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_cutoff_factor (`float`, *optional*, defaults to 2.0):
            The cutoff factor for the truncated_normal_initializer for initializing all weight matrices.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the normalization layers.
        norm_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the normalization layers.
        pad_token_id (`int`, *optional*, defaults to 50283):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 50282):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 50281):
            Beginning of stream token id.
        cls_token_id (`int`, *optional*, defaults to 50281):
            Classification token id.
        sep_token_id (`int`, *optional*, defaults to 50282):
            Separation token id.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        layer_types (`list`, *optional*):
            The attention pattern (`"full_attention"` or `"sliding_attention"`) for each layer. If not given, every
            third layer uses full attention and the remaining layers use sliding-window attention.
        rope_parameters (`dict`, *optional*):
            Dictionary mapping attention patterns (`"full_attention"`, `"sliding_attention"`) to `RopeParameters`.
            Each value should be a dictionary containing `rope_type` and optional scaling parameters.
        local_attention (`int`, *optional*, defaults to 128):
            The total window size (in tokens) for local sliding-window attention.
        embedding_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the MLP layers.
        mlp_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the MLP layers.
        decoder_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in the decoder layers.
        classifier_pooling (`str`, *optional*, defaults to `"cls"`):
            The pooling method for the classifier. Should be either `"cls"` or `"mean"`. In local attention layers, the
            CLS token doesn't attend to all tokens on long sequences.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the classifier.
        classifier_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the classifier.
        classifier_activation (`str`, *optional*, defaults to `"gelu"`):
            The activation function for the classifier.
        deterministic_flash_attn (`bool`, *optional*, defaults to `False`):
            Whether to use deterministic flash attention. If `False`, inference will be faster but not deterministic.
        sparse_prediction (`bool`, *optional*, defaults to `False`):
            Whether to use sparse prediction for the masked language model instead of returning the full dense logits.
        sparse_pred_ignore_index (`int`, *optional*, defaults to -100):
            The index to ignore for the sparse prediction.
        reference_compile (`bool`, *optional*):
            Whether to compile the layers of the model which were compiled during pretraining. If `None`, then parts of
            the model will be compiled if 1) `triton` is installed, 2) the model is not on MPS, 3) the model is not
            shared between devices, and 4) the model is not resized after initialization. If `True`, then the model may
            be faster in some scenarios. This argument is deprecated and will be removed in a future version.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie the input and output word embeddings.

    Examples:

    ```python
    >>> from transformers import ModernBertModel, ModernBertConfig

    >>> # Initializing a ModernBert style configuration
    >>> configuration = ModernBertConfig()

    >>> # Initializing a model from the modernbert-base style configuration
    >>> model = ModernBertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
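
    The sketch below is illustrative only: it combines arguments documented above (`layer_types`, `rope_parameters`)
    with placeholder values that match the model defaults, and reads back the derived `sliding_window` property.

    ```python
    >>> # Make every third layer a global-attention layer and set explicit RoPE thetas
    >>> layer_types = ["full_attention" if i % 3 == 0 else "sliding_attention" for i in range(22)]
    >>> rope_parameters = {
    ...     "full_attention": {"rope_type": "default", "rope_theta": 160000.0},
    ...     "sliding_attention": {"rope_type": "default", "rope_theta": 10000.0},
    ... }
    >>> configuration = ModernBertConfig(layer_types=layer_types, rope_parameters=rope_parameters)

    >>> # `sliding_window` is half of `local_attention` (the total window size, 128 by default)
    >>> configuration.sliding_window
    64
    ```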
    """

    model_type = "modernbert"
    keys_to_ignore_at_inference = ["past_key_values"]
    default_theta = {"global": 160000.0, "local": 10000.0}

    def __setattr__(self, name, value):
        if name == "reference_compile" and value is not None:
            logger.warning_once(
                "The `reference_compile` argument is deprecated and will be removed in `transformers v5.2.0`. "
                "Use `torch.compile()` directly on the model instead."
            )
            value = None
        super().__setattr__(name, value)

    def __init__(
        self,
        vocab_size: int | None = 50368,
        hidden_size: int | None = 768,
        intermediate_size: int | None = 1152,
        num_hidden_layers: int | None = 22,
        num_attention_heads: int | None = 12,
        hidden_activation: str | None = "gelu",
        max_position_embeddings: int | None = 8192,
        initializer_range: float | None = 0.02,
        initializer_cutoff_factor: float | None = 2.0,
        norm_eps: float | None = 1e-05,
        norm_bias: bool | None = False,
        pad_token_id: int | None = 50283,
        eos_token_id: int | None = 50282,
        bos_token_id: int | None = 50281,
        cls_token_id: int | None = 50281,
        sep_token_id: int | None = 50282,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        layer_types: list[str] | None = None,
        rope_parameters: dict[Literal["full_attention", "sliding_attention"], RopeParameters] | None = None,
        local_attention: int | None = 128,
        embedding_dropout: float | None = 0.0,
        mlp_bias: bool | None = False,
        mlp_dropout: float | None = 0.0,
        decoder_bias: bool | None = True,
        classifier_pooling: Literal["cls", "mean"] | None = "cls",
        classifier_dropout: float | None = 0.0,
        classifier_bias: bool | None = False,
        classifier_activation: str | None = "gelu",
        deterministic_flash_attn: bool | None = False,
        sparse_prediction: bool | None = False,
        sparse_pred_ignore_index: int | None = -100,
        reference_compile: bool | None = None,
        tie_word_embeddings: bool | None = True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_activation = hidden_activation
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.initializer_cutoff_factor = initializer_cutoff_factor
        self.norm_eps = norm_eps
        self.norm_bias = norm_bias
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.cls_token_id = cls_token_id
        self.sep_token_id = sep_token_id
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.local_attention = local_attention
        self.embedding_dropout = embedding_dropout
        self.mlp_bias = mlp_bias
        self.mlp_dropout = mlp_dropout
        self.decoder_bias = decoder_bias
        self.classifier_pooling = classifier_pooling
        self.classifier_dropout = classifier_dropout
        self.classifier_bias = classifier_bias
        self.classifier_activation = classifier_activation
        self.deterministic_flash_attn = deterministic_flash_attn
        self.sparse_prediction = sparse_prediction
        self.sparse_pred_ignore_index = sparse_pred_ignore_index
        self.reference_compile = reference_compile
        self.tie_word_embeddings = tie_word_embeddings

        if self.classifier_pooling not in ["cls", "mean"]:
            raise ValueError(
                f'Invalid value for `classifier_pooling`, should be either "cls" or "mean", but is {self.classifier_pooling}.'
            )

        self.layer_types = layer_types
        self.global_attn_every_n_layers = kwargs.get("global_attn_every_n_layers", 3)
        if self.layer_types is None:
            # Every `global_attn_every_n_layers`-th layer (starting at layer 0) uses full attention,
            # all remaining layers use local sliding-window attention.
            self.layer_types = [
                "sliding_attention" if bool(i % self.global_attn_every_n_layers) else "full_attention"
                for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types, self.num_hidden_layers)
        self.rope_parameters = rope_parameters

        super().__init__(**kwargs)

    def convert_rope_params_to_dict(self, ignore_keys_at_rope_validation=None, **kwargs):
        rope_scaling = kwargs.pop("rope_scaling", None)
        default_rope_params = {
            "full_attention": {"rope_type": "default"},
            "sliding_attention": {"rope_type": "default"},
        }
        self.rope_parameters = self.rope_parameters if self.rope_parameters is not None else default_rope_params
        if rope_scaling is not None:
            self.rope_parameters["full_attention"].update(rope_scaling)
            self.rope_parameters["sliding_attention"].update(rope_scaling)

        # Backward compatibility: fill in the default rope type and theta for both attention patterns.
        if self.rope_parameters.get("full_attention") is None:
            self.rope_parameters["full_attention"] = {"rope_type": "default"}
        self.rope_parameters["full_attention"].setdefault(
            "rope_theta", kwargs.pop("global_rope_theta", self.default_theta["global"])
        )
        if self.rope_parameters.get("sliding_attention") is None:
            self.rope_parameters["sliding_attention"] = {"rope_type": "default"}
        self.rope_parameters["sliding_attention"].setdefault(
            "rope_theta", kwargs.pop("local_rope_theta", self.default_theta["local"])
        )

        self.standardize_rope_params()
        self.validate_rope(ignore_keys=ignore_keys_at_rope_validation)
        return kwargs

    def to_dict(self):
        output = super().to_dict()
        output.pop("reference_compile", None)
        return output

    @property
    def sliding_window(self):
        """Half-window size: `local_attention` is the total window, so we divide by 2."""
        return self.local_attention // 2

    @sliding_window.setter
    def sliding_window(self, value):
        """Set sliding_window by updating local_attention to 2 * value."""
        self.local_attention = value * 2


__all__ = ["ModernBertConfig"]