"""Nemotron model configuration"""

from transformers import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class NemotronConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a
    [`NemotronModel`]. It is used to instantiate a Nemotron model
    according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar
    configuration to that of the Nemotron-8B.

    Configuration objects inherit from [`PretrainedConfig`] and can be
    used to control the model outputs. Read the documentation from
    [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the Nemotron model. Defines the number of
            different tokens that can be represented by the `input_ids`
            passed when calling [`NemotronModel`].
        hidden_size (`int`, *optional*, defaults to 6144):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 24576):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 48):
            Number of attention heads for each attention layer in the
            Transformer decoder.
        head_dim (`int`, *optional*):
            Projection weights dimension in multi-head attention. Set to
            `hidden_size // num_attention_heads` if None.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to
            implement Grouped Query Attention (GQA). If
            `num_key_value_heads=num_attention_heads`, the model will use
            Multi Head Attention (MHA); if `num_key_value_heads=1`, the
            model will use Multi Query Attention (MQA); otherwise GQA is
            used. When converting a multi-head checkpoint to a GQA
            checkpoint, each group key and value head should be
            constructed by mean-pooling all the original heads within
            that group (a sketch of this is shown after the argument
            list). For more details check out
            [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it
            is not specified, will default to `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
            The non-linear activation function (function or string) in the
            decoder.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used
            with.
        initializer_range (`float`, *optional*, defaults to 0.0134):
            The standard deviation of the truncated_normal_initializer for
            initializing all weight matrices.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values
            attentions (not used by all models). Only relevant if
            `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 3):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_parameters (`dict`, *optional*):
            The parameters of the RoPE embeddings (see the usage example
            below). Expected contents:
                `rope_theta` (`float`): The base period of the RoPE embeddings.
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear',
                    'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the
                    original RoPE implementation.
                `partial_rotary_factor` (`float`, *optional*, defaults to 0.5):
                    Percentage of the query and keys which will have rotary embedding.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output
            projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in up_proj and down_proj layers in the MLP
            layers.
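
    When converting a multi-head attention checkpoint to a GQA checkpoint,
    the grouped key and value heads can be built by mean-pooling the
    original heads in each group. The sketch below is for illustration
    only; the function name and the assumed
    `[num_heads * head_dim, hidden_size]` weight layout are assumptions,
    not part of this configuration class.

    ```python
    import torch


    def meanpool_kv_heads(weight: torch.Tensor, num_attention_heads: int,
                          num_key_value_heads: int) -> torch.Tensor:
        # weight: [num_attention_heads * head_dim, hidden_size] k/v projection.
        head_dim = weight.shape[0] // num_attention_heads
        group_size = num_attention_heads // num_key_value_heads
        grouped = weight.reshape(num_key_value_heads, group_size, head_dim,
                                 weight.shape[1])
        # Mean-pool the heads within each group into a single k/v head.
        return grouped.mean(dim=1).reshape(num_key_value_heads * head_dim, -1)
    ```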

    ```python
    >>> from transformers import NemotronModel, NemotronConfig
    >>> # Initializing a Nemotron nemotron-15b style configuration
    >>> configuration = NemotronConfig()
    >>> # Initializing a model from the nemotron-15b style configuration
    >>> model = NemotronModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
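    >>> # A GQA-style configuration with an explicit RoPE parameter dict
    >>> # (hypothetical values, shown only to illustrate the expected keys)
    >>> gqa_configuration = NemotronConfig(
    ...     num_attention_heads=48,
    ...     num_key_value_heads=8,
    ...     rope_parameters={
    ...         "rope_type": "default",
    ...         "rope_theta": 10000.0,
    ...         "partial_rotary_factor": 0.5,
    ...     },
    ... )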
    ```"""

    model_type = "nemotron"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=256000,
        hidden_size=6144,
        intermediate_size=24576,
        num_hidden_layers=32,
        num_attention_heads=48,
        head_dim=None,
        num_key_value_heads=None,
        hidden_act="relu2",
        max_position_embeddings=4096,
        initializer_range=0.0134,
        norm_eps=1e-5,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=2,
        eos_token_id=3,
        tie_word_embeddings=False,
        rope_parameters=None,
        attention_bias=False,
        attention_dropout=0.0,
        mlp_bias=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Some checkpoints pass the head dimension as `kv_channels`;
        # otherwise fall back to hidden_size // num_attention_heads.
        head_dim = head_dim or kwargs.get("kv_channels")
        self.head_dim = head_dim if head_dim is not None else (
            hidden_size // num_attention_heads)
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.norm_eps = norm_eps
        self.use_cache = use_cache

        # Accept the legacy `rope_scaling` kwarg as an alias for
        # `rope_parameters`, and fold loose RoPE kwargs into the dict.
        rope_scaling = kwargs.pop("rope_scaling", None)
        rope_parameters = rope_parameters or rope_scaling or {
            "rope_type": "default"
        }
        rope_theta = kwargs.pop("rope_theta", 10000.0)
        if "rope_theta" not in rope_parameters:
            rope_parameters["rope_theta"] = rope_theta
        partial_rotary_factor = (
            kwargs.pop("rope_percent", None)
            or kwargs.pop("rope_percentage", None)
            or rope_parameters.get("partial_rotary_factor") or 0.5)
        if "partial_rotary_factor" not in rope_parameters:
            rope_parameters["partial_rotary_factor"] = partial_rotary_factor
        self.rope_parameters = rope_parameters
        self._rope_parameters_validation()

        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_parameters_validation(self):
        """
        Validate the `rope_parameters` configuration.
        """
        if self.rope_parameters is None:
            return

        rope_type = self.rope_parameters.get("rope_type", None)
        factor = self.rope_parameters.get("factor", None)
        if rope_type not in {"default", "linear", "dynamic"}:
            raise ValueError(
                "`rope_type` must be one of ['default', 'linear', "
                f"'dynamic'], got {rope_type}")
        if rope_type != "default":
            if factor is None:
                raise ValueError(
                    "If `rope_type` is not 'default', `rope_parameters` "
                    "must include a `factor` field. Got `None`.")
            if not isinstance(factor, float) or factor <= 1.0:
                raise ValueError("`rope_parameters`'s factor field must be "
                                 f"a float > 1, got {factor}")