"""Phi-3 model configuration"""

from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging


logger = logging.get_logger(__name__)


class Phi3Config(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the
    [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32064):
            Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`Phi3Model`].
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key/value heads that should be used to implement Grouped Query Attention (GQA). If
            `num_key_value_heads=num_attention_heads`, the model will use Multi-Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi-Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group's key and value head should be
            constructed by mean-pooling all the original heads within that group (a minimal conversion sketch is
            shown after this argument list). For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If not specified, it will default to
            `num_attention_heads`.
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            Dropout probability for the MLP outputs.
        embd_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio after computing the attention scores.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        original_max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model was trained with. This is used to determine the size of the
            original RoPE embeddings when using long scaling.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value used for the RMSNorm.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie the input and output word embeddings.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`. An illustrative example of such a dictionary is sketched after
            this argument list.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 32000):
            The id of the "end-of-sequence" token.
        pad_token_id (`int`, *optional*, defaults to 32000):
            The id of the padding token.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If `None`, no sliding window is applied.
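
    When converting a multi-head checkpoint to a GQA checkpoint, the key/value projection weights can be
    mean-pooled group by group, as described for `num_key_value_heads`. The snippet below is an illustrative
    sketch only; the `meanpool_kv_heads` helper and the assumed `[num_heads * head_dim, hidden_size]` weight
    layout are not part of this module:

    ```python
    >>> import torch
    >>> def meanpool_kv_heads(weight, num_heads, num_kv_heads, head_dim):
    ...     # Average the key (or value) heads inside each GQA group; assumes rows are ordered head by head.
    ...     grouped = weight.view(num_kv_heads, num_heads // num_kv_heads, head_dim, -1)
    ...     return grouped.mean(dim=1).reshape(num_kv_heads * head_dim, -1)
    ```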
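
    When scaling RoPE to a longer context, `rope_parameters` is a dictionary like the sketch below. The values are
    illustrative placeholders rather than shipped defaults; with the default geometry (`hidden_size=3072`,
    `num_attention_heads=32`), each factor list needs `3072 // 32 // 2 = 48` entries, which is what
    `validate_rope` checks:

    ```python
    >>> rope_parameters = {
    ...     "rope_type": "longrope",
    ...     "rope_theta": 10000.0,
    ...     "short_factor": [1.0] * 48,
    ...     "long_factor": [1.3] * 48,
    ... }
    >>> long_config = Phi3Config(max_position_embeddings=131072, rope_parameters=rope_parameters)
    ```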

    Example:

    ```python
    >>> from transformers import Phi3Model, Phi3Config

    >>> # Initializing a Phi-3 style configuration
    >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")

    >>> # Initializing a model from the configuration
    >>> model = Phi3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "phi3"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.qkv_proj": "colwise_gather_output",
        "layers.*.self_attn.o_proj": "rowwise_split_input",
        "layers.*.mlp.gate_up_proj": "colwise_gather_output",
        "layers.*.mlp.down_proj": "rowwise_split_input",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size: int | None = 32064,
        hidden_size: int | None = 3072,
        intermediate_size: int | None = 8192,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = None,
        resid_pdrop: float | None = 0.0,
        embd_pdrop: float | None = 0.0,
        attention_dropout: float | None = 0.0,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 4096,
        original_max_position_embeddings: int | None = 4096,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-05,
        use_cache: bool | None = True,
        tie_word_embeddings: bool | None = False,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        bos_token_id: int | None = 1,
        eos_token_id: int | None = 32000,
        pad_token_id: int | None = 32000,
        sliding_window: int | None = None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.original_max_position_embeddings = original_max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_parameters = rope_parameters
        # Phi-3 rotates the full head dimension unless a caller overrides this.
        kwargs.setdefault("partial_rotary_factor", 1.0)
        self.sliding_window = sliding_window
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(**kwargs)

    def convert_rope_params_to_dict(
        self, default_theta: float | int = 10000.0, ignore_keys: set | None = None, **kwargs
    ):
        # Fold the legacy `rope_scaling` kwarg into `rope_parameters`, fill in defaults, and
        # normalize the legacy "su"/"yarn" type names used by early Phi-3 checkpoints.
        rope_scaling = kwargs.pop("rope_scaling", None)
        self.rope_parameters = rope_scaling or self.rope_parameters
        self.rope_parameters = self.rope_parameters if self.rope_parameters is not None else {}
        self.rope_parameters.setdefault("rope_theta", kwargs.pop("rope_theta", default_theta))
        self.rope_parameters.setdefault("partial_rotary_factor", kwargs.get("partial_rotary_factor", 1.0))
        rope_parameters_type = self.rope_parameters.get("rope_type", None)
        if rope_parameters_type is not None and rope_parameters_type in ["su", "yarn"]:
            self.rope_parameters["rope_type"] = "longrope"
        self.validate_rope(ignore_keys)
        return kwargs

    def validate_rope(self, ignore_keys: set | None = None):
        """
        Validate the `rope_parameters` configuration.
        """
        super().validate_rope(ignore_keys=ignore_keys)

        if not isinstance(self.rope_parameters, dict):
            raise ValueError(f"`rope_parameters` must be a dictionary but got {self.rope_parameters}")

        rope_parameters_type = self.rope_parameters.get("rope_type", None)
        rope_parameters_short_factor = self.rope_parameters.get("short_factor", None)
        rope_parameters_long_factor = self.rope_parameters.get("long_factor", None)
        rotary_ndims = int(
            self.hidden_size // self.num_attention_heads * self.rope_parameters["partial_rotary_factor"]
        )

        if rope_parameters_type not in ["default", "longrope"]:
            raise ValueError(f"`rope_parameters`'s type field must be one of ['longrope'], got {rope_parameters_type}")
        if rope_parameters_short_factor is not None:
            if not (
                isinstance(rope_parameters_short_factor, list)
                and all(isinstance(x, (int, float)) for x in rope_parameters_short_factor)
            ):
                raise ValueError(
                    f"`rope_parameters`'s short_factor field must be a list of numbers, got {rope_parameters_short_factor}"
                )
            if not len(rope_parameters_short_factor) == rotary_ndims // 2:
                raise ValueError(
                    f"`rope_parameters`'s short_factor field must have length {rotary_ndims // 2}, "
                    f"got {len(rope_parameters_short_factor)}"
                )
        if rope_parameters_long_factor is not None:
            if not (
                isinstance(rope_parameters_long_factor, list)
                and all(isinstance(x, (int, float)) for x in rope_parameters_long_factor)
            ):
                raise ValueError(
                    f"`rope_parameters`'s long_factor field must be a list of numbers, got {rope_parameters_long_factor}"
                )
            if not len(rope_parameters_long_factor) == rotary_ndims // 2:
                raise ValueError(
                    f"`rope_parameters`'s long_factor field must have length {rotary_ndims // 2}, "
                    f"got {len(rope_parameters_long_factor)}"
                )


__all__ = ["Phi3Config"]