"""CodeGen model configuration"""

from collections import OrderedDict
from collections.abc import Mapping
from typing import Any, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)


class CodeGenConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CodeGenModel`]. It is used to instantiate a
    CodeGen model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the CodeGen
    [Salesforce/codegen-2B-mono](https://huggingface.co/Salesforce/codegen-2B-mono) architecture. Configuration objects
    inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from
    [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50400):
            Vocabulary size of the CodeGen model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`CodeGenModel`].
        n_positions (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_ctx (`int`, *optional*, defaults to 2048):
            This attribute is used in `CodeGenModel.__init__` without any real effect.
        n_embd (`int`, *optional*, defaults to 4096):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        rotary_dim (`int`, *optional*, defaults to 64):
            Number of dimensions in the embedding that Rotary Position Embedding is applied to.
        n_inner (`int`, *optional*):
            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        bos_token_id (`int`, *optional*, defaults to 50256):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 50256):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
            model has an output word embedding layer.

    Example:

    ```python
    >>> from transformers import CodeGenConfig, CodeGenModel

    >>> # Initializing a CodeGen 6B configuration
    >>> configuration = CodeGenConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = CodeGenModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
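

# A minimal sketch (illustrative, not from the upstream source) of what
# `attribute_map` buys us: `PretrainedConfig` transparently aliases the generic
# attribute names to CodeGen's native ones, so model-agnostic code keeps working:
#
#     config = CodeGenConfig(n_embd=1024, n_head=8)
#     assert config.hidden_size == 1024            # aliased to `n_embd`
#     assert config.num_attention_heads == 8       # aliased to `n_head`
#     assert config.max_position_embeddings == config.n_positions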


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: Optional[list[PatchingSpec]] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # ONNX export needs a pad token; fall back to 0 when the config has none.
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
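
    # A rough sketch (illustrative, not exhaustive) of the dynamic axes `inputs`
    # yields when `use_past=True`; the `past_key_values.{i}.key` / `.value` names
    # come from `fill_with_past_key_values_` on the base ONNX config:
    #
    #     {"input_ids": {0: "batch", 1: "sequence"},
    #      "past_key_values.0.key": {0: "batch", 2: "past_sequence + sequence"},
    #      "past_key_values.0.value": {0: "batch", 2: "past_sequence + sequence"},
    #      ...,
    #      "attention_mask": {0: "batch", 1: "past_sequence + sequence"}}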

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # Order the inputs the way they appear in the model's forward() signature.
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Deliberately use a different length for past_key_values than for input_ids.
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask so it also covers the dummy past positions.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13


__all__ = ["CodeGenConfig", "CodeGenOnnxConfig"]