"""Evolla model configuration"""

from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging


logger = logging.get_logger(__name__)


class SaProtConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`EvollaSaProtProteinEncoder`]. It is used to instantiate a
    SaProt model according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 446):
            Vocabulary size of the protein sequence model. Defines the number of different tokens that can be represented
            by the `input_ids` passed when calling [`EvollaModel`].
        mask_token_id (`int`, *optional*, defaults to 4):
            The id of the *mask* token in the protein sequence model.
        pad_token_id (`int`, *optional*, defaults to 1):
            The id of the *padding* token in the protein sequence model.
        hidden_size (`int`, *optional*, defaults to 1280):
            Dimensionality of the protein sequence model layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 33):
            Number of hidden layers in the protein sequence model.
        num_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the protein sequence model.
        intermediate_size (`int`, *optional*, defaults to 5120):
            Dimensionality of the intermediate layers in the protein sequence model.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the hidden layers in the protein sequence model.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities in the protein sequence model.
        max_position_embeddings (`int`, *optional*, defaults to 1026):
            The maximum sequence length that the protein sequence model might ever be used with. Typically set this to
            something large just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value for the layer normalization layer in the protein sequence model.
        position_embedding_type (`str`, *optional*, defaults to `"rotary"`):
            The type of position embedding to use in the protein sequence model. Currently only `"rotary"` is supported.
        emb_layer_norm_before (`bool`, *optional*, defaults to `False`):
            Whether to apply layer normalization before the position embedding in the protein sequence model.
        token_dropout (`bool`, *optional*, defaults to `True`):
            Whether to apply dropout to the tokens in the protein sequence model.
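
    Example (a minimal usage sketch; `SaProtConfig` is internal to this module, is not exported in `__all__`, and is
    normally instantiated for you by [`EvollaConfig`]):

    ```python
    >>> from transformers.models.evolla.configuration_evolla import SaProtConfig

    >>> # Initializing a SaProt protein-encoder configuration with the default values
    >>> configuration = SaProtConfig()

    >>> # Overriding a single field (illustrative value)
    >>> small_configuration = SaProtConfig(num_hidden_layers=12)
    ```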
    """

    def __init__(
        self,
        vocab_size=446,
        mask_token_id=4,
        pad_token_id=1,
        hidden_size=1280,
        num_hidden_layers=33,
        num_attention_heads=20,
        intermediate_size=5120,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        position_embedding_type="rotary",
        emb_layer_norm_before=False,
        token_dropout=True,
        is_decoder=False,
        add_cross_attention=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.pad_token_id = pad_token_id
        self.mask_token_id = mask_token_id
        self.is_decoder = is_decoder
        self.add_cross_attention = add_cross_attention
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout


class EvollaConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`EvollaModel`]. It is used to instantiate an
    Evolla model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Evolla-10B.

    e.g. [westlake-repl/Evolla-10B-hf](https://huggingface.co/westlake-repl/Evolla-10B-hf)

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        protein_encoder_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`SaProtConfig`].
        vocab_size (`int`, *optional*, defaults to 128256):
            Vocabulary size of the Evolla llama model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`EvollaModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the llama layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 14336):
            Dimensionality of the intermediate layers in the llama model.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the llama model.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the llama model.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            Number of key-value pairs for each attention layer in the llama model.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the llama model. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"silu"` are supported.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value for the RMS-norm layer in the llama model.
        rope_parameters (`RopeParameters` or `dict`, *optional*):
            Dictionary of RoPE (rotary position embedding) parameters for the llama model, e.g. the base `rope_theta`
            and an optional scaling configuration.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the attention layer.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention layer.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the MLP layer.
        aligner_ffn_mult (`int`, *optional*, defaults to 4):
            The FFN multiplier for the aligner layer.
        aligner_enable_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in the aligner layer.
        aligner_attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities in the aligner layer.
        aligner_num_add_layers (`int`, *optional*, defaults to 8):
            The number of additional layers for the aligner layer.
        resampler_depth (`int`, *optional*, defaults to 6):
            The depth of the resampler layer in the llama model.
        resampler_dim_head (`int`, *optional*, defaults to 64):
            The dimension of the heads in the resampler layer in the llama model.
        resampler_heads (`int`, *optional*, defaults to 8):
            The number of heads in the resampler layer in the llama model.
        resampler_num_latents (`int`, *optional*, defaults to 64):
            The number of latents in the resampler layer in the llama model.
        resampler_ff_mult (`int`, *optional*, defaults to 4):
            The FFN multiplier for the resampler layer.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        pad_token_id (`int`, *optional*):
            The id of the *padding* token.
        bos_token_id (`int`, *optional*, defaults to 128000):
            The id of the *beginning-of-sequence* token.
        eos_token_id (`int`, *optional*, defaults to 128009):
            The id of the *end-of-sequence* token.
        use_cache (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the input and output word embeddings.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as the decoder in an encoder-decoder architecture; the flag has no effect on
            decoder-only or encoder-only architectures.
        add_cross_attention (`bool`, *optional*, defaults to `False`):
            Whether cross-attention layers should be added to the model.

    Example:

    ```python
    >>> from transformers import EvollaModel, EvollaConfig

    >>> # Initializing a Evolla evolla-10b style configuration
    >>> configuration = EvollaConfig()

    >>> # Initializing a model from the evolla-10b style configuration
    >>> model = EvollaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
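
    >>> # Customizing the nested protein encoder with a plain dict (illustrative values, forwarded to `SaProtConfig`)
    >>> custom_configuration = EvollaConfig(protein_encoder_config={"num_hidden_layers": 12})
    >>> custom_configuration.protein_encoder_config.num_hidden_layers
    12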
    ```"""

    model_type = "EvollaModel"
    sub_configs = {"protein_encoder_config": SaProtConfig}
    default_theta = 500000.0

    def __init__(
        self,
        protein_encoder_config: dict | None = None,
        vocab_size: int | None = 128256,
        hidden_size: int | None = 4096,
        intermediate_size: int | None = 14336,
        num_hidden_layers: int | None = 32,
        num_attention_heads: int | None = 32,
        num_key_value_heads: int | None = 8,
        hidden_act: str | None = "silu",
        max_position_embeddings: int | None = 8192,
        rms_norm_eps: float | None = 1e-05,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        mlp_bias: bool | None = False,
        aligner_ffn_mult: int | None = 4,
        aligner_enable_bias: bool | None = True,
        aligner_attention_probs_dropout_prob: float | None = 0.1,
        aligner_num_add_layers: int | None = 8,
        resampler_depth: int | None = 6,
        resampler_dim_head: int | None = 64,
        resampler_heads: int | None = 8,
        resampler_num_latents: int | None = 64,
        resampler_ff_mult: int | None = 4,
        initializer_range: float | None = 0.02,
        pad_token_id: int | None = None,
        bos_token_id: int | None = 128000,
        eos_token_id: int | None = 128009,
        use_cache: bool | None = False,
        tie_word_embeddings: bool | None = False,
        is_decoder: bool | None = False,
        add_cross_attention: bool | None = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.rms_norm_eps = rms_norm_eps
        self.rope_parameters = rope_parameters
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        self.aligner_ffn_mult = aligner_ffn_mult
        self.aligner_enable_bias = aligner_enable_bias
        self.aligner_attention_probs_dropout_prob = aligner_attention_probs_dropout_prob
        self.aligner_num_add_layers = aligner_num_add_layers
        self.resampler_depth = resampler_depth
        self.resampler_dim_head = resampler_dim_head
        self.resampler_heads = resampler_heads
        self.resampler_num_latents = resampler_num_latents
        self.resampler_ff_mult = resampler_ff_mult
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.is_decoder = is_decoder
        self.add_cross_attention = add_cross_attention

        if protein_encoder_config is None:
            protein_encoder_config = {}
            logger.info("`protein_encoder_config` is `None`. Initializing the `SaProtConfig` with default values.")
        self.protein_encoder_config = SaProtConfig(**protein_encoder_config)

        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(**kwargs)


__all__ = ["EvollaConfig"]