"""GroupViT model configuration"""

from ...configuration_utils import PreTrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class GroupViTTextConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GroupViTTextModel`]. It is used to instantiate a
    GroupViT model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the GroupViT
    [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 49408):
            Vocabulary size of the GroupViT text model. Defines the number of different tokens that can be represented
            by the `input_ids` passed when calling [`GroupViTModel`].
        hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 77):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).

    Example:

    ```python
    >>> from transformers import GroupViTTextConfig, GroupViTTextModel

    >>> # Initializing a GroupViTTextModel with nvidia/groupvit-gcc-yfcc style configuration
    >>> configuration = GroupViTTextConfig()

    >>> model = GroupViTTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
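
    >>> # A small illustration of keyword overrides at construction time; the values shown
    >>> # are the documented defaults, repeated here only for illustration
    >>> configuration = GroupViTTextConfig(vocab_size=49408, hidden_size=256)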
    ```"""

    model_type = "groupvit_text_model"
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=256,
        intermediate_size=1024,
        num_hidden_layers=12,
        num_attention_heads=4,
        max_position_embeddings=77,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        dropout=0.0,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=1,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout


class GroupViTVisionConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GroupViTVisionModel`]. It is used to instantiate
    a GroupViT model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the GroupViT
    [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 384):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 1536):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        depths (`list[int]`, *optional*, defaults to [6, 3, 3]):
            The number of layers in each encoder block.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Total number of hidden layers in the Transformer encoder; expected to equal `sum(depths)`.
        num_group_tokens (`list[int]`, *optional*, defaults to [64, 8, 0]):
            The number of group tokens for each stage; 0 means no grouping at that stage.
        num_output_groups (`list[int]`, *optional*, defaults to [64, 8, 8]):
            The number of output groups for each stage.
        num_attention_heads (`int`, *optional*, defaults to 6):
            Number of attention heads for each attention layer in the Transformer encoder.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).

    Example:

    ```python
    >>> from transformers import GroupViTVisionConfig, GroupViTVisionModel

    >>> # Initializing a GroupViTVisionModel with nvidia/groupvit-gcc-yfcc style configuration
    >>> configuration = GroupViTVisionConfig()

    >>> model = GroupViTVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
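
    >>> # The per-stage grouping schedule is controlled by parallel lists; the values shown
    >>> # are the documented defaults, repeated here only for illustration
    >>> configuration = GroupViTVisionConfig(
    ...     depths=[6, 3, 3], num_group_tokens=[64, 8, 0], num_output_groups=[64, 8, 8]
    ... )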
    ```"""

    model_type = "groupvit_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=384,
        intermediate_size=1536,
        depths=[6, 3, 3],
        num_hidden_layers=12,
        num_group_tokens=[64, 8, 0],
        num_output_groups=[64, 8, 8],
        num_attention_heads=6,
        image_size=224,
        patch_size=16,
        num_channels=3,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        dropout=0.0,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        assign_eps=1.0,
        assign_mlp_ratio=[0.5, 4],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.depths = depths
        if num_hidden_layers != sum(depths):
            logger.warning(
                f"Manually setting num_hidden_layers to {num_hidden_layers}, but we expect num_hidden_layers ="
                f" sum(depth) = {sum(depths)}"
            )
        self.num_hidden_layers = num_hidden_layers
        self.num_group_tokens = num_group_tokens
        self.num_output_groups = num_output_groups
        self.num_attention_heads = num_attention_heads
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.assign_eps = assign_eps
        self.assign_mlp_ratio = assign_mlp_ratio


class GroupViTConfig(PreTrainedConfig):
    r"""
    [`GroupViTConfig`] is the configuration class to store the configuration of a [`GroupViTModel`]. It is used to
    instantiate a GroupViT model according to the specified arguments, defining the text model and vision model
    configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the GroupViT
    [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`GroupViTTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`GroupViTVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 256):
            Dimensionality of text and vision projection layers.
        projection_intermediate_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of intermediate layer of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original GroupViT
            implementation.
        kwargs (*optional*):
            Dictionary of keyword arguments.
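
    Example:

    ```python
    >>> from transformers import GroupViTConfig, GroupViTModel

    >>> # Initializing a GroupViTConfig with nvidia/groupvit-gcc-yfcc style (default) sub-configurations,
    >>> # mirroring the usage shown for the text and vision configs above
    >>> configuration = GroupViTConfig()

    >>> model = GroupViTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```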
    """

    model_type = "groupvit"
    sub_configs = {"text_config": GroupViTTextConfig, "vision_config": GroupViTVisionConfig}

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=256,
        projection_intermediate_dim=4096,
        logit_scale_init_value=2.6592,
        **kwargs,
    ):
        # If the legacy `*_config_dict` kwargs are present, pop them before `super().__init__` so they are not
        # saved, and use them to update `text_config`/`vision_config` for backward compatibility.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = GroupViTTextConfig(**text_config_dict).to_dict()

            # Warn when a key exists in both `text_config_dict` and `text_config` with different values.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key != "transformers_version":
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different "
                            f'values. The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    else:
                        message = (
                            "`text_config_dict` is provided which will be used to initialize "
                            f'`GroupViTTextConfig`. The value `text_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = GroupViTVisionConfig(**vision_config_dict).to_dict()
            # Convert `id2label` keys to strings instead of integers.
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Warn when a key exists in both `vision_config_dict` and `vision_config` with different values.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key != "transformers_version":
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    else:
                        message = (
                            "`vision_config_dict` is provided which will be used to initialize "
                            f'`GroupViTVisionConfig`. The value `vision_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = GroupViTTextConfig()
            logger.info("`text_config` is `None`. Initializing the `GroupViTTextConfig` with default values.")
        elif isinstance(text_config, dict):
            text_config = GroupViTTextConfig(**text_config)

        if vision_config is None:
            vision_config = GroupViTVisionConfig()
            logger.info("`vision_config` is `None`. Initializing the `GroupViTVisionConfig` with default values.")
        elif isinstance(vision_config, dict):
            vision_config = GroupViTVisionConfig(**vision_config)

        self.text_config = text_config
        self.vision_config = vision_config

        self.projection_dim = projection_dim
        self.projection_intermediate_dim = projection_intermediate_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_range = 0.02
        self.initializer_factor = 1.0
        self.output_segmentation = False

        super().__init__(**kwargs)


__all__ = ["GroupViTConfig", "GroupViTTextConfig", "GroupViTVisionConfig"]