"""CLIP model configuration"""

from ...configuration_utils import PreTrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class CLIPTextConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CLIPTextModel`]. It is used to instantiate a CLIP
    text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the text encoder of the CLIP
    [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 49408):
            Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`CLIPModel`].
        hidden_size (`int`, *optional*, defaults to 512):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 77):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512, 1024, or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 49406):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 49407):
            End of stream token id.

    Example:

    ```python
    >>> from transformers import CLIPTextConfig, CLIPTextModel

    >>> # Initializing a CLIPTextConfig with openai/clip-vit-base-patch32 style configuration
    >>> configuration = CLIPTextConfig()

    >>> # Initializing a CLIPTextModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
    >>> model = CLIPTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "clip_text_model"
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=77,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        # These token ids match the openai/clip-vit-base-patch32 vocabulary.
        pad_token_id=1,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        # Special token ids are forwarded so `PreTrainedConfig` registers them.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout


class CLIPVisionConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CLIPVisionModel`]. It is used to instantiate a
    CLIP vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the vision encoder of the CLIP
    [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).

    Example:

    ```python
    >>> from transformers import CLIPVisionConfig, CLIPVisionModel

    >>> # Initializing a CLIPVisionConfig with openai/clip-vit-base-patch32 style configuration
    >>> configuration = CLIPVisionConfig()

    >>> # Initializing a CLIPVisionModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
    >>> model = CLIPVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "clip_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act


class CLIPConfig(PreTrainedConfig):
    r"""
    [`CLIPConfig`] is the configuration class to store the configuration of a [`CLIPModel`]. It is used to instantiate
    a CLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating
    a configuration with the defaults will yield a similar configuration to that of the CLIP
    [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`CLIPTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`CLIPVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter; the default of 2.6592 equals ln(1/0.07), matching the
            original CLIP implementation.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import CLIPConfig, CLIPModel

    >>> # Initializing a CLIPConfig with openai/clip-vit-base-patch32 style configuration
    >>> configuration = CLIPConfig()

    >>> # Initializing a CLIPModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
    >>> model = CLIPModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a CLIPConfig from a CLIPTextConfig and a CLIPVisionConfig
    >>> from transformers import CLIPTextConfig, CLIPVisionConfig

    >>> # Initializing a CLIPText and CLIPVision configuration
    >>> config_text = CLIPTextConfig()
    >>> config_vision = CLIPVisionConfig()

    >>> config = CLIPConfig(text_config=config_text, vision_config=config_vision)
    ```"""

    model_type = "clip"
    sub_configs = {"text_config": CLIPTextConfig, "vision_config": CLIPVisionConfig}

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
    ):
        # If the legacy `*_config_dict` kwargs exist, use them for backward compatibility. They are
        # popped before calling `super().__init__` so they are never saved to disk.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        # Instead of simply assigning `text_config_dict` to `text_config`, use its values to update
        # `text_config`, logging a message whenever the two disagree on a key.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # The complete result when using `text_config_dict`.
            _text_config_dict = CLIPTextConfig(**text_config_dict).to_dict()

            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # Explicitly specified in `text_config_dict`.
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # Inferred from default argument values.
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `CLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # The complete result when using `vision_config_dict`.
            _vision_config_dict = CLIPVisionConfig(**vision_config_dict).to_dict()
            # Convert `id2label` keys to strings instead of integers.
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # Explicitly specified in `vision_config_dict`.
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # Inferred from default argument values.
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `CLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = CLIPTextConfig()
            logger.info("`text_config` is `None`. Initializing the `CLIPTextConfig` with default values.")
        elif isinstance(text_config, dict):
            text_config = CLIPTextConfig(**text_config)

        if vision_config is None:
            vision_config = CLIPVisionConfig()
            logger.info("`vision_config` is `None`. Initializing the `CLIPVisionConfig` with default values.")
        elif isinstance(vision_config, dict):
            vision_config = CLIPVisionConfig(**vision_config)

        self.text_config = text_config
        self.vision_config = vision_config
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

        super().__init__(**kwargs)


__all__ = ["CLIPConfig", "CLIPTextConfig", "CLIPVisionConfig"]