[Binary artifact: CPython 3.10 compiled bytecode (.pyc) of
/home/ubuntu/vllm_env/lib/python3.10/site-packages/transformers/models/siglip2/modeling_siglip2.py.
The marshalled code object cannot be reproduced as text; only the embedded docstrings and symbol
names are recoverable. They describe the SigLIP2 modeling module of Hugging Face Transformers:

- Output dataclasses: Siglip2VisionOutput, Siglip2TextOutput, and Siglip2Output (loss,
  logits_per_image, logits_per_text, text_embeds, image_embeds, text_model_output,
  vision_model_output).
- Vision tower: Siglip2VisionEmbeddings (a linear patch projection plus positional embeddings that
  are bilinearly resized to each image's spatial shape and padded to a fixed max length),
  Siglip2Attention / Siglip2MLP / Siglip2EncoderLayer / Siglip2Encoder, Siglip2VisionTransformer,
  and Siglip2MultiheadAttentionPoolingHead for attention pooling of patch tokens.
- Text tower: Siglip2TextEmbeddings (token plus learned position embeddings) and
  Siglip2TextTransformer (final layer norm and a linear projection applied to the last token's
  hidden state).
- Initialization helpers: _trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_, and
  default_flax_embed_init, used by Siglip2PreTrainedModel._init_weights.
- Public models (__all__): Siglip2Model (contrastive image-text model with learned logit_scale and
  logit_bias and a pairwise sigmoid loss), Siglip2PreTrainedModel, Siglip2TextModel,
  Siglip2VisionModel, and Siglip2ForImageClassification (vision tower plus a linear classifier over
  mean-pooled patch tokens).

The embedded docstrings stress that text inputs must be tokenized with padding="max_length",
matching how the model was trained, and that image-text probabilities are obtained with a sigmoid
rather than a softmax.]