"""PyTorch ViT model."""

import collections.abc
import math
from typing import Callable, Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    ImageClassifierOutput,
    MaskedImageModelingOutput,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import TransformersKwargs, auto_docstring, logging, torch_int
from ...utils.generic import can_return_tuple, check_model_inputs
from .configuration_vit import ViTConfig


logger = logging.get_logger(__name__)


class ViTEmbeddings(nn.Module):
    """
    Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
    """

    def __init__(self, config: ViTConfig, use_mask_token: bool = False) -> None:
        super().__init__()

        self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
        self.patch_embeddings = ViTPatchEmbeddings(config)
        num_patches = self.patch_embeddings.num_patches
        self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.patch_size = config.patch_size
        self.config = config

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1

        # always interpolate when tracing so the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings

        class_pos_embed = self.position_embeddings[:, :1]
        patch_pos_embed = self.position_embeddings[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(
        self,
        pixel_values: torch.Tensor,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        interpolate_pos_encoding: bool = False,
    ) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        if bool_masked_pos is not None:
            seq_length = embeddings.shape[1]
            mask_tokens = self.mask_token.expand(batch_size, seq_length, -1)
            # replace the masked visual tokens by mask_tokens
            mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1.0 - mask) + mask_tokens * mask

        # add the [CLS] token to the embedded patch tokens
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings


class ViTPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
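
    Example (an illustrative sketch, assuming the default `ViTConfig` with 224x224 inputs and 16x16 patches):

    ```python
    >>> import torch
    >>> from transformers import ViTConfig
    >>> from transformers.models.vit.modeling_vit import ViTPatchEmbeddings

    >>> module = ViTPatchEmbeddings(ViTConfig())
    >>> list(module(torch.zeros(1, 3, 224, 224)).shape)  # (224 // 16) ** 2 = 196 patches
    [1, 196, 768]
    ```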
    """

    def __init__(self, config: ViTConfig):
        super().__init__()
        image_size, patch_size = config.image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.hidden_size

        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
                f" Expected {self.num_channels} but got {num_channels}."
            )
        if not interpolate_pos_encoding:
            if height != self.image_size[0] or width != self.image_size[1]:
                raise ValueError(
                    f"Input image size ({height}*{width}) doesn't match model"
                    f" ({self.image_size[0]}*{self.image_size[1]})."
                )
        embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
        return embeddings


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    # Take the dot product between "query" and "key" to get the raw attention scores.
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling

    # Normalize the attention scores to probabilities.
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)

    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    # Mask heads if we want to
    if attention_mask is not None:
        attn_weights = attn_weights * attention_mask

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class ViTSelfAttention(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.config = config
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.dropout_prob = config.attention_probs_dropout_prob
        self.scaling = self.attention_head_size**-0.5
        self.is_causal = False

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

    def forward(
        self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None
    ) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size = hidden_states.shape[0]
        new_shape = batch_size, -1, self.num_attention_heads, self.attention_head_size

        key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
        value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
        query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        context_layer, attention_probs = attention_interface(
            self,
            query_layer,
            key_layer,
            value_layer,
            head_mask,
            is_causal=self.is_causal,
            scaling=self.scaling,
            dropout=0.0 if not self.training else self.dropout_prob,
        )

        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.reshape(new_context_layer_shape)

        return context_layer, attention_probs


class ViTSelfOutput(nn.Module):
    """
    The residual connection is defined in ViTLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class ViTAttention(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.attention = ViTSelfAttention(config)
        self.output = ViTSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: set[int]) -> None:
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        self_attn_output, _ = self.attention(hidden_states, head_mask)
        attention_output = self.output(self_attn_output, hidden_states)
        return attention_output


class ViTIntermediate(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class ViTOutput(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        hidden_states = hidden_states + input_tensor

        return hidden_states


class ViTLayer(GradientCheckpointingLayer):
    """This corresponds to the Block class in the timm implementation."""

    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ViTAttention(config)
        self.intermediate = ViTIntermediate(config)
        self.output = ViTOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        # in ViT, layernorm is applied before self-attention
        hidden_states_norm = self.layernorm_before(hidden_states)
        attention_output = self.attention(hidden_states_norm, head_mask)

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in ViT, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        return layer_output


class ViTEncoder(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ViTLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> BaseModelOutput:
        for i, layer_module in enumerate(self.layer):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            hidden_states = layer_module(hidden_states, layer_head_mask)

        return BaseModelOutput(last_hidden_state=hidden_states)


@auto_docstring
class ViTPreTrainedModel(PreTrainedModel):
    config: ViTConfig
    base_model_prefix = "vit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _no_split_modules = ["ViTEmbeddings", "ViTLayer"]
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": ViTLayer,
        "attentions": ViTSelfAttention,
    }

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Upcast to `fp32` and cast back to the desired `dtype`, since `trunc_normal_`
            # is not implemented in `half` precision
            module.weight.data = nn.init.trunc_normal_(
                module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
            ).to(module.weight.dtype)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, ViTEmbeddings):
            module.position_embeddings.data = nn.init.trunc_normal_(
                module.position_embeddings.data.to(torch.float32),
                mean=0.0,
                std=self.config.initializer_range,
            ).to(module.position_embeddings.dtype)
            module.cls_token.data = nn.init.trunc_normal_(
                module.cls_token.data.to(torch.float32),
                mean=0.0,
                std=self.config.initializer_range,
            ).to(module.cls_token.dtype)
            if module.mask_token is not None:
                module.mask_token.data.zero_()


@auto_docstring
class ViTModel(ViTPreTrainedModel):
    def __init__(self, config: ViTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False):
        r"""
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        use_mask_token (`bool`, *optional*, defaults to `False`):
            Whether to use a mask token for masked image modeling.
        """
        super().__init__(config)
        self.config = config

        self.embeddings = ViTEmbeddings(config, use_mask_token=use_mask_token)
        self.encoder = ViTEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = ViTPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> ViTPatchEmbeddings:
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @check_model_inputs(tie_last_hidden_states=False)
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        interpolate_pos_encoding: Optional[bool] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
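
        Example (a minimal usage sketch; any image and any ViT checkpoint work the same way):

        ```python
        >>> from transformers import AutoImageProcessor, ViTModel
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
        >>> model = ViTModel.from_pretrained("google/vit-base-patch16-224-in21k")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        >>> list(outputs.last_hidden_state.shape)  # 196 patch tokens + 1 [CLS] token
        [1, 197, 768]
        ```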
        """
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        # Cast the input to the dtype expected by the patch projection, if needed
        expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype
        if pixel_values.dtype != expected_dtype:
            pixel_values = pixel_values.to(expected_dtype)

        embedding_output = self.embeddings(
            pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
        )

        encoder_outputs: BaseModelOutput = self.encoder(embedding_output, head_mask=head_mask)

        sequence_output = encoder_outputs.last_hidden_state
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
        )


class ViTPooler(nn.Module):
    def __init__(self, config: ViTConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.pooler_output_size)
        self.activation = ACT2FN[config.pooler_act]

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


@auto_docstring(
    custom_intro="""
    ViT Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://huggingface.co/papers/2111.09886).

    <Tip>

    Note that we provide a script to pre-train this model on custom data in our [examples
    directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).

    </Tip>
    """
)
class ViTForMaskedImageModeling(ViTPreTrainedModel):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__(config)

        self.vit = ViTModel(config, add_pooling_layer=False, use_mask_token=True)

        self.decoder = nn.Sequential(
            nn.Conv2d(
                in_channels=config.hidden_size,
                out_channels=config.encoder_stride**2 * config.num_channels,
                kernel_size=1,
            ),
            nn.PixelShuffle(config.encoder_stride),
        )

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        interpolate_pos_encoding: Optional[bool] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MaskedImageModelingOutput:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, ViTForMaskedImageModeling
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
        >>> model = ViTForMaskedImageModeling.from_pretrained("google/vit-base-patch16-224-in21k")

        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
        >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
        >>> # create random boolean mask of shape (batch_size, num_patches)
        >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
        >>> list(reconstructed_pixel_values.shape)
        [1, 3, 224, 224]
        ```
        """
        if bool_masked_pos is not None and (self.config.patch_size != self.config.encoder_stride):
            raise ValueError(
                "When `bool_masked_pos` is provided, `patch_size` must be equal to `encoder_stride` to ensure that "
                "the reconstructed image has the same dimensions as the input. "
                f"Got `patch_size` = {self.config.patch_size} and `encoder_stride` = {self.config.encoder_stride}."
            )

        outputs: BaseModelOutputWithPooling = self.vit(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            head_mask=head_mask,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )

        sequence_output = outputs.last_hidden_state

        # Reshape to (batch_size, num_channels, height, width)
        sequence_output = sequence_output[:, 1:]
        batch_size, sequence_length, num_channels = sequence_output.shape
        height = width = math.floor(sequence_length**0.5)
        sequence_output = sequence_output.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)

        # Reconstruct pixel values
        reconstructed_pixel_values = self.decoder(sequence_output)

        masked_im_loss = None
        if bool_masked_pos is not None:
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
            mask = (
                bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
                .repeat_interleave(self.config.patch_size, 2)
                .unsqueeze(1)
                .contiguous()
            )
            reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
            masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels

        return MaskedImageModelingOutput(
            loss=masked_im_loss,
            reconstruction=reconstructed_pixel_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
    the [CLS] token) e.g. for ImageNet.

    <Tip>

        Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by
        setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
        position embeddings to the higher resolution.

    </Tip>
    """
)
class ViTForImageClassification(ViTPreTrainedModel):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.vit = ViTModel(config, add_pooling_layer=False)

        # Classifier head
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        interpolate_pos_encoding: Optional[bool] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> ImageClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
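
        Example (a usage sketch; the predicted label shown assumes the `google/vit-base-patch16-224` checkpoint):

        ```python
        >>> from transformers import AutoImageProcessor, ViTForImageClassification
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
        >>> model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     logits = model(**inputs).logits

        >>> # the model predicts one of the 1000 ImageNet classes
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_class_idx])
        Egyptian cat
        ```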
        """
        outputs: BaseModelOutputWithPooling = self.vit(
            pixel_values,
            head_mask=head_mask,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )

        sequence_output = outputs.last_hidden_state

        # classify on the final hidden state of the [CLS] token
        logits = self.classifier(sequence_output[:, 0, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(labels, logits, self.config, **kwargs)

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel"]