"""PyTorch VideoMAE (masked autoencoder) model."""

import collections.abc
from copy import deepcopy
from dataclasses import dataclass
from typing import Callable, Optional, Union

import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import ModelOutput, auto_docstring, logging
from ...utils.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .configuration_videomae import VideoMAEConfig


logger = logging.get_logger(__name__)


@dataclass
@auto_docstring(
    custom_intro="""
    Class for VideoMAEDecoder's outputs, with potential hidden states and attentions.
    """
)
class VideoMAEDecoderOutput(ModelOutput):
    r"""
    logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`):
        Pixel reconstruction logits.
    """

    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Class for VideoMAEForPreTraining's outputs, with potential hidden states and attentions.
    """
)
class VideoMAEForPreTrainingOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`):
        Pixel reconstruction loss.
    logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`):
        Pixel reconstruction logits.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None


def get_sinusoid_encoding_table(n_position, d_hid):
    """Sinusoid position encoding table"""

    def get_position_angle_vec(position):
        return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]

    sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i + 1

    return torch.FloatTensor(sinusoid_table).unsqueeze(0)


class VideoMAEEmbeddings(nn.Module):
    """
    Construct the patch and position embeddings.
    """

    def __init__(self, config):
        super().__init__()

        self.patch_embeddings = VideoMAEPatchEmbeddings(config)
        self.num_patches = self.patch_embeddings.num_patches
        # fixed sin-cos position embedding
        self.position_embeddings = get_sinusoid_encoding_table(self.num_patches, config.hidden_size)
        self.config = config

    def forward(self, pixel_values, bool_masked_pos):
        # create patch embeddings
        embeddings = self.patch_embeddings(pixel_values)

        # add position embeddings
        embeddings = embeddings + self.position_embeddings.detach().type_as(embeddings).to(
            embeddings.device, copy=True
        )

        # only keep visible patches
        if bool_masked_pos is not None:
            batch_size, _, num_channels = embeddings.shape
            embeddings = embeddings[~bool_masked_pos]
            embeddings = embeddings.reshape(batch_size, -1, num_channels)

        return embeddings


class VideoMAEPatchEmbeddings(nn.Module):
    """
    Video to Patch Embedding. This module turns a batch of videos of shape (batch_size, num_frames, num_channels,
    height, width) into a tensor of shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.

    The seq_len (the number of patches) equals (number of frames // tubelet_size) * (height // patch_size) * (width //
    patch_size).
    """

    def __init__(self, config):
        super().__init__()

        image_size = config.image_size
        patch_size = config.patch_size
        num_channels = config.num_channels
        hidden_size = config.hidden_size
        num_frames = config.num_frames
        tubelet_size = config.tubelet_size

        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        self.image_size = image_size
        self.patch_size = patch_size
        self.tubelet_size = int(tubelet_size)
        num_patches = (
            (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) * (num_frames // self.tubelet_size)
        )
        self.num_channels = num_channels
        self.num_patches = num_patches
        self.projection = nn.Conv3d(
            in_channels=num_channels,
            out_channels=hidden_size,
            kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]),
            stride=(self.tubelet_size, patch_size[0], patch_size[1]),
        )

    def forward(self, pixel_values):
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values matches the one set in the configuration."
            )
        if height != self.image_size[0] or width != self.image_size[1]:
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
            )
        # permute to (batch_size, num_channels, num_frames, height, width)
        pixel_values = pixel_values.permute(0, 2, 1, 3, 4)
        embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
        return embeddings
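

# Illustrative sketch, not part of the upstream file: running a dummy batch through the
# patch embedding shows how the Conv3d turns (batch, frames, channels, height, width)
# into (batch, seq_len, hidden_size). The concrete numbers assume the default config
# (16 frames, tubelet size 2, 224x224 images, 16x16 patches, hidden size 768).
def _example_patch_embedding_shapes():
    config = VideoMAEConfig()
    embedder = VideoMAEPatchEmbeddings(config)
    video = torch.randn(2, config.num_frames, config.num_channels, config.image_size, config.image_size)
    patches = embedder(video)
    # seq_len = (16 // 2) * (224 // 16) * (224 // 16) = 1568 patches per video
    assert patches.shape == (2, 1568, config.hidden_size)
    return patches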


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    # Take the dot product between "query" and "key" to get the raw attention scores.
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling

    # Normalize the attention scores to probabilities.
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    # Mask heads if we want to
    if attention_mask is not None:
        attn_weights = attn_weights * attention_mask

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
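

# Illustrative sketch, not part of the upstream file: the helper above expects head-split
# tensors of shape (batch, num_heads, seq_len, head_dim) and returns the attention output
# transposed back to (batch, seq_len, num_heads, head_dim) together with the attention
# weights. Only `module.training` is read from the first argument, so a bare nn.Module()
# is enough for this sketch.
def _example_eager_attention_shapes():
    batch, num_heads, seq_len, head_dim = 2, 12, 196, 64
    query = torch.randn(batch, num_heads, seq_len, head_dim)
    key = torch.randn(batch, num_heads, seq_len, head_dim)
    value = torch.randn(batch, num_heads, seq_len, head_dim)
    attn_output, attn_weights = eager_attention_forward(
        nn.Module(), query, key, value, attention_mask=None, scaling=head_dim**-0.5
    )
    assert attn_output.shape == (batch, seq_len, num_heads, head_dim)
    assert attn_weights.shape == (batch, num_heads, seq_len, seq_len)
    return attn_output, attn_weights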


class VideoMAESelfAttention(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.config = config
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.dropout_prob = config.attention_probs_dropout_prob
        self.scaling = self.attention_head_size**-0.5
        self.is_causal = False

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False)

        if config.qkv_bias:
            self.q_bias = nn.Parameter(torch.zeros(self.all_head_size))
            self.v_bias = nn.Parameter(torch.zeros(self.all_head_size))
        else:
            self.q_bias = None
            self.v_bias = None

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
        k_bias = torch.zeros_like(self.v_bias, requires_grad=False) if self.q_bias is not None else None
        keys = nn.functional.linear(input=hidden_states, weight=self.key.weight, bias=k_bias)
        values = nn.functional.linear(input=hidden_states, weight=self.value.weight, bias=self.v_bias)
        queries = nn.functional.linear(input=hidden_states, weight=self.query.weight, bias=self.q_bias)

        key_layer = self.transpose_for_scores(keys)
        value_layer = self.transpose_for_scores(values)
        query_layer = self.transpose_for_scores(queries)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and output_attentions:
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        context_layer, attention_probs = attention_interface(
            self,
            query_layer,
            key_layer,
            value_layer,
            head_mask,
            is_causal=self.is_causal,
            scaling=self.scaling,
            dropout=0.0 if not self.training else self.dropout_prob,
        )

        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.reshape(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs


class VideoMAESelfOutput(nn.Module):
    """
    The residual connection is defined in VideoMAELayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states
ej	de
ej	 dedeeej	ej	f eej	 f fddZ  ZS )VideoMAEAttentionrJ   r   Nc                    s*   t    t|| _t|| _t | _d S rB   )rC   rD   r   	attentionr   outputsetpruned_headsrK   rM   r(   r)   rD   5  s   


zVideoMAEAttention.__init__headsc                 C   s   t |dkrd S t|| jj| jj| j\}}t| jj|| j_t| jj|| j_t| jj	|| j_	t| j
j|dd| j
_| jjt | | j_| jj| jj | j_| j|| _d S )Nr   r   r   )lenr   r   r   r   r   r   rz   r{   r|   r   r   r   union)rL   r   indexr(   r(   r)   prune_heads;  s   zVideoMAEAttention.prune_headsFr   r   r   c                 C   s4   |  |||}| |d |}|f|dd   }|S )Nr   r   )r   r   )rL   r   r   r   self_outputsattention_outputr   r(   r(   r)   r^   M  s   zVideoMAEAttention.forwardr   )r    r!   r"   r   rD   r   rm   r   r$   r   r   r   r   r'   r^   r`   r(   r(   rM   r)   r   4  s    r   c                       s<   e Zd Zdeddf fddZdejdejfddZ  ZS )	VideoMAEIntermediaterJ   r   Nc                    sD   t    t|j|j| _t|jt	rt


class VideoMAEIntermediate(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)

        return hidden_states


class VideoMAEOutput(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        hidden_states = hidden_states + input_tensor

        return hidden_states


class VideoMAELayer(GradientCheckpointingLayer):
    """This corresponds to the Block class in the timm implementation."""

    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = VideoMAEAttention(config)
        self.intermediate = VideoMAEIntermediate(config)
        self.output = VideoMAEOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
        self_attention_outputs = self.attention(
            self.layernorm_before(hidden_states),  # in VideoMAE, layernorm is applied before self-attention
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in VideoMAE, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        outputs = (layer_output,) + outputs

        return outputs
ededede	e
ef fddZ  ZS )VideoMAEEncoderrJ   r   Nc                    s:   t     | _t fddt jD | _d| _d S )Nc                       g | ]}t  qS r(   r   r/   r\   rJ   r(   r)   r3     r8   z,VideoMAEEncoder.__init__.<locals>.<listcomp>F)	rC   rD   rJ   r   
ModuleListr4   num_hidden_layerslayergradient_checkpointingrK   rM   r   r)   rD     s   
 
zVideoMAEEncoder.__init__FTr   r   r   output_hidden_statesreturn_dictc                 C   s   |rdnd }|r
dnd }t | jD ](\}}	|r||f }|d ur$|| nd }
|	||
|}|d }|r9||d f }q|rA||f }|sOtdd |||fD S t|||dS )Nr(   r   r   c                 s       | ]	}|d ur|V  qd S rB   r(   r/   vr(   r(   r)   	<genexpr>      z*VideoMAEEncoder.forward.<locals>.<genexpr>last_hidden_stater   r   )	enumerater   r'   r   )rL   r   r   r   r   r   all_hidden_statesall_self_attentionsilayer_modulelayer_head_masklayer_outputsr(   r(   r)   r^     s(   

zVideoMAEEncoder.forward)NFFT)r    r!   r"   r   rD   r$   r   r   r   r   r'   r   r^   r`   r(   r(   rM   r)   r     s&    	


@auto_docstring
class VideoMAEPreTrainedModel(PreTrainedModel):
    config_class = VideoMAEConfig
    base_model_prefix = "videomae"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _supports_sdpa = True
    _supports_flash_attn_2 = True
    _supports_flex_attn = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv3d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


@auto_docstring
class VideoMAEModel(VideoMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = VideoMAEEmbeddings(config)
        self.encoder = VideoMAEEncoder(config)

        if config.use_mean_pooling:
            self.layernorm = None
        else:
            self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the
            batch must have the same number of masked patches. If `None`, then all patches are considered. Sequence
            length is `(num_frames // tubelet_size) * (image_size // patch_size) ** 2`.

        Examples:

        ```python
        >>> import av
        >>> import numpy as np

        >>> from transformers import AutoImageProcessor, VideoMAEModel
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`list[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 16 frames
        >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container, indices)

        >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
        >>> model = VideoMAEModel.from_pretrained("MCG-NJU/videomae-base")

        >>> # prepare video for the model
        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 1568, 768]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Prepare head mask if needed (1.0 in head_mask indicates we keep the head)
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values, bool_masked_pos)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        if self.layernorm is not None:
            sequence_output = self.layernorm(sequence_output)

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
 _|j _t fddt|jD | _t|j| _|dkrFt|j|nt | _d| _|| _d S )Nr,   c                    r   r(   r   r   decoder_configr(   r)   r3     r8   z,VideoMAEDecoder.__init__.<locals>.<listcomp>r   F)rC   rD   r]   rh   rf   r   decoder_hidden_sizerH   decoder_num_hidden_layersr   decoder_num_attention_headsr   decoder_intermediate_sizer   r   r   r4   decoder_layersr   normr   Identityheadr   rJ   )rL   rJ   rG   decoder_num_labelsrM   r'  r)   rD     s   

zVideoMAEDecoder.__init__FTc                 C   s   |rdnd }|r
dnd }t | jD ]\}}	|r||f }|	|d |d}
|
d }|r0||
d f }q|r8||f }|dkrG|d d | d f }| |}| |}|s_tdd |||fD S t|||dS )Nr(   )r   r   r   r   c                 s   r   rB   r(   r   r(   r(   r)   r     r   z*VideoMAEDecoder.forward.<locals>.<genexpr>)r   r   r   )r   r-  r.  r0  r'   r   )rL   r   return_token_numr   r   r   r   r   r   r  r  r   r(   r(   r)   r^     s&   	



zVideoMAEDecoder.forward)FFT)r    r!   r"   rD   r^   r`   r(   r(   rM   r)   r&    s    r&  zb


@auto_docstring(
    custom_intro="""
    The VideoMAE Model transformer with the decoder on top for self-supervised pre-training.
    """
)
class VideoMAEForPreTraining(VideoMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.videomae = VideoMAEModel(config)

        self.encoder_to_decoder = nn.Linear(config.hidden_size, config.decoder_hidden_size, bias=False)
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size))
        self.position_embeddings = get_sinusoid_encoding_table(
            self.videomae.embeddings.num_patches, config.decoder_hidden_size
        )

        self.decoder = VideoMAEDecoder(config, num_patches=self.videomae.embeddings.num_patches)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        bool_masked_pos: torch.BoolTensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, VideoMAEForPreTrainingOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the
            batch must have the same number of masked patches. Sequence length is `(num_frames // tubelet_size) *
            (image_size // patch_size) ** 2`.

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, VideoMAEForPreTraining
        >>> import numpy as np
        >>> import torch

        >>> num_frames = 16
        >>> video = list(np.random.randint(0, 256, (num_frames, 3, 224, 224)))

        >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
        >>> model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base")

        >>> pixel_values = image_processor(video, return_tensors="pt").pixel_values

        >>> num_patches_per_frame = (model.config.image_size // model.config.patch_size) ** 2
        >>> seq_length = (num_frames // model.config.tubelet_size) * num_patches_per_frame
        >>> bool_masked_pos = torch.randint(0, 2, (1, seq_length)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss = outputs.loss
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.videomae(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        # [batch_size, num_visible_patches, decoder_hidden_size]
        sequence_output = self.encoder_to_decoder(sequence_output)
        batch_size, seq_len, num_channels = sequence_output.shape

        # we don't unshuffle the visible tokens; we shuffle the position embeddings accordingly instead
        if bool_masked_pos is None:
            raise ValueError("One must provide a boolean mask.")
        expanded_position_embeddings = self.position_embeddings.expand(batch_size, -1, -1).type_as(pixel_values)
        expanded_position_embeddings = expanded_position_embeddings.to(pixel_values.device).clone().detach()
        pos_emb_visible = expanded_position_embeddings[~bool_masked_pos].reshape(batch_size, -1, num_channels)
        pos_emb_mask = expanded_position_embeddings[bool_masked_pos].reshape(batch_size, -1, num_channels)

        # [batch_size, num_patches, decoder_hidden_size]
        x_full = torch.cat([sequence_output + pos_emb_visible, self.mask_token + pos_emb_mask], dim=1)

        # [batch_size, num_masked_patches, num_channels * patch_size * patch_size]
        decoder_outputs = self.decoder(x_full, pos_emb_mask.shape[1])
        logits = decoder_outputs.logits

        loss = None
        with torch.no_grad():
            # calculate the labels to be predicted
            if self.config.num_channels != 3:
                # Can't unnormalize with default means/stds
                frames = pixel_values
            else:
                # first, unnormalize the frames
                device = pixel_values.device
                dtype = pixel_values.dtype
                mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device=device, dtype=dtype)[None, None, :, None, None]
                std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device=device, dtype=dtype)[None, None, :, None, None]
                frames = pixel_values * std + mean  # in [0, 1]

            batch_size, time, num_channels, height, width = frames.shape
            tubelet_size, patch_size = self.config.tubelet_size, self.config.patch_size
            if self.config.norm_pix_loss:
                # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size)
                frames = frames.view(
                    batch_size,
                    time // tubelet_size,
                    tubelet_size,
                    num_channels,
                    height // patch_size,
                    patch_size,
                    width // patch_size,
                    patch_size,
                )
                # step 2: move dimensions to concatenate:
                frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
                # step 3: concatenate
                frames = frames.view(
                    batch_size,
                    time // tubelet_size * height // patch_size * width // patch_size,
                    tubelet_size * patch_size * patch_size,
                    num_channels,
                )
                # step 4: normalize each patch using its own mean and standard deviation
                frames_norm = (frames - frames.mean(dim=-2, keepdim=True)) / (
                    frames.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6
                )
                # step 5: reshape to (batch_size, T // ts * H // ps * W // ps, ts * ps * ps * C)
                videos_patch = frames_norm.view(
                    batch_size,
                    time // tubelet_size * height // patch_size * width // patch_size,
                    tubelet_size * patch_size * patch_size * num_channels,
                )
            else:
                if self.config.num_channels != 3:
                    raise ValueError(
                        "Can't unnormalize non-RGB images. Consider setting `config.norm_pix_loss` to `True`."
                    )
                # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size)
                frames = frames.view(
                    batch_size,
                    time // tubelet_size,
                    tubelet_size,
                    num_channels,
                    height // patch_size,
                    patch_size,
                    width // patch_size,
                    patch_size,
                )
                # step 2: move dimensions to concatenate:
                frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
                # step 3: concatenate
                videos_patch = frames.view(
                    batch_size,
                    time // tubelet_size * height // patch_size * width // patch_size,
                    tubelet_size * patch_size * patch_size * num_channels,
                )

            batch_size, _, num_channels = videos_patch.shape
            labels = videos_patch[bool_masked_pos].reshape(batch_size, -1, num_channels)

        loss_fct = MSELoss()
        loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return VideoMAEForPreTrainingOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    VideoMAE Model transformer with a video classification head on top (a linear layer on top of the average pooled
    hidden states of all tokens) e.g. for ImageNet.
    """
)
class VideoMAEForVideoClassification(VideoMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.videomae = VideoMAEModel(config)

        # Classifier head
        self.fc_norm = nn.LayerNorm(config.hidden_size) if config.use_mean_pooling else None
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Examples:

        ```python
        >>> import av
        >>> import torch
        >>> import numpy as np

        >>> from transformers import AutoImageProcessor, VideoMAEForVideoClassification
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`list[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 16 frames
        >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container, indices)

        >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
        >>> model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")

        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        ...     logits = outputs.logits

        >>> # model predicts one of the 400 Kinetics-400 classes
        >>> predicted_label = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_label])
        eating spaghetti
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.videomae(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        if self.fc_norm is not None:
            sequence_output = self.fc_norm(sequence_output.mean(1))
        else:
            sequence_output = sequence_output[:, 0]

        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
rP  )r3  r  r  rP  )rx   )Cr#   collections.abcrj   rQ   r   dataclassesr   typingr   r   r   numpyr-   r$   torch.utils.checkpointr   torch.nnr   r	   r
   activationsr   modeling_layersr   modeling_outputsr   r   modeling_utilsr   r   pytorch_utilsr   r   utilsr   r   r   utils.constantsr   r   configuration_videomaer   
get_loggerr    r   r   r*   r?   ModulerA   rE   r   floatr   r   r   r   r   r   r   r   r  r  r&  r3  rP  __all__r(   r(   r(   r)   <module>   s   
!=
J(++ < 2 !