"""PyTorch GIT model."""

import math
from collections.abc import Callable
from dataclasses import dataclass

import torch
from torch import nn

from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PreTrainedConfig
from ...generation import GenerationMixin
from ...masking_utils import create_masks_for_generate
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPast,
    BaseModelOutputWithPooling,
    CausalLMOutputWithPast,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import apply_chunking_to_forward
from ...utils import (
    ModelOutput,
    TransformersKwargs,
    auto_docstring,
    can_return_tuple,
    logging,
    torch_int,
)
from ...utils.deprecation import deprecate_kwarg
from .configuration_git import GitConfig, GitVisionConfig


logger = logging.get_logger(__name__)


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
    """
)
class GitVisionModelOutput(ModelOutput):
    r"""
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
        The image embeddings obtained by applying the projection layer to the pooler_output.
    """

    image_embeds: torch.FloatTensor | None = None
    last_hidden_state: torch.FloatTensor | None = None
    hidden_states: tuple[torch.FloatTensor, ...] | None = None
    attentions: tuple[torch.FloatTensor, ...] | None = None
def token_type_ids_mask_function(
    token_type_ids: torch.Tensor | None,
    image_group_ids: torch.Tensor | None,
) -> Callable | None:
    """
    This function adds the correct offsets to the `q_idx` and `kv_idx` as the torch API can only accept lengths,
    not start and end indices.
    """
    if token_type_ids is None:
        return None

    def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
        # Static caches can be longer than the input sequence, so indices beyond
        # `token_type_ids.shape[1]` are clamped; `torch.where` is used because vmap
        # does not support `if` statements.
        safe_q_idx = torch.where(q_idx < token_type_ids.shape[1], q_idx, 0)
        safe_kv_idx = torch.where(kv_idx < token_type_ids.shape[1], kv_idx, 0)

        token_type_ids_at_q_idx = token_type_ids[batch_idx, safe_q_idx]
        token_type_ids_at_q_idx = torch.where(q_idx < token_type_ids.shape[1], token_type_ids_at_q_idx, 0)
        token_type_ids_at_kv_idx = token_type_ids[batch_idx, safe_kv_idx]
        token_type_ids_at_kv_idx = torch.where(kv_idx < token_type_ids.shape[1], token_type_ids_at_kv_idx, 0)

        image_group_ids_at_q_idx = image_group_ids[batch_idx, safe_q_idx]
        image_group_ids_at_q_idx = torch.where(q_idx < image_group_ids.shape[1], image_group_ids_at_q_idx, -1)
        image_group_ids_at_kv_idx = image_group_ids[batch_idx, safe_kv_idx]
        image_group_ids_at_kv_idx = torch.where(kv_idx < image_group_ids.shape[1], image_group_ids_at_kv_idx, -1)

        # Both positions are image tokens (token type 1) ...
        is_image_block = (token_type_ids_at_q_idx == 1) & (token_type_ids_at_kv_idx == 1)
        # ... and belong to the same image
        same_image_block = image_group_ids_at_q_idx == image_group_ids_at_kv_idx

        # This is bidirectional attention whenever we are dealing with image tokens
        return is_image_block & same_image_block

    return inner_mask
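# Illustrative sketch (not part of the model): with token_type_ids = [[1, 1, 0, 0]] and
# image_group_ids = [[0, 0, -1, -1]], `inner_mask(0, h, q_idx, kv_idx)` is True exactly when both
# positions are image tokens of the same group — e.g. (q_idx=0, kv_idx=1) -> True — so the two
# image tokens attend to each other bidirectionally. Text positions return False here and fall
# back to the regular causal mask that this function is OR-ed with (see `or_mask_function` below).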
@deprecate_kwarg("input_embeds", version="5.6.0", new_name="inputs_embeds")
def create_causal_mask_mapping(
    config: PreTrainedConfig,
    inputs_embeds: torch.Tensor,
    attention_mask: torch.Tensor | None,
    cache_position: torch.Tensor,
    past_key_values: Cache | None,
    position_ids: torch.Tensor | None,
    token_type_ids: torch.Tensor | None = None,
    pixel_values: torch.Tensor | None = None,
    is_training: bool = False,
    is_first_iteration: bool | None = None,
    **kwargs,
) -> dict:
    """
    Overwrites the base `create_masks_for_generate` with `token_type_ids` masking to create the causal mask mapping
    for all kinds of forward passes. Gemma3 uses a bidirectional mask for images.

    Uses `pixel_values` as an optional input to disambiguate edge cases.
    """
    if is_training and token_type_ids is None:
        raise ValueError("`token_type_ids` is required as a model input when training")

    mask_kwargs = {
        "config": config,
        "inputs_embeds": inputs_embeds,
        "attention_mask": attention_mask,
        "cache_position": cache_position,
        "past_key_values": past_key_values,
        "position_ids": position_ids,
    }
    # Infer whether this is the first (prefill) iteration when not given: no initialized cache
    # yet, or pixel values being passed (the image prompt is only forwarded once)
    is_first_iteration = (
        is_first_iteration
        if is_first_iteration is not None
        else (past_key_values is None or not past_key_values.is_initialized or pixel_values is not None)
    )

    if token_type_ids is not None and is_first_iteration:
        # Any image token may attend to any other token of the same image bidirectionally
        is_image = (token_type_ids == 1).to(cache_position.device)
        is_previous_image = nn.functional.pad(is_image, (1, 0), value=0)[:, :-1]
        new_image_start = is_image & ~is_previous_image
        image_group_ids = torch.cumsum(new_image_start.int(), dim=1) - 1
        image_group_ids = torch.where(is_image, image_group_ids, torch.full_like(token_type_ids, -1))
        mask_kwargs["or_mask_function"] = token_type_ids_mask_function(
            token_type_ids.to(cache_position.device), image_group_ids
        )

    return create_masks_for_generate(**mask_kwargs)
class GitEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if inputs_embeds is None:
            embeddings = self.word_embeddings(input_ids)
        else:
            embeddings = inputs_embeds

        position_embeddings = self.position_embeddings(position_ids)
        embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
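# Note on sequence layout (illustrative): the text decoder attends over the projected image tokens
# followed by the text tokens. With the default GitVisionConfig (image_size=224, patch_size=16),
# each image contributes (224 // 16) ** 2 + 1 = 197 tokens (patches plus CLS); this is the
# `image_patch_tokens` count computed in GitSelfAttention below.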
class GitSelfAttention(nn.Module):
    def __init__(self, config, layer_idx=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads "
                f"({config.num_attention_heads})"
            )
        self.config = config
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a "
                "`layer_idx` when creating this class."
            )
        self.layer_idx = layer_idx

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.image_patch_tokens = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1)
        if config.num_image_with_embedding is not None:
            self.image_patch_tokens *= config.num_image_with_embedding

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.FloatTensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor]:
        batch_size = hidden_states.shape[0]
        query_layer = (
            self.query(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        key_layer = (
            self.key(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )
        value_layer = (
            self.value(hidden_states)
            .view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
            .transpose(1, 2)
        )

        if past_key_values is not None:
            key_layer, value_layer = past_key_values.update(
                key_layer, value_layer, self.layer_idx, cache_kwargs={"cache_position": cache_position}
            )

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in GitModel's forward)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        return context_layer, attention_probs
d S Nrg   )rl   rm   r   r   rp   denseru   rv   rw   rx   ry   r}   r   r.   r/   rm   .     
zGitSelfOutput.__init__r$   input_tensorr2   c                 C   &   |  |}| |}| || }|S r   r   ry   ru   r~   r$   r   r.   r.   r/   r   4     

zGitSelfOutput.forwardr&   r'   r(   rm   r*   r   r   r   r.   r.   r   r/   r   -      $r   eagerc                       sf   e Zd Zd fdd	Z				ddejdejdB dedB dejdB d	edB d
e	ej fddZ
  ZS )GitAttentionNc                    s,   t    t|j ||d| _t|| _d S )Nr   )rl   rm   GIT_SELF_ATTENTION_CLASSES_attn_implementationr~   r   outputr   r   r.   r/   rm   A  s   
zGitAttention.__init__Fr$   rL   rN   rM   output_attentionsr2   c           	      C   s*   | j ||||d\}}| ||}||fS )N)rM   )r~   r   )	r~   r$   rL   rN   rM   r   attn_outputself_attn_weightsattention_outputr.   r.   r/   r   F  s   
zGitAttention.forwardr   NNNF)r&   r'   r(   rm   r*   r   r+   r   rE   r-   r   r   r.   r.   r   r/   r   @  s&    r   c                       2   e Zd Z fddZdejdejfddZ  ZS )GitIntermediatec                    sD   t    t|j|j| _t|jt	rt
|j | _d S |j| _d S r   )rl   rm   r   r   rp   intermediate_sizer   
isinstance
hidden_actstrr   intermediate_act_fnr}   r   r.   r/   rm   Z  s
   
zGitIntermediate.__init__r$   r2   c                 C   s   |  |}| |}|S r   )r   r   r~   r$   r.   r.   r/   r   b  s   

zGitIntermediate.forwardr   r.   r.   r   r/   r   Y  s    r   c                       r   )	GitOutputc                    sB   t    t|j|j| _tj|j|jd| _t	|j
| _d S r   )rl   rm   r   r   r   rp   r   ru   rv   rw   rx   ry   r}   r   r.   r/   rm   j  r   zGitOutput.__init__r$   r   r2   c                 C   r   r   r   r   r.   r.   r/   r   p  r   zGitOutput.forwardr   r.   r.   r   r/   r   i  r   r   c                       sn   e Zd Zd fdd	Z				ddejdejdB dedB dejdB d	edB d
e	ej fddZ
dd Z  ZS )GitLayerNc                    s>   t    |j| _d| _t||d| _t|| _t|| _	d S )Nr   r   )
rl   rm   chunk_size_feed_forwardseq_len_dimr   	attentionr   intermediater   r   r   r   r.   r/   rm   x  s   

zGitLayer.__init__Fr$   rL   rN   rM   r   r2   c           	      C   s4   | j |||||d\}}t| j| j| j|}||fS )N)r   rN   rM   )r   r   feed_forward_chunkr   r   )	r~   r$   rL   rN   rM   r   r   self_attention_weightslayer_outputr.   r.   r/   r     s   	
zGitLayer.forwardc                 C   s   |  |}| ||}|S r   )r   r   )r~   r   intermediate_outputr   r.   r.   r/   r     s   
zGitLayer.feed_forward_chunkr   r   )r&   r'   r(   rm   r*   r   r+   r   rE   r-   r   r   r   r.   r.   r   r/   r   w  s(    
r   c                       s   e Zd Z fddZ							ddejdejdB dedB d	edB d
edB dedB dedB dejdB de	ej e
B fddZ  ZS )
GitEncoderc                    :   t     | _t fddt jD | _d| _d S )Nc                    s   g | ]}t  |qS r.   )r   ).0irK   r.   r/   
<listcomp>  s    z'GitEncoder.__init__.<locals>.<listcomp>F)	rl   rm   rK   r   
ModuleListrangenum_hidden_layerslayergradient_checkpointingr}   r   r   r/   rm        
 
zGitEncoder.__init__NFTr$   rL   rN   	use_cacher   output_hidden_statesreturn_dictrM   r2   c	                 C   s   | j r| jr|rtd d}|r|d u rt| jd}|rdnd }	|r%dnd }
t| jD ] \}}|r7|	|f }	||||||}|d }|rL|
|d f }
q,|rT|	|f }	|sctdd |||	|
fD S t	|||	|
d	S )
NzZ`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...Fr   r.   r   r   c                 s   s    | ]	}|d ur|V  qd S r   r.   )r   vr.   r.   r/   	<genexpr>  s    z%GitEncoder.forward.<locals>.<genexpr>r#   rN   r$   r%   )
r   trainingr   r   r	   rK   	enumerater   r-   r   )r~   r$   rL   rN   r   r   r   r   rM   all_hidden_statesall_self_attentionsr   layer_modulelayer_outputsr.   r.   r/   r     sP   


zGitEncoder.forward)NNNFFTN)r&   r'   r(   rm   r*   r   r+   r   rE   r-   r   r   r   r.   r.   r   r/   r     s8    		
r   c                   @   s2   e Zd ZU eed< dZdZdZe	 dd Z
dS )GitPreTrainedModelrK   git)imagetextTc                 C   sb  t |tr7tj|jd| jjd tj|jj| jjd tj|j	j| jjd t
|jt|jjd d t |tjrWtj|jd| jjd |jdurUt|j dS dS t |tjrtj|jd| jjd |jdurt|jddst|j|j  dS dS dS t |tjrt|j t|j dS t |trt
|jt|jjd d dS dS )	zInitialize the weights        )meanstd)r   r7   ri   N_is_hf_initializedF)r   GitVisionEmbeddingsinitnormal_class_embeddingrK   initializer_rangepatch_embeddingweightposition_embeddingcopy_rO   r*   r{   r9   r|   r   r   biaszeros_rn   rf   getattrru   ones_re   )r~   moduler.   r.   r/   _init_weights  s*   
"

&z GitPreTrainedModel._init_weightsN)r&   r'   r(   r   r,   base_model_prefixinput_modalitiessupports_gradient_checkpointingr*   no_gradr  r.   r.   r.   r/   r     s   
 r   c                       sX   e Zd Zdef fddZdejdededejfdd	Zddej	dejfddZ
  ZS )r   rK   c                    s   t    || _|j| _|j| _|j| _tt	
| j| _tj|j| j| j| jdd| _| j| j d | _| jd | _t| j| j| _| jdt	| jddd d S )NF)in_channelsout_channelskernel_sizestrider  r   r   rO   ri   rj   )rl   rm   rK   rp   	embed_dimr   r   r   	Parameterr*   randnr   Conv2dnum_channelsr  num_patchesnum_positionsrn   r  rz   r{   r|   r}   r   r.   r/   rm     s"   
"zGitVisionEmbeddings.__init__r   heightwidthr2   c                 C   s  |j d d }| jjd}|j d d }tj s(||kr(||kr(| | jS |ddddf }|ddddf }|j d }	|| j }
|| j }t	|d }|
d|||	}|dddd}tjj||
|fdd	d
}|dddddd|	}tj||fddS )a   
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        r   r   Nr7   g      ?r   r   bicubicF)r   modealign_cornersrT   )r9   r  r  	unsqueezer*   jit
is_tracingrO   r   r   reshaper   r   r\   interpolater   cat)r~   r   r  r  r  r  r  class_pos_embedpatch_pos_embedrU   
new_height	new_widthsqrt_num_positionsr.   r.   r/   interpolate_pos_encoding  s*   



z,GitVisionEmbeddings.interpolate_pos_encodingFrP   c              
   C   s   |j \}}}}|s&|| jks|| jkr&td| d| d| j d| j d	| jjj}| |j|d}|ddd}| j	
|dd}	tj|	|gdd	}
|r[|
| |
|| }
|
S |
| | j }
|
S )
NzInput image size (*z) doesn't match model ().dtyper   r   r7   rT   )r9   r   rW   r  r  r/  rZ   flattenr   r   r|   r*   r%  r+  r  rO   )r~   rP   r+  r   _r  r  target_dtypepatch_embedsclass_embedsr   r.   r.   r/   r   @  s    
zGitVisionEmbeddings.forwardF)r&   r'   r(   r   rm   r*   r   rD   r+  r+   r   r   r.   r.   r   r/   r      s     )r   c                       r   )GitVisionMLPc                    sD   t    || _t|j | _t|j|j	| _
t|j	|j| _d S r   )rl   rm   rK   r   r   activation_fnr   r   rp   r   fc1fc2r}   r   r.   r/   rm   T  s
   
zGitVisionMLP.__init__r$   r2   c                 C   s"   |  |}| |}| |}|S r   )r8  r7  r9  r   r.   r.   r/   r   [  s   


zGitVisionMLP.forwardr   r.   r.   r   r/   r6  S  s    r6  r   r
  r   r   rS   scalingry   c           
      K   s|   t ||dd| }|d ur|| }tjj|dt jd|j}tjj	||| j
d}t ||}	|	dd }	|	|fS )Nr7   r   )rU   r/  )pr   r   r   )r*   r   r   r   r\   r   float32rZ   r/  ry   r   r   )
r
  r   r   rS   rL   r:  ry   r_   attn_weightsr   r.   r.   r/   eager_attention_forwardc  s   
r>  c                       s\   e Zd ZdZ fddZ		ddejdejdB dedB d	eejejdB f fd
dZ	  Z
S )GitVisionAttentionz=Multi-headed attention from 'Attention Is All You Need' paperc                    s   t    || _|j| _|j| _| j| j | _| j| j | jkr-td| j d| j d| jd | _	|j
| _d| _t| j| j| _t| j| j| _t| j| j| _t| j| j| _d S )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: r-  g      F)rl   rm   rK   rp   r  r   	num_headshead_dimrW   scaleattention_dropoutry   	is_causalr   r   k_projv_projq_projout_projr}   r   r.   r/   rm   }  s$   

zGitVisionAttention.__init__NFr$   rL   r   r2   c              
   C   s   |j \}}}| |}| |}| |}	|||| j| jdd}|||| j| jdd}|	||| j| jdd}	t	| j
jt}
|
| |||	|| j| j| jsVdn| jd\}}|||| }| |}|sod}||fS )z#Input shape: Batch x Time x Channelr   r   r   )rD  r:  ry   N)r9   rG  rE  rF  r   r@  rA  r   r   get_interfacerK   r   r>  rD  rB  r   ry   r#  r   rH  )r~   r$   rL   r   r   r   r  querieskeysvaluesattention_interfacer   r=  r.   r.   r/   r     s2   




zGitVisionAttention.forward)NF)r&   r'   r(   r)   rm   r*   r   rE   r-   r   r   r.   r.   r   r/   r?  z  s    r?  c                       sV   e Zd Zdef fddZ	ddejdejdedB d	ee	 d
e
ej f
ddZ  ZS )GitVisionEncoderLayerrK   c                    sR   t    |j| _t|| _tj| j|jd| _	t
|| _tj| j|jd| _d S r   )rl   rm   rp   r  r?  	self_attnr   ru   rv   layer_norm1r6  mlplayer_norm2r}   r   r.   r/   rm     s   


zGitVisionEncoderLayer.__init__Fr$   rL   r   Nr_   r2   c                 K   sj   |}|  |}| jd|||d|\}}|| }|}| |}| |}|| }|f}|r3||f7 }|S )aI  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class GitVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`GitVisionEncoderLayer`].

    Args:
        config: GitVisionConfig
    """

    def __init__(self, config: GitVisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([GitVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    @can_return_tuple
    def forward(
        self,
        inputs_embeds: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutput:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            layer_outputs = encoder_layer(
                hidden_states,
                attention_mask,
                output_attentions=output_attentions,
                **kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


class GitVisionTransformer(nn.Module):
    def __init__(self, config: GitVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = GitVisionEmbeddings(config)
        # the misspelled `pre_layrnorm` is kept on purpose: it matches the parameter names in released checkpoints
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = GitVisionEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        pixel_values: torch.FloatTensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        interpolate_pos_encoding: bool = False,
        return_dict: bool | None = None,
    ) -> tuple | BaseModelOutput:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.post_layernorm(last_hidden_state)

        if not return_dict:
            return (last_hidden_state,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=last_hidden_state,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The vision model from CLIP, used in GIT, without any head or projection on top.
    """
)
class GitVisionModel(GitPreTrainedModel):
    config: GitVisionConfig
    main_input_name = "pixel_values"
    input_modalities = ("image",)

    def __init__(self, config: GitVisionConfig):
        super().__init__(config)
        self.vision_model = GitVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        interpolate_pos_encoding: bool = False,
        return_dict: bool | None = None,
        **kwargs,
    ) -> tuple | BaseModelOutput:
        r"""
        Examples:

        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoProcessor, GitVisionModel

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
        >>> model = GitVisionModel.from_pretrained("microsoft/git-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )


class GitProjection(nn.Module):
    def __init__(self, config: GitConfig):
        super().__init__()
        self.config = config
        self.visual_projection = nn.Sequential(
            nn.Linear(config.vision_config.hidden_size, config.hidden_size),
            nn.LayerNorm(config.hidden_size, eps=config.vision_config.layer_norm_eps),
        )

    def forward(self, embeddings: torch.Tensor) -> torch.Tensor:
        return self.visual_projection(embeddings)


@auto_docstring(
    custom_intro="""
    The bare GIT Model transformer consisting of a CLIP image encoder and text decoder outputting raw hidden-states
    """
)
class GitModel(GitPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = GitEmbeddings(config)
        self.image_encoder = GitVisionModel(config.vision_config)
        self.encoder = GitEncoder(config)

        self.visual_projection = GitProjection(config)

        if config.num_image_with_embedding is not None:
            self.img_temporal_embedding = nn.ParameterList(
                nn.Parameter(torch.zeros(1, 1, config.vision_config.hidden_size))
                for _ in range(config.num_image_with_embedding)
            )

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        pixel_values: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        interpolate_pos_encoding: bool = False,
        return_dict: bool | None = None,
        cache_position: torch.Tensor | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor] | BaseModelOutputWithPast:
        r"""
        Examples:

        ```python
        >>> from transformers import AutoProcessor, AutoModel
        >>> import httpx
        >>> from io import BytesIO
        >>> from PIL import Image

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
        >>> model = AutoModel.from_pretrained("microsoft/git-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> text = "this is an image of two cats"

        >>> inputs = processor(images=image, text=text, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")

        past_key_values_length = 0
        if past_key_values is not None:
            past_key_values_length = (
                past_key_values.get_seq_length() if isinstance(past_key_values, Cache) else past_key_values
            )

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )

        # text tokens are causal (token type 0), image tokens (token type 1) attend bidirectionally
        token_type_ids = torch.zeros(embedding_output.shape[:2], dtype=torch.long, device=embedding_output.device)

        projected_visual_features = None
        if pixel_values is not None:
            if pixel_values.ndim == 4:
                # here we assume pixel_values is of shape (batch_size, num_channels, height, width)
                visual_features = self.image_encoder(
                    pixel_values, interpolate_pos_encoding=interpolate_pos_encoding
                ).last_hidden_state
            elif pixel_values.ndim == 5:
                # here we assume pixel_values is of shape (batch_size, num_frames, num_channels, height, width)
                visual_features = []
                for frame_idx in range(pixel_values.shape[1]):
                    visual_features_frame = self.image_encoder(
                        pixel_values[:, frame_idx, :, :], interpolate_pos_encoding=interpolate_pos_encoding
                    ).last_hidden_state
                    visual_features_frame += self.img_temporal_embedding[frame_idx]
                    visual_features.append(visual_features_frame)

                # finally, concatenate all features along sequence dimension
                visual_features = torch.cat(visual_features, dim=1)
            else:
                raise ValueError("pixel_values must be of rank 4 or 5")

            projected_visual_features = self.visual_projection(visual_features)

        if projected_visual_features is None:
            projected_visual_features = torch.zeros(
                (embedding_output.shape[0], 0, embedding_output.shape[2]),
                dtype=embedding_output.dtype,
                device=embedding_output.device,
            )

        # repeat visual features to match embedding batch size (e.g. during beam search)
        projected_visual_features = projected_visual_features.repeat(
            embedding_output.size(0) // projected_visual_features.size(0), 1, 1
        )

        # concatenate patch token and text token embeddings
        hidden_states = torch.cat((projected_visual_features, embedding_output), dim=1)
        image_token_type_ids = torch.ones(
            projected_visual_features.shape[:2], dtype=torch.long, device=embedding_output.device
        )
        token_type_ids = torch.cat((image_token_type_ids, token_type_ids), dim=1)

        if cache_position is None:
            cache_position = torch.arange(
                past_key_values_length, past_key_values_length + hidden_states.shape[1], device=hidden_states.device
            )

        if attention_mask is not None:
            # image tokens are always attended to, so extend the text mask over the visual prefix
            extended_attention_mask = torch.cat(
                (
                    torch.ones(
                        projected_visual_features.shape[:2], dtype=attention_mask.dtype, device=attention_mask.device
                    ),
                    attention_mask,
                ),
                dim=1,
            )
        elif input_ids is not None and input_ids.shape[1] == 1:
            # cached generation step: only the newest token is passed, so build a mask covering
            # the image prefix, the previously processed tokens and the new token
            extended_attention_mask = torch.ones(
                (input_ids.shape[0], past_key_values_length + 1), dtype=torch.long, device=input_ids.device
            )
        else:
            extended_attention_mask = None

        causal_mask = create_causal_mask_mapping(
            self.config,
            hidden_states,
            extended_attention_mask,
            cache_position,
            past_key_values,
            None,
            token_type_ids,
            pixel_values,
        )

        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=causal_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutputWithPast(
            last_hidden_state=sequence_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    GIT Model with a `language modeling` head on top for autoregressive language modeling.
    """
)
class GitForCausalLM(GitPreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"output.weight": "git.embeddings.word_embeddings.weight"}

    def __init__(self, config):
        super().__init__(config)
        self.git = GitModel(config)
        self.output = nn.Linear(config.hidden_size, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.output

    def set_output_embeddings(self, new_embeddings):
        self.output = new_embeddings

    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        pixel_values: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        interpolate_pos_encoding: bool = False,
        return_dict: bool | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        cache_position: torch.Tensor | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor] | CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`

        Examples:

        Image captioning example:

        ```python
        >>> from transformers import AutoProcessor, AutoModelForCausalLM
        >>> import httpx
        >>> from io import BytesIO
        >>> from PIL import Image

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-coco")
        >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values

        >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
        >>> generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        >>> print(generated_caption)
        two cats sleeping on a pink blanket next to remotes.
        ```

        Visual question answering (VQA) example:

        ```python
        >>> from transformers import AutoProcessor, AutoModelForCausalLM
        >>> from huggingface_hub import hf_hub_download
        >>> from PIL import Image

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-textvqa")
        >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-textvqa")

        >>> file_path = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset")
        >>> image = Image.open(file_path).convert("RGB")

        >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values

        >>> question = "what does the front of the bus say at the top?"

        >>> input_ids = processor(text=question, add_special_tokens=False).input_ids
        >>> input_ids = [processor.tokenizer.cls_token_id] + input_ids
        >>> input_ids = torch.tensor(input_ids).unsqueeze(0)

        >>> generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
        >>> print(processor.batch_decode(generated_ids, skip_special_tokens=True))
        ['what does the front of the bus say at the top? special']
        ```

        Video captioning example:

        ```python
        >>> import av
        >>> import numpy as np
        >>> from PIL import Image
        >>> from huggingface_hub import hf_hub_download
        >>> from transformers import AutoProcessor, AutoModelForCausalLM

        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-vatex")
        >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-vatex")

        >>> # set seed for reproducibility
        >>> np.random.seed(45)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`list[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # load video
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample frames
        >>> num_frames = model.config.num_image_with_embedding
        >>> indices = sample_frame_indices(
        ...     clip_len=num_frames, frame_sample_rate=4, seg_len=container.streams.video[0].frames
        ... )
        >>> frames = read_video_pyav(container, indices)

        >>> pixel_values = processor(images=list(frames), return_tensors="pt").pixel_values

        >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)

        >>> print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True))
        Generated caption: ['a woman is sitting at a table and she is talking about the food she is holding.']
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            use_cache = False

        outputs = self.git(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            pixel_values=pixel_values,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        sequence_output = outputs[0]
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.output(sequence_output[:, slice_indices, :])

        loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one,
            # skipping the visual prefix in front of the text tokens
            num_image_tokens = self.git.encoder.layer[0].attention.self.image_patch_tokens
            shifted_logits = logits[:, num_image_tokens:-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss = self.loss_function(
                shifted_logits.view(-1, self.config.vocab_size),
                labels.view(-1),
                vocab_size=self.config.vocab_size,
                **kwargs,
            )

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        pixel_values=None,
        attention_mask=None,
        use_cache=None,
        cache_position=None,
        is_first_iteration=False,
        **kwargs,
    ):
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            use_cache=use_cache,
            cache_position=cache_position,
            is_first_iteration=is_first_iteration,
            **kwargs,
        )
        if is_first_iteration or not use_cache:
            # pixel values only need to be forwarded on the first (prefill) step
            model_inputs["pixel_values"] = pixel_values

        return model_inputs


__all__ = ["GitForCausalLM", "GitModel", "GitPreTrainedModel", "GitVisionModel"]