o
    iF                  	   @   s  d Z ddlZddlmZ ddlmZmZ ddlZddlmZ ddl	m
Z
 ddlmZ dd	lmZ dd
lmZmZ ddlmZmZmZmZmZmZmZmZmZ ddlmZ ddlmZ e rfddl m!Z!m"Z" ndd Z"dd Z!e#e$Z%dZ&dZ'g dZ(dZ)dZ*eG dd deZ+eG dd deZ,eG dd deZ-G dd dej.Z/G d d! d!ej.Z0G d"d# d#ej.Z1dLd&ej2d'e3d(e4d)ej2fd*d+Z5G d,d- d-ej.Z6G d.d/ d/ej.Z7G d0d1 d1ej.Z8G d2d3 d3ej.Z9G d4d5 d5ej.Z:G d6d7 d7ej.Z;G d8d9 d9ej.Z<G d:d; d;ej.Z=G d<d= d=ej.Z>G d>d? d?eZ?d@Z@dAZAedBe@G dCdD dDe?ZBedEe@G dFdG dGe?ZCedHe@G dIdJ dJe?eZDg dKZEdS )Mz1PyTorch Neighborhood Attention Transformer model.    N)	dataclass)OptionalUnion)nn   )ACT2FN)BackboneOutput)PreTrainedModel) find_pruneable_heads_and_indicesprune_linear_layer)	ModelOutputOptionalDependencyNotAvailableadd_code_sample_docstringsadd_start_docstrings%add_start_docstrings_to_model_forwardis_natten_availableloggingreplace_return_docstringsrequires_backends)BackboneMixin   )	NatConfig)
natten2davnatten2dqkrpbc                  O      t  Nr   argskwargs r    c/home/ubuntu/.local/lib/python3.10/site-packages/transformers/models/deprecated/nat/modeling_nat.pyr   /      r   c                  O   r   r   r   r   r    r    r!   r   2   r"   r   r   zshi-labs/nat-mini-in1k-224)r      r#   i   z	tiger catc                   @   sr   e Zd ZU dZdZeej ed< dZ	ee
ejdf  ed< dZee
ejdf  ed< dZee
ejdf  ed< dS )NatEncoderOutputa  
    Nat encoder's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    Nlast_hidden_state.hidden_states
attentionsreshaped_hidden_states)__name__
__module____qualname____doc__r%   r   torchFloatTensor__annotations__r&   tupler'   r(   r    r    r    r!   r$   G   s   
 r$   c                   @      e Zd ZU dZdZeej ed< dZ	eej ed< dZ
eeejdf  ed< dZeeejdf  ed< dZeeejdf  ed< dS )	NatModelOutputaS  
    Nat model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
            Average pooling of the last layer hidden-state.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    Nr%   pooler_output.r&   r'   r(   )r)   r*   r+   r,   r%   r   r-   r.   r/   r3   r&   r0   r'   r(   r    r    r    r!   r2   h      
 r2   c                   @   r1   )	NatImageClassifierOutputa   
    Nat outputs for image classification.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    Nlosslogits.r&   r'   r(   )r)   r*   r+   r,   r6   r   r-   r.   r/   r7   r&   r0   r'   r(   r    r    r    r!   r5      r4   r5   c                       s>   e Zd ZdZ fddZdeej deej	 fddZ
  ZS )NatEmbeddingsz6
    Construct the patch and position embeddings.
    c                    s4   t    t|| _t|j| _t|j	| _
d S r   )super__init__NatPatchEmbeddingspatch_embeddingsr   	LayerNorm	embed_dimnormDropouthidden_dropout_probdropoutselfconfig	__class__r    r!   r:      s   

zNatEmbeddings.__init__pixel_valuesreturnc                 C   s"   |  |}| |}| |}|S r   )r<   r?   rB   )rD   rH   
embeddingsr    r    r!   forward   s   


zNatEmbeddings.forward)r)   r*   r+   r,   r:   r   r-   r.   r0   TensorrK   __classcell__r    r    rF   r!   r8      s    &r8   c                       s:   e Zd ZdZ fddZdeej dejfddZ	  Z
S )r;   z
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a
    Transformer.
    c              
      sr   t    |j}|j|j}}|| _|dkrntdttj| j|d ddddtj|d |dddd| _	d S )Nr   z2Dinat only supports patch size of 4 at the moment.      rP   rN   rN   r   r   )kernel_sizestridepadding)
r9   r:   
patch_sizenum_channelsr>   
ValueErrorr   
SequentialConv2d
projection)rD   rE   rV   rW   hidden_sizerF   r    r!   r:      s   

zNatPatchEmbeddings.__init__rH   rI   c                 C   s>   |j \}}}}|| jkrtd| |}|dddd}|S )NzeMake sure that the channel dimension of the pixel values match with the one set in the configuration.r   rN   rP   r   )shaperW   rX   r[   permute)rD   rH   _rW   heightwidthrJ   r    r    r!   rK      s   

zNatPatchEmbeddings.forward)r)   r*   r+   r,   r:   r   r-   r.   rL   rK   rM   r    r    rF   r!   r;      s    "r;   c                       sL   e Zd ZdZejfdedejddf fddZde	j
de	j
fd	d
Z  ZS )NatDownsamplerz
    Convolutional Downsampling Layer.

    Args:
        dim (`int`):
            Number of input channels.
        norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
            Normalization layer class.
    dim
norm_layerrI   Nc                    s>   t    || _tj|d| ddddd| _|d| | _d S )NrN   rO   rQ   rR   F)rS   rT   rU   bias)r9   r:   rc   r   rZ   	reductionr?   )rD   rc   rd   rF   r    r!   r:      s   
zNatDownsampler.__init__input_featurec                 C   s0   |  |dddddddd}| |}|S )Nr   rP   r   rN   )rf   r^   r?   )rD   rg   r    r    r!   rK      s   "
zNatDownsampler.forward)r)   r*   r+   r,   r   r=   intModuler:   r-   rL   rK   rM   r    r    rF   r!   rb      s    "
rb           Finput	drop_probtrainingrI   c                 C   sd   |dks|s| S d| }| j d fd| jd   }|tj|| j| jd }|  | || }|S )aF  
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    rj   r   r   )r   )dtypedevice)r]   ndimr-   randrn   ro   floor_div)rk   rl   rm   	keep_probr]   random_tensoroutputr    r    r!   	drop_path  s   
rw   c                       sT   e Zd ZdZddee ddf fddZdejdejfdd	Z	de
fd
dZ  ZS )NatDropPathzXDrop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).Nrl   rI   c                    s   t    || _d S r   )r9   r:   rl   )rD   rl   rF   r    r!   r:     s   

zNatDropPath.__init__r&   c                 C   s   t || j| jS r   )rw   rl   rm   rD   r&   r    r    r!   rK     s   zNatDropPath.forwardc                 C   s   d| j  S )Nzp=)rl   rD   r    r    r!   
extra_repr  s   zNatDropPath.extra_reprr   )r)   r*   r+   r,   r   floatr:   r-   rL   rK   strr{   rM   r    r    rF   r!   rx     s
    rx   c                       J   e Zd Z fddZdd Z	ddejdee de	ej fd	d
Z
  ZS )NeighborhoodAttentionc                    s   t    || dkrtd| d| d|| _t|| | _| j| j | _|| _t	t
|d| j d d| j d | _tj| j| j|jd| _tj| j| j|jd| _tj| j| j|jd| _t|j| _d S )Nr   zThe hidden size (z6) is not a multiple of the number of attention heads ()rN   r   )re   )r9   r:   rX   num_attention_headsrh   attention_head_sizeall_head_sizerS   r   	Parameterr-   zerosrpbLinearqkv_biasquerykeyvaluer@   attention_probs_dropout_probrB   rD   rE   rc   	num_headsrS   rF   r    r!   r:   $  s   
*zNeighborhoodAttention.__init__c                 C   s8   |  d d | j| jf }||}|dddddS )Nr   rP   r   rN   r   )sizer   r   viewr^   )rD   xnew_x_shaper    r    r!   transpose_for_scores9  s   
z*NeighborhoodAttention.transpose_for_scoresFr&   output_attentionsrI   c                 C   s   |  | |}|  | |}|  | |}|t| j }t||| j| j	d}t
jj|dd}| |}t||| j	d}|ddddd }| d d | jf }	||	}|rc||f}
|
S |f}
|
S )	Nr   r   rc   r   rN   rP   r   )r   r   r   r   mathsqrtr   r   r   rS   r   
functionalsoftmaxrB   r   r^   
contiguousr   r   r   )rD   r&   r   query_layer	key_layervalue_layerattention_scoresattention_probscontext_layernew_context_layer_shapeoutputsr    r    r!   rK   >  s   

zNeighborhoodAttention.forwardF)r)   r*   r+   r:   r   r-   rL   r   boolr0   rK   rM   r    r    rF   r!   r   #  s    r   c                       s8   e Zd Z fddZdejdejdejfddZ  ZS )NeighborhoodAttentionOutputc                    s*   t    t||| _t|j| _d S r   )r9   r:   r   r   denser@   r   rB   rD   rE   rc   rF   r    r!   r:   a  s   
z$NeighborhoodAttentionOutput.__init__r&   input_tensorrI   c                 C      |  |}| |}|S r   r   rB   )rD   r&   r   r    r    r!   rK   f  s   

z#NeighborhoodAttentionOutput.forwardr)   r*   r+   r:   r-   rL   rK   rM   r    r    rF   r!   r   `  s    $r   c                       r~   )NeighborhoodAttentionModulec                    s2   t    t||||| _t||| _t | _d S r   )r9   r:   r   rD   r   rv   setpruned_headsr   rF   r    r!   r:   n  s   
z$NeighborhoodAttentionModule.__init__c                 C   s   t |dkrd S t|| jj| jj| j\}}t| jj|| j_t| jj|| j_t| jj	|| j_	t| j
j|dd| j
_| jjt | | j_| jj| jj | j_| j|| _d S )Nr   r   r   )lenr
   rD   r   r   r   r   r   r   r   rv   r   r   union)rD   headsindexr    r    r!   prune_headst  s   z'NeighborhoodAttentionModule.prune_headsFr&   r   rI   c                 C   s2   |  ||}| |d |}|f|dd   }|S Nr   r   )rD   rv   )rD   r&   r   self_outputsattention_outputr   r    r    r!   rK     s   z#NeighborhoodAttentionModule.forwardr   )r)   r*   r+   r:   r   r-   rL   r   r   r0   rK   rM   r    r    rF   r!   r   m  s    r   c                       2   e Zd Z fddZdejdejfddZ  ZS )NatIntermediatec                    sJ   t    t|t|j| | _t|jt	rt
|j | _d S |j| _d S r   )r9   r:   r   r   rh   	mlp_ratior   
isinstance
hidden_actr}   r   intermediate_act_fnr   rF   r    r!   r:     s
   
zNatIntermediate.__init__r&   rI   c                 C   r   r   )r   r   ry   r    r    r!   rK        

zNatIntermediate.forwardr   r    r    rF   r!   r     s    r   c                       r   )	NatOutputc                    s4   t    tt|j| || _t|j| _	d S r   )
r9   r:   r   r   rh   r   r   r@   rA   rB   r   rF   r    r!   r:     s   
zNatOutput.__init__r&   rI   c                 C   r   r   r   ry   r    r    r!   rK     r   zNatOutput.forwardr   r    r    rF   r!   r     s    r   c                	       sR   e Zd Zd fdd	Zdd Z	ddejdee d	e	ejejf fd
dZ
  ZS )NatLayerrj   c                    s   t    |j| _|j| _tj||jd| _t|||| jd| _	|dkr(t
|nt | _tj||jd| _t||| _t||| _|jdkrYtj|jtd|f dd| _d S d | _d S )Neps)rS   rj   r   rN   T)requires_grad)r9   r:   chunk_size_feed_forwardrS   r   r=   layer_norm_epslayernorm_beforer   	attentionrx   Identityrw   layernorm_afterr   intermediater   rv   layer_scale_init_valuer   r-   oneslayer_scale_parameters)rD   rE   rc   r   drop_path_raterF   r    r!   r:     s   

zNatLayer.__init__c           
      C   sd   | j }d}||k s||k r.d }}td|| }td|| }	dd||||	f}tj||}||fS )N)r   r   r   r   r   r   r   )rS   maxr   r   pad)
rD   r&   r`   ra   window_size
pad_valuespad_lpad_tpad_rpad_br    r    r!   	maybe_pad  s   zNatLayer.maybe_padFr&   r   rI   c                 C   s  |  \}}}}|}| |}| |||\}}|j\}	}
}}	| j||d}|d }|d dkp5|d dk}|rJ|d d d |d |d d f  }| jd urV| jd | }|| | }| |}| 	| 
|}| jd urv| jd | }|| | }|r||d f}|S |f}|S )N)r   r   rP      r   )r   r   r   r]   r   r   r   rw   r   rv   r   )rD   r&   r   
batch_sizer`   ra   channelsshortcutr   r_   
height_pad	width_padattention_outputsr   
was_paddedlayer_outputlayer_outputsr    r    r!   rK     s,   
$


zNatLayer.forward)rj   r   )r)   r*   r+   r:   r   r-   rL   r   r   r0   rK   rM   r    r    rF   r!   r     s    r   c                       sB   e Zd Z fddZ	d	dejdee deej fddZ	  Z
S )
NatStagec                    sd   t     | _| _t fddt|D | _|d ur*|tjd| _	nd | _	d| _
d S )Nc                    s    g | ]}t  | d qS ))rE   rc   r   r   )r   .0irE   rc   r   r   r    r!   
<listcomp>  s    z%NatStage.__init__.<locals>.<listcomp>)rc   rd   F)r9   r:   rE   rc   r   
ModuleListrangelayersr=   
downsamplepointing)rD   rE   rc   depthr   r   r   rF   r   r!   r:     s   

zNatStage.__init__Fr&   r   rI   c                 C   sn   |  \}}}}t| jD ]\}}|||}|d }q|}	| jd ur'| |	}||	f}
|r5|
|dd  7 }
|
S r   )r   	enumerater   r   )rD   r&   r   r_   r`   ra   r   layer_moduler   !hidden_states_before_downsamplingstage_outputsr    r    r!   rK     s   



zNatStage.forwardr   )r)   r*   r+   r:   r-   rL   r   r   r0   rK   rM   r    r    rF   r!   r     s    r   c                       sb   e Zd Z fddZ				ddejdee dee dee d	ee d
ee	e
f fddZ  ZS )
NatEncoderc                    sh   t    t j_ _dd tjd jt	 jddD t
 fddtjD _d S )Nc                 S   s   g | ]}|  qS r    )item)r   r   r    r    r!   r   "  s    z'NatEncoder.__init__.<locals>.<listcomp>r   cpu)ro   c                    st   g | ]6}t  t jd |   j|  j| t jd| t jd|d   |jd k r4tnddqS )rN   Nr   )rE   rc   r   r   r   r   )r   rh   r>   depthsr   sum
num_levelsrb   )r   i_layerrE   dprrD   r    r!   r   $  s    	*)r9   r:   r   r   r   rE   r-   linspacer   r   r   r   r   levelsrC   rF   r   r!   r:     s   
$	
zNatEncoder.__init__FTr&   r   output_hidden_states(output_hidden_states_before_downsamplingreturn_dictrI   c                 C   s  |rdnd }|r
dnd }|rdnd }|r&| dddd}	||f7 }||	f7 }t| jD ]H\}
}|||}|d }|d }|rS|rS| dddd}	||f7 }||	f7 }n|ri|si| dddd}	||f7 }||	f7 }|rs||dd  7 }q+|stdd |||fD S t||||dS )	Nr    r   rP   r   rN   c                 s   s    | ]	}|d ur|V  qd S r   r    )r   vr    r    r!   	<genexpr>X  s    z%NatEncoder.forward.<locals>.<genexpr>)r%   r&   r'   r(   )r^   r   r   r0   r$   )rD   r&   r   r   r   r   all_hidden_statesall_reshaped_hidden_statesall_self_attentionsreshaped_hidden_stater   r   r   r   r    r    r!   rK   1  s<   





zNatEncoder.forward)FFFT)r)   r*   r+   r:   r-   rL   r   r   r   r0   r$   rK   rM   r    r    rF   r!   r     s&    
r   c                   @   s*   e Zd ZU dZeed< dZdZdd ZdS )NatPreTrainedModelz
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    rE   natrH   c                 C   st   t |tjtjfr#|jjjd| jjd |j	dur!|j	j
  dS dS t |tjr8|j	j
  |jjd dS dS )zInitialize the weightsrj   )meanstdNg      ?)r   r   r   rZ   weightdatanormal_rE   initializer_rangere   zero_r=   fill_)rD   moduler    r    r!   _init_weightsl  s   
z NatPreTrainedModel._init_weightsN)	r)   r*   r+   r,   r   r/   base_model_prefixmain_input_namer  r    r    r    r!   r  b  s   
 r  aF  
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`NatConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
a  
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
            for details.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
z]The bare Nat Model transformer outputting raw hidden-states without any specific head on top.c                       s   e Zd Zd fdd	Zdd Zdd Zeeee	e
eded		
	
	
	
ddeej dee dee dee deee
f f
ddZ  ZS )NatModelTc                    s   t  | t| dg || _t|j| _t|jd| jd   | _	t
|| _t|| _tj| j	|jd| _|r=tdnd | _|   d S )NnattenrN   r   r   )r9   r:   r   rE   r   r   r   rh   r>   num_featuresr8   rJ   r   encoderr   r=   r   	layernormAdaptiveAvgPool1dpooler	post_init)rD   rE   add_pooling_layerrF   r    r!   r:     s   

zNatModel.__init__c                 C      | j jS r   rJ   r<   rz   r    r    r!   get_input_embeddings     zNatModel.get_input_embeddingsc                 C   s*   |  D ]\}}| jj| j| qdS )z
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        N)itemsr  layerr   r   )rD   heads_to_pruner#  r   r    r    r!   _prune_heads  s   zNatModel._prune_headsvision)
checkpointoutput_typeconfig_classmodalityexpected_outputNrH   r   r   r   rI   c           
      C   s   |d ur|n| j j}|d ur|n| j j}|d ur|n| j j}|d u r&td| |}| j||||d}|d }| |}d }| jd urW| |	dd
dd}t	|d}|se||f|dd   }	|	S t|||j|j|jdS )Nz You have to specify pixel_valuesr   r   r   r   r   rN   )r%   r3   r&   r'   r(   )rE   r   r   use_return_dictrX   rJ   r  r  r  flatten	transposer-   r2   r&   r'   r(   )
rD   rH   r   r   r   embedding_outputencoder_outputssequence_outputpooled_outputrv   r    r    r!   rK     s:   


zNatModel.forward)T)NNNN)r)   r*   r+   r:   r   r%  r   NAT_INPUTS_DOCSTRINGr   _CHECKPOINT_FOR_DOCr2   _CONFIG_FOR_DOC_EXPECTED_OUTPUT_SHAPEr   r-   r.   r   r   r0   rK   rM   r    r    rF   r!   r    s6    	
r  z
    Nat Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
    the [CLS] token) e.g. for ImageNet.
    c                       s   e Zd Z fddZeeeeee	e
d					ddeej deej dee dee d	ee d
eeef fddZ  ZS )NatForImageClassificationc                    s\   t  | t| dg |j| _t|| _|jdkr#t| jj|jnt	 | _
|   d S )Nr  r   )r9   r:   r   
num_labelsr  r  r   r   r  r   
classifierr  rC   rF   r    r!   r:     s   
"z"NatForImageClassification.__init__)r'  r(  r)  r+  NrH   labelsr   r   r   rI   c                 C   s   |dur|n| j j}| j||||d}|d }| |}d}	|dur*| ||| j }	|s@|f|dd  }
|	dur>|	f|
 S |
S t|	||j|j|jdS )a  
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        Nr,  r   rN   )r6   r7   r&   r'   r(   )	rE   r-  r  r:  loss_functionr5   r&   r'   r(   )rD   rH   r;  r   r   r   r   r3  r7   r6   rv   r    r    r!   rK     s,   
z!NatForImageClassification.forward)NNNNN)r)   r*   r+   r:   r   r4  r   _IMAGE_CLASS_CHECKPOINTr5   r6  _IMAGE_CLASS_EXPECTED_OUTPUTr   r-   r.   
LongTensorr   r   r0   rK   rM   r    r    rF   r!   r8    s6    
r8  zBNAT backbone, to be used with frameworks like DETR and MaskFormer.c                       sl   e Zd Z fddZdd Zeeeee	d			dde
jdee d	ee d
ee def
ddZ  ZS )NatBackbonec                    s   t    t    t| dg t | _t | _ jg fddt	t
 jD  | _i }t| j| jD ]\}}t|||< q8t|| _|   d S )Nr  c                    s   g | ]}t  jd |  qS )rN   )rh   r>   r   rE   r    r!   r   G  s    z(NatBackbone.__init__.<locals>.<listcomp>)r9   r:   _init_backboner   r8   rJ   r   r  r>   r   r   r   r  zipout_featuresr   r   r=   
ModuleDicthidden_states_normsr  )rD   rE   rF  stagerW   rF   rA  r!   r:   ?  s   

&zNatBackbone.__init__c                 C   r  r   r  rz   r    r    r!   r   R  r!  z NatBackbone.get_input_embeddings)r(  r)  NrH   r   r   r   rI   c                 C   s,  |dur|n| j j}|dur|n| j j}|dur|n| j j}| |}| j||dddd}|j}d}t| j|D ]A\}	}
|	| j	v ry|
j
\}}}}|
dddd }
|
||| |}
| j|	 |
}
|
||||}
|
dddd }
||
f7 }q8|s|f}|r||jf7 }|S t||r|jnd|jd	S )
aA  
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
        >>> model = AutoBackbone.from_pretrained(
        ...     "shi-labs/nat-mini-in1k-224", out_features=["stage1", "stage2", "stage3", "stage4"]
        ... )

        >>> inputs = processor(image, return_tensors="pt")

        >>> outputs = model(**inputs)

        >>> feature_maps = outputs.feature_maps
        >>> list(feature_maps[-1].shape)
        [1, 512, 7, 7]
        ```NT)r   r   r   r   r    r   rN   rP   r   )feature_mapsr&   r'   )rE   r-  r   r   rJ   r  r(   rC  stage_namesrD  r]   r^   r   r   rF  r&   r   r'   )rD   rH   r   r   r   r0  r   r&   rH  rG  hidden_stater   rW   r`   ra   rv   r    r    r!   rK   U  sD   $


zNatBackbone.forward)NNN)r)   r*   r+   r:   r   r   r4  r   r   r6  r-   rL   r   r   rK   rM   r    r    rF   r!   r@  :  s&    
r@  )r8  r  r  r@  )rj   F)Fr,   r   dataclassesr   typingr   r   r-   r   activationsr   modeling_outputsr   modeling_utilsr	   pytorch_utilsr
   r   utilsr   r   r   r   r   r   r   r   r   utils.backbone_utilsr   configuration_natr   natten.functionalr   r   
get_loggerr)   loggerr6  r5  r7  r=  r>  r$   r2   r5   ri   r8   r;   rb   rL   r|   r   rw   rx   r   r   r   r   r   r   r   r   r  NAT_START_DOCSTRINGr4  r  r8  r@  __all__r    r    r    r!   <module>   sz   ,
 ##$ =$C.EUDf