from collections.abc import Callable
from typing import Optional

import torch
from torch import nn

from ...activations import ACT2CLS, ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub, use_kernel_func_from_hub, use_kernelized_func
from ...masking_utils import create_causal_mask
from ...modeling_layers import GenericForTokenClassification, GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_apertus import ApertusConfig

class ApertusMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]
        if config.hidden_act == "xielu":
            # xIELU is a stateful activation module, so it is instantiated with the model dtype
            self.act_fn = ACT2CLS["xielu"](dtype=config.dtype)

    def forward(self, x):
        return self.down_proj(self.act_fn(self.up_proj(x)))


@use_kernel_forward_from_hub("RMSNorm")
class ApertusRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps: float = 1e-6) -> None:
        """
        ApertusRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
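

# Illustrative usage of the two blocks above (shapes and hyperparameters are
# assumed for the example, not taken from a real checkpoint):
#
#   norm = ApertusRMSNorm(hidden_size=4096, eps=1e-6)
#   y = norm(torch.randn(2, 16, 4096))  # same shape; each vector rescaled to unit RMS
#
# Unlike LayerNorm, RMSNorm subtracts no mean and carries no bias, which is why
# the forward above only needs the mean-of-squares (variance) term.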


class ApertusRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, config: ApertusConfig, device=None):
        super().__init__()
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings
        self.config = config
        self.rope_type = self.config.rope_parameters["rope_type"]

        rope_init_fn = self.compute_default_rope_parameters
        if self.rope_type != "default":
            rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)

        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)

    @staticmethod
    def compute_default_rope_parameters(
        config: Optional[ApertusConfig] = None,
        device: Optional["torch.device"] = None,
        seq_len: Optional[int] = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads

        attention_factor = 1.0  # Unused in this type of RoPE

        # Compute the inverse frequencies
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
        )
        return inv_freq, attention_factor

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
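

# Shape sketch for the eager path above (head counts are illustrative): with
# 32 query heads, 8 key/value heads and head_dim=128, `query` is
# (batch, 32, seq, 128) while `key`/`value` are (batch, 8, seq, 128).
# repeat_kv tiles the 8 KV heads 4x so the batched matmul sees matching head
# counts; softmax runs in float32 and is cast back to the query dtype before
# the probability-weighted sum over the values.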


@use_kernelized_func(apply_rotary_pos_emb)
class ApertusAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: ApertusConfig, layer_idx: int | None = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)
        self.q_norm = ApertusRMSNorm(self.head_dim, config.rms_norm_eps)
        self.k_norm = ApertusRMSNorm(self.head_dim, config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        # Apertus applies QK-norm on the per-head dimension before the rotary embedding
        query_states = self.q_norm(query_states)
        key_states = self.k_norm(key_states)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class ApertusDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: ApertusConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = ApertusAttention(config=config, layer_idx=layer_idx)
        self.mlp = ApertusMLP(config)
        self.attention_layernorm = ApertusRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.feedforward_layernorm = ApertusRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor]:
        residual = hidden_states
        hidden_states = self.attention_layernorm(hidden_states)

        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.feedforward_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


@auto_docstring
class ApertusPreTrainedModel(PreTrainedModel):
    config: ApertusConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["ApertusDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": ApertusDecoderLayer,
        "attentions": ApertusAttention,
    }


@auto_docstring
class ApertusModel(ApertusPreTrainedModel):
    def __init__(self, config: ApertusConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [ApertusDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = ApertusRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = ApertusRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @capture_outputs
    @merge_with_config_defaults
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


@auto_docstring
class ApertusForCausalLM(ApertusPreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
    _tp_plan = {"lm_head": "colwise_gather_output"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = ApertusModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, ApertusForCausalLM

        >>> model = ApertusForCausalLM.from_pretrained("swiss-ai/Apertus-8B")
        >>> tokenizer = AutoTokenizer.from_pretrained("swiss-ai/Apertus-8B")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class ApertusForTokenClassification(GenericForTokenClassification, ApertusPreTrainedModel):
    pass


__all__ = ["ApertusForCausalLM", "ApertusForTokenClassification", "ApertusModel", "ApertusPreTrainedModel"]