from collections.abc import Callable
from typing import Optional

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import (
    GenericForSequenceClassification,
    GenericForTokenClassification,
    GradientCheckpointingLayer,
)
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_phi3 import Phi3Config

class Phi3MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.activation_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        up_states = self.gate_up_proj(hidden_states)

        gate, up_states = up_states.chunk(2, dim=-1)
        up_states = up_states * self.activation_fn(gate)

        return self.down_proj(up_states)


class Phi3RotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, config: Phi3Config, device=None):
        super().__init__()
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_type = self.config.rope_parameters["rope_type"]

        rope_init_fn = self.compute_default_rope_parameters
        if self.rope_type != "default":
            rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)

        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)

    @staticmethod
    def compute_default_rope_parameters(
        config: Phi3Config | None = None,
        device: "torch.device | None" = None,
        seq_len: int | None = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
        head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        dim = int(head_dim * partial_rotary_factor)

        attention_factor = 1.0  # Unused in this type of RoPE

        # Compute the inverse frequencies
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
        )
        return inv_freq, attention_factor

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    q_embed = torch.cat([(q_rot * cos) + (rotate_half(q_rot) * sin), q_pass], dim=-1)
    k_embed = torch.cat([(k_rot * cos) + (rotate_half(k_rot) * sin), k_pass], dim=-1)
    return q_embed, k_embed


class Phi3Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        op_size = config.num_attention_heads * self.head_dim + 2 * (config.num_key_value_heads * self.head_dim)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
        self.qkv_proj = nn.Linear(config.hidden_size, op_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        qkv = self.qkv_proj(hidden_states)
        query_pos = self.config.num_attention_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]

        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=getattr(self.config, "sliding_window", None),
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights

@use_kernel_forward_from_hub("RMSNorm")
class Phi3RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps: float = 1e-6) -> None:
        """
        Phi3RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
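
# RMSNorm above, as a single expression (equivalent math, computed in float32
# and cast back to the input dtype):
#   y = weight * x / sqrt(mean(x**2, dim=-1, keepdim=True) + eps)
#
# Phi3DecoderLayer below then stacks two pre-norm residual blocks, with dropout
# applied to each residual branch:
#   h = h + resid_attn_dropout(self_attn(input_layernorm(h)))
#   h = h + resid_mlp_dropout(mlp(post_attention_layernorm(h)))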

class Phi3DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Phi3Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Phi3Attention(config=config, layer_idx=layer_idx)
        self.mlp = Phi3MLP(config)
        self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.config = config
        self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
        self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + self.resid_attn_dropout(hidden_states)

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + self.resid_mlp_dropout(hidden_states)
        return hidden_states


@auto_docstring
class Phi3PreTrainedModel(PreTrainedModel):
    config: Phi3Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Phi3DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": Phi3DecoderLayer,
        "attentions": Phi3Attention,
    }
    _version = "0.0.5"


@auto_docstring
class Phi3Model(Phi3PreTrainedModel):
    def __init__(self, config: Phi3Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Phi3RotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
        causal_mask = mask_function(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )


@auto_docstring
class Phi3ForCausalLM(Phi3PreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
    _tp_plan = {"lm_head": "colwise_gather_output"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = Phi3Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:

        ```python
        >>> from transformers import AutoTokenizer, Phi3ForCausalLM

        >>> model = Phi3ForCausalLM.from_pretrained("meta-phi3/Phi3-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-phi3/Phi3-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        logits_to_keep=None,
        **kwargs,
    ):
        # Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the
        # process

        # When the first time input length reaches the long and short factor switching point, enforce re-computing the
        # cache: passing `past_key_values` as None makes downstream code reset the cache
        if (
            past_key_values
            and hasattr(self.config, "original_max_position_embeddings")
            and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1
        ):
            past_length = cache_position[0]
            if past_length <= self.config.original_max_position_embeddings:
                past_key_values = None

        model_inputs = super().prepare_inputs_for_generation(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            position_ids=position_ids,
            use_cache=use_cache,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )
        return model_inputs


class Phi3ForSequenceClassification(GenericForSequenceClassification, Phi3PreTrainedModel):
    pass


class Phi3ForTokenClassification(GenericForTokenClassification, Phi3PreTrainedModel):
    pass


__all__ = [
    "Phi3PreTrainedModel",
    "Phi3Model",
    "Phi3ForCausalLM",
    "Phi3ForSequenceClassification",
    "Phi3ForTokenClassification",
]