from typing import Callable, Optional, Union

import torch
import torch.nn as nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, logging
from .configuration_phi import PhiConfig


logger = logging.get_logger(__name__)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
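
    Example:
        A minimal shape sketch with illustrative values; `sin` of zeros and `cos` of
        ones make the rotation an identity, so only broadcasting is exercised here:

        >>> import torch
        >>> q = k = torch.randn(1, 4, 6, 10)
        >>> cos, sin = torch.ones(1, 6, 10), torch.zeros(1, 6, 10)
        >>> q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)
        >>> tuple(q_embed.shape)
        (1, 4, 6, 10)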
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
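
    Example (illustrative shapes; equivalent to `torch.repeat_interleave`):

    >>> import torch
    >>> x = torch.randn(2, 3, 5, 4)  # (batch, num_key_value_heads, seqlen, head_dim)
    >>> torch.equal(repeat_kv(x, 2), torch.repeat_interleave(x, dim=1, repeats=2))
    True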
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class PhiAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: PhiConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True)
        self.dense = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=True)
        self.rotary_ndims = int(self.head_dim * config.partial_rotary_factor)
        self.qk_layernorm = config.qk_layernorm
        if self.qk_layernorm:
            self.q_layernorm = nn.LayerNorm(
                config.hidden_size // config.num_attention_heads, eps=config.layer_norm_eps, elementwise_affine=True
            )
            self.k_layernorm = nn.LayerNorm(
                config.hidden_size // config.num_attention_heads, eps=config.layer_norm_eps, elementwise_affine=True
            )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        if self.qk_layernorm:
            query_states = self.q_layernorm(query_states)
            key_states = self.k_layernorm(key_states)

        cos, sin = position_embeddings
        # Partial rotary embedding: only the first `rotary_ndims` dims are rotated
        query_rot, query_pass = (
            query_states[..., : self.rotary_ndims],
            query_states[..., self.rotary_ndims :],
        )
        key_rot, key_pass = (
            key_states[..., : self.rotary_ndims],
            key_states[..., self.rotary_ndims :],
        )
        query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin)

        query_states = torch.cat((query_rot, query_pass), dim=-1)
        key_states = torch.cat((key_rot, key_pass), dim=-1)

        if past_key_value is not None:
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.dense(attn_output)
        return attn_output, attn_weights


class PhiMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states
ee
ej  dee dee deej	 dee
ejejf  de
ejee
ejejf  f fddZ  ZS )PhiDecoderLayerrZ   r[   c                    sH   t    t||d| _t|| _tj|j|j	d| _
t|j| _d S )N)r[   r^   )r`   ra   rY   	self_attnr   mlprL   rp   rc   rq   input_layernormDropoutresid_pdropresid_dropoutrt   rv   r&   r'   ra      s
   

zPhiDecoderLayer.__init__NFr3   rB   r.   ry   output_attentions	use_cacherz   rx   r5   c	                 K   sr   |}
|  |}| jd||||||||d|	\}}| |}| | |}|| |
 }|f}|r7||f7 }|S )N)r3   rB   r.   ry   r   r   rz   rx   r&   )r   r   r   r   )ru   r3   rB   r.   ry   r   r   rz   rx   rR   residualattn_outputsself_attn_weightsfeed_forward_hidden_statesoutputsr&   r&   r'   r      s*   
	


zPhiDecoderLayer.forward)NNNFFNN)r   r   r   r   rl   ra   r!   r   r   r   r   boolFloatTensorr   r   r&   r&   rv   r'   r      s8    
	r   c                       s8   e Zd Zddef fddZe edd Z  Z	S )PhiRotaryEmbeddingNrZ   c                    s   t    t|dr|jd ur|jd|jd| _nd| _|j| _|j| _|| _	t


class PhiRotaryEmbedding(nn.Module):
    def __init__(self, config: PhiConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
dZdZdZdZdZdZdd ZdS )PhiPreTrainedModelmodelTr   past_key_valuesc                 C   s   | j j}t|tjr"|jjjd|d |jd ur |jj	  d S d S t|tj
rC|jjjd|d |jd urA|jj|j 	  d S d S t|tjrX|jjd |jj	  d S d S )Nr=   )meanstdg      ?)rZ   initializer_ranger   rL   rg   weightdatanormal_r]   zero_	Embeddingpadding_idxrp   fill_)ru   r>   r   r&   r&   r'   _init_weights3  s   

z PhiPreTrainedModel._init_weightsN)r   r   r   r   config_classbase_model_prefixsupports_gradient_checkpointing_no_split_modules_skip_keys_device_placement_supports_flash_attn_3_supports_flash_attn_2_supports_sdpa_supports_flex_attn_supports_cache_class_supports_quantized_cache_supports_static_cache_supports_attention_backendr   r&   r&   r&   r'   r   #  s    r   c                       s   e Zd Zdef fddZdd Zdd Zee									dd	e	e


@auto_docstring
class PhiModel(PhiPreTrainedModel):
    def __init__(self, config: PhiConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [PhiDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.rotary_emb = PhiRotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        self.embed_dropout = nn.Dropout(config.embd_pdrop)
        self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        inputs_embeds = self.embed_dropout(inputs_embeds)
        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.final_layernorm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class PhiForCausalLM(PhiPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = PhiModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=True)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, PhiForCausalLM

        >>> model = PhiForCausalLM.from_pretrained("meta-phi/Phi-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-phi/Phi-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The Phi Model transformer with a sequence classification head on top (linear layer).

    [`PhiForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
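
    A small sketch of the last-non-pad lookup used for pooling (with a hypothetical
    `pad_token_id` of 0):

    >>> import torch
    >>> input_ids = torch.tensor([[11, 12, 0, 0]])
    >>> non_pad_mask = (input_ids != 0).int()
    >>> token_indices = torch.arange(input_ids.shape[-1])
    >>> (token_indices * non_pad_mask).argmax(-1)
    tensor([1])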
    """
)
class PhiForSequenceClassification(PhiPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = PhiModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right- padding, take the rightmost token that is not equal to pad_token_id
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
t|j|j| _|   d S )Nclassifier_dropouthidden_dropoutg?)r`   ra   r/  r   r   rb   r<  r=  rL   r   rD   rg   rc   r0  r   )ru   rZ   r<  rv   r&   r'   ra     s   
z"PhiForTokenClassification.__init__c                 C   r  r   r  r   r&   r&   r'   r     r  z.PhiForTokenClassification.get_input_embeddingsc                 C   r  r   r  r   r&   r&   r'   r     r  z.PhiForTokenClassification.set_input_embeddingsNr   rB   r.   r   r   r"  r   r   r   r5   c
              
   C   sd   | j ||||||||	d}
|
j}| |}| |}d}|dur(| ||| j}t|||
j|
jdS )r1  r2  N)r%  r  r3   r  )	r   r  rD   r0  r'  rZ   r   r3   r  )ru   r   rB   r.   r   r   r"  r   r   r   r   sequence_outputr  r%  r&   r&   r'   r     s,   


z!PhiForTokenClassification.forwardr  )r   r   r   ra   r   r   r   r   r   r!   r   r   r   r   r   r   r   r   r&   r&   rv   r'   r;    sH    	
r;  )r   r   r  r.  r;  )Nr   )r=   )>typingr   r   r   r!   torch.nnrL   activationsr   cache_utilsr   r   
generationr	   masking_utilsr
   modeling_flash_attention_utilsr   modeling_layersr   modeling_outputsr   r   r   r   modeling_rope_utilsr   r   modeling_utilsr   r   processing_utilsr   utilsr   r   r   r   configuration_phir   
get_loggerr   r  r(   r2   r   rl   r<   Moduler   rX   rY   r   r   r   r   r   r  r  r.  r;  __all__r&   r&   r&   r'   <module>   sj   


X0"{lVF