from collections.abc import Callable
from typing import Optional

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub, use_kernel_func_from_hub, use_kernelized_func
from ...masking_utils import create_causal_mask
from ...modeling_layers import (
    GenericForQuestionAnswering,
    GenericForSequenceClassification,
    GenericForTokenClassification,
    GradientCheckpointingLayer,
)
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_llama import LlamaConfig


logger = logging.get_logger(__name__)


@use_kernel_forward_from_hub("RMSNorm")
class LlamaRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps: float = 1e-6) -> None:
        """
        LlamaRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class LlamaRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor

    def __init__(self, config: LlamaConfig, device=None):
        super().__init__()
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings
        self.config = config
        self.rope_type = self.config.rope_parameters["rope_type"]

        rope_init_fn = self.compute_default_rope_parameters
        if self.rope_type != "default":
            rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)

        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.register_buffer("original_inv_freq", self.inv_freq.clone(), persistent=False)

    @staticmethod
    def compute_default_rope_parameters(
        config: Optional[LlamaConfig] = None,
        device: Optional["torch.device"] = None,
        seq_len: Optional[int] = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads

        attention_factor = 1.0  # Unused in this type of RoPE

        # Compute the inverse frequencies
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
        )
        return inv_freq, attention_factor

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class LlamaMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


@use_kernelized_func(apply_rotary_pos_emb)
class LlamaAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: LlamaConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class LlamaDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: LlamaConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = LlamaAttention(config=config, layer_idx=layer_idx)

        self.mlp = LlamaMLP(config)
        self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


@auto_docstring
class LlamaPreTrainedModel(PreTrainedModel):
    config: LlamaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["LlamaDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": LlamaDecoderLayer,
        "attentions": LlamaAttention,
    }


@auto_docstring
class LlamaModel(LlamaPreTrainedModel):
    def __init__(self, config: LlamaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = LlamaRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


@auto_docstring
class LlamaForCausalLM(LlamaPreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = LlamaModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:

        ```python
        >>> from transformers import AutoTokenizer, LlamaForCausalLM

        >>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class LlamaForSequenceClassification(GenericForSequenceClassification, LlamaPreTrainedModel): ...


class LlamaForQuestionAnswering(GenericForQuestionAnswering, LlamaPreTrainedModel):
    base_model_prefix = "transformer"  # For BC, where `transformer` was used instead of `model`


class LlamaForTokenClassification(GenericForTokenClassification, LlamaPreTrainedModel): ...


__all__ = [
    "LlamaForCausalLM",
    "LlamaModel",
    "LlamaPreTrainedModel",
    "LlamaForSequenceClassification",
    "LlamaForQuestionAnswering",
    "LlamaForTokenClassification",
]