from collections.abc import Callable
from typing import Optional

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub, use_kernel_func_from_hub, use_kernelized_func
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import (
    GenericForQuestionAnswering,
    GenericForSequenceClassification,
    GenericForTokenClassification,
    GradientCheckpointingLayer,
)
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_mistral import MistralConfig


class MistralMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class MistralAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: MistralConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Resolve the attention backend (eager, sdpa, flash-attention, ...) for this implementation
        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=getattr(self.config, "sliding_window", None),
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


@use_kernel_forward_from_hub("RMSNorm")
class MistralRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps: float = 1e-6) -> None:
        """
        MistralRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class MistralDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: MistralConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = MistralAttention(config=config, layer_idx=layer_idx)
        self.mlp = MistralMLP(config)
        self.input_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


@auto_docstring
class MistralPreTrainedModel(PreTrainedModel):
    config: MistralConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MistralDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": MistralDecoderLayer,
        "attentions": MistralAttention,
    }


class MistralRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor

    def __init__(self, config: MistralConfig, device=None):
        super().__init__()
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings
        self.config = config

        self.rope_type = self.config.rope_parameters["rope_type"]
        rope_init_fn = self.compute_default_rope_parameters
        if self.rope_type != "default":
            rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)

        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.register_buffer("original_inv_freq", self.inv_freq.clone(), persistent=False)

    @staticmethod
    def compute_default_rope_parameters(
        config: Optional[MistralConfig] = None,
        device: Optional["torch.device"] = None,
        seq_len: Optional[int] = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads

        attention_factor = 1.0  # Unused in this type of RoPE

        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
        )
        return inv_freq, attention_factor

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # force float32 for the freqs computation
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@auto_docstring
class MistralModel(MistralPreTrainedModel):
    def __init__(self, config: MistralConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [MistralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = MistralRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # Mistral uses sliding-window attention when the config requests it; pick the matching mask builder
        mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
        causal_mask = mask_function(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )


@auto_docstring
class MistralForCausalLM(MistralPreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
    _tp_plan = {"lm_head": "colwise_gather_output"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = MistralModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:

        ```python
        >>> from transformers import AutoTokenizer, MistralForCausalLM

        >>> model = MistralForCausalLM.from_pretrained("meta-mistral/Mistral-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-mistral/Mistral-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class MistralForTokenClassification(GenericForTokenClassification, MistralPreTrainedModel):
    pass


class MistralForSequenceClassification(GenericForSequenceClassification, MistralPreTrainedModel):
    pass


class MistralForQuestionAnswering(GenericForQuestionAnswering, MistralPreTrainedModel):
    pass


__all__ = [
    "MistralForCausalLM",
    "MistralForQuestionAnswering",
    "MistralModel",
    "MistralPreTrainedModel",
    "MistralForSequenceClassification",
    "MistralForTokenClassification",
]