from collections.abc import Callable
from typing import Optional

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub, use_kernel_func_from_hub, use_kernelized_func
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_cwm import CwmConfig


class CwmRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor

    def __init__(self, config: CwmConfig, device=None):
        super().__init__()
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings
        self.config = config
        self.rope_type = self.config.rope_parameters["rope_type"]

        rope_init_fn = self.compute_default_rope_parameters
        if self.rope_type != "default":
            rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)

        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)

    @staticmethod
    def compute_default_rope_parameters(
        config: Optional[CwmConfig] = None,
        device: Optional["torch.device"] = None,
        seq_len: Optional[int] = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads

        attention_factor = 1.0  # unused in this type of RoPE

        # Compute the inverse frequencies
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
        )
        return inv_freq, attention_factor

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


@use_kernelized_func(apply_rotary_pos_emb)
class CwmAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: CwmConfig, layer_idx: int):
        super().__init__()
        self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
        self.sliding_window = config.sliding_window if self.layer_type == "sliding_attention" else None

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


@use_kernel_forward_from_hub("RMSNorm")
class CwmRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps: float = 1e-6) -> None:
        """
        CwmRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class CwmMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class CwmDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: CwmConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = CwmAttention(config=config, layer_idx=layer_idx)
        self.mlp = CwmMLP(config)
        self.input_layernorm = CwmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = CwmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.attention_type = config.layer_types[layer_idx]

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        # Self Attention
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


@auto_docstring
class CwmPreTrainedModel(PreTrainedModel):
    config: CwmConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["CwmDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": CwmDecoderLayer,
        "attentions": CwmAttention,
    }


class CwmModelOutputWithPast(BaseModelOutputWithPast):
    pass


@auto_docstring
class CwmModel(CwmPreTrainedModel):
    config_class = CwmConfig

    def __init__(self, config: CwmConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [CwmDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = CwmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = CwmRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @capture_outputs
    @merge_with_config_defaults
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CwmModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        if not isinstance(causal_mask_mapping := attention_mask, dict):
            # Prepare the mask arguments and build one mask per attention type
            mask_kwargs = {
                "config": self.config,
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }
            sliding_mask_kwargs = mask_kwargs.copy()
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
                "sliding_attention": create_sliding_window_causal_mask(**sliding_mask_kwargs),
            }

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask_mapping[decoder_layer.attention_type],
                position_ids=position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return CwmModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


@auto_docstring
class CwmForCausalLM(CwmPreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
    _tp_plan = {"lm_head": "colwise_gather_output"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = CwmModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:

        ```python
        >>> from transformers import AutoTokenizer, CwmForCausalLM

        >>> model = CwmForCausalLM.from_pretrained("meta-cwm/Cwm-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-cwm/Cwm-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["CwmPreTrainedModel", "CwmModel", "CwmForCausalLM"]