from collections.abc import Callable
from typing import Optional

import torch
import torch.nn as nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub, use_kernelized_func
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import (
    GenericForSequenceClassification,
    GenericForTokenClassification,
    GradientCheckpointingLayer,
)
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_glm4 import Glm4Config


class Glm4MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.activation_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        up_states = self.gate_up_proj(hidden_states)
        gate, up_states = up_states.chunk(2, dim=-1)
        up_states = up_states * self.activation_fn(gate)
        return self.down_proj(up_states)


class Glm4DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Glm4Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = Glm4Attention(config=config, layer_idx=layer_idx)

        self.mlp = Glm4MLP(config)
        self.input_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_self_attn_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_mlp_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = self.post_self_attn_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_mlp_layernorm(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., 0::2]
    x2 = x[..., 1::2]
    return torch.stack((-x2, x1), dim=-1).flatten(-2)


def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    # Interleave the cos/sin halves instead of the usual split layout
    cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1)
    sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1)

    # Keep half or full tensor for later concatenation (partial rotary)
    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    # Apply rotary embeddings on the rotary portion
    q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
    k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)

    # Concatenate back to full shape
    q_embed = torch.cat([q_embed, q_pass], dim=-1)
    k_embed = torch.cat([k_embed, k_pass], dim=-1)
    return q_embed, k_embed


@use_kernelized_func(apply_rotary_pos_emb)
class Glm4Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Glm4Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Glm4RotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, config: Glm4Config, device=None):
        super().__init__()
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings
        self.config = config
        self.rope_type = self.config.rope_parameters["rope_type"]

        rope_init_fn = self.compute_default_rope_parameters
        if self.rope_type != "default":
            rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)

        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)

    @staticmethod
    def compute_default_rope_parameters(
        config: Optional[Glm4Config] = None,
        device: Optional["torch.device"] = None,
        seq_len: Optional[int] = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
        head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        dim = int(head_dim * partial_rotary_factor)

        attention_factor = 1.0  # Unused in this type of RoPE

        # Compute the inverse frequencies
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
        )
        return inv_freq, attention_factor

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # force float32 for the rotation
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@use_kernel_forward_from_hub("RMSNorm")
class Glm4RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps: float = 1e-6) -> None:
        """
        Glm4RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


@auto_docstring
class Glm4PreTrainedModel(PreTrainedModel):
    config: Glm4Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Glm4DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": Glm4DecoderLayer,
        "attentions": Glm4Attention,
    }


@auto_docstring
class Glm4Model(Glm4PreTrainedModel):
    def __init__(self, config: Glm4Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Glm4DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Glm4RotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_embeddings=position_embeddings,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
        )


@auto_docstring
class Glm4ForCausalLM(Glm4PreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
    _tp_plan = {"lm_head": "colwise_gather_output"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config: Glm4Config):
        super().__init__(config)
        self.model = Glm4Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Glm4ForCausalLM

        >>> model = Glm4ForCausalLM.from_pretrained("THUDM/GLM-4-9B-0414")
        >>> tokenizer = AutoTokenizer.from_pretrained("THUDM/GLM-4-9B-0414")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class Glm4ForSequenceClassification(GenericForSequenceClassification, Glm4PreTrainedModel):
    pass


class Glm4ForTokenClassification(GenericForTokenClassification, Glm4PreTrainedModel):
    pass


__all__ = [
    "Glm4PreTrainedModel",
    "Glm4Model",
    "Glm4ForCausalLM",
    "Glm4ForSequenceClassification",
    "Glm4ForTokenClassification",
]