from typing import Callable, Optional, Union

import torch
import torch.nn as nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, logging
from .configuration_glm import GlmConfig


logger = logging.get_logger(__name__)


class GlmMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.activation_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        up_states = self.gate_up_proj(hidden_states)
        gate, up_states = up_states.chunk(2, dim=-1)
        up_states = up_states * self.activation_fn(gate)
        return self.down_proj(up_states)


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., 0::2]
    x2 = x[..., 1::2]
    return torch.stack((-x2, x1), dim=-1).flatten(-2)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    # Interleave the cos/sin entries instead of the usual "rotate half" layout
    cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1)
    sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1)

    # GLM applies rotary embeddings only to the first `rotary_dim` dimensions (partial rotary)
    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
    k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)

    # Concatenate the rotated and pass-through parts back to the full head dimension
    q_embed = torch.cat([q_embed, q_pass], dim=-1)
    k_embed = torch.cat([k_embed, k_pass], dim=-1)
    return q_embed, k_embed


class GlmAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GlmConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


@use_kernel_forward_from_hub("RMSNorm")
class GlmRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        GlmRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class GlmRotaryEmbedding(nn.Module):
    def __init__(self, config: GlmConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class GlmDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GlmConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = GlmAttention(config=config, layer_idx=layer_idx)

        self.mlp = GlmMLP(config)
        self.input_layernorm = GlmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GlmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class GlmPreTrainedModel(PreTrainedModel):
    config_class = GlmConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GlmDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_3 = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, GlmRMSNorm):
            module.weight.data.fill_(1.0)


@auto_docstring
class GlmModel(GlmPreTrainedModel):
    def __init__(self, config: GlmConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GlmDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GlmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = GlmRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class GlmForCausalLM(GlmPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = GlmModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GlmForCausalLM

        >>> model = GlmForCausalLM.from_pretrained("meta-glm/Glm-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-glm/Glm-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The Glm Model transformer with a sequence classification head on top (linear layer).

    [`GlmForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class GlmForSequenceClassification(GlmPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = GlmModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right-padding, take the rightmost token that is not equal to pad_token_id
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring
class GlmForTokenClassification(GlmPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = GlmModel(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "GlmPreTrainedModel",
    "GlmModel",
    "GlmForCausalLM",
    "GlmForSequenceClassification",
    "GlmForTokenClassification",
]
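

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the library API: it builds a tiny, randomly
    # initialized GLM and runs one forward pass through GlmForCausalLM. The configuration
    # values below are illustrative assumptions chosen to keep the model small; they do not
    # correspond to any released GLM checkpoint.
    config = GlmConfig(
        vocab_size=128,
        hidden_size=64,
        intermediate_size=128,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,
        pad_token_id=0,  # keep padding_idx inside the tiny vocabulary
    )
    model = GlmForCausalLM(config)
    model.eval()

    # A single batch of 8 random token ids; no cache, no labels.
    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    with torch.no_grad():
        out = model(input_ids=input_ids)
    print(out.logits.shape)  # expected: torch.Size([1, 8, config.vocab_size])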