import math
from typing import Callable, Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, logging
from .configuration_helium import HeliumConfig


logger = logging.get_logger(__name__)


class HeliumRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class HeliumRotaryEmbedding(nn.Module):
    def __init__(self, config: HeliumConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32 for the frequencies
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class HeliumMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., 0::2]
    x2 = x[..., 1::2]
    return torch.stack((-x2, x1), dim=-1).flatten(-2)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    # Interleaved rotary convention: the first half of the rotary dims is repeated pairwise so that
    # even/odd channel pairs are rotated together (see rotate_half above).
    cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1)
    sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class HeliumAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: HeliumConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = 1 / math.sqrt(self.head_dim)
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class HeliumDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: HeliumConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = HeliumAttention(config=config, layer_idx=layer_idx)

        self.mlp = HeliumMLP(config)
        self.input_layernorm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class HeliumPreTrainedModel(PreTrainedModel):
    config_class = HeliumConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["HeliumDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_3 = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, HeliumRMSNorm):
            module.weight.data.fill_(1.0)


@auto_docstring
class HeliumModel(HeliumPreTrainedModel):
    def __init__(self, config: HeliumConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [HeliumDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = HeliumRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class HeliumForCausalLM(HeliumPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config: HeliumConfig):
        super().__init__(config)
        self.model = HeliumModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, HeliumForCausalLM

        >>> model = HeliumForCausalLM.from_pretrained("google/helium-7b")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/helium-7b")

        >>> prompt = "What is your favorite condiment?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "What is your favorite condiment?"
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The Helium Model transformer with a sequence classification head on top (linear layer).

    [`HeliumForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class HeliumForSequenceClassification(HeliumPreTrainedModel):
    def __init__(self, config: HeliumConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = HeliumModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right-padding, take the rightmost token that is not a pad token
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

t|j|j| _|   d S )Nclassifier_dropouthidden_dropoutg?)r   r    r2  r   r   r   r?  r@  r!   Dropoutr   ro   r(   r3  r   )r'   rD   r?  r*   r,   r-   r      s   
z%HeliumForTokenClassification.__init__c                 C   r  r   r  r=   r,   r,   r-   r     r  z1HeliumForTokenClassification.get_input_embeddingsc                 C   r  r   r  r   r,   r,   r-   r     r  z1HeliumForTokenClassification.set_input_embeddingsNr   r   re   r   r   r%  r   r   r  rx   c
              
   C   sd   | j ||||||||	d}
|
j}| |}| |}d}|dur(| ||| j}t|||
j|
jdS )r4  r5  N)r(  r  r7   r  )	r   r  r   r3  r*  rD   r   r7   r  )r'   r   r   re   r   r   r%  r   r   r  r   sequence_outputr  r(  r,   r,   r-   r:     s,   


z$HeliumForTokenClassification.forwardr  )r?   r@   rA   r   r    r   r   r   r   r   r#   r   r   r   r   r   r   r:   rB   r,   r,   r*   r-   r>    sH    	
r>  )r   r   r  r1  r>  )r   )Nr   )@r   typingr   r   r   r#   torch.nnr!   activationsr   cache_utilsr   r   
generationr	   masking_utilsr
   modeling_flash_attention_utilsr   modeling_layersr   modeling_outputsr   r   r   r   modeling_rope_utilsr   r   modeling_utilsr   r   processing_utilsr   utilsr   r   r   r   configuration_heliumr   
get_loggerr?   r  Moduler   rC   rk   r   r   r~   r[   r   r   r   r   r   r   r   r  r  r1  r>  __all__r,   r,   r,   r-   <module>   sn   
"

!D5}lVF