from typing import Callable, Optional, Union

import torch
import torch.nn as nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, logging
from .configuration_olmo2 import Olmo2Config


logger = logging.get_logger(__name__)


@use_kernel_forward_from_hub("RMSNorm")
class Olmo2RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Olmo2RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
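

# Shape sketch (illustrative comment, not part of the upstream API): repeat_kv expands
# grouped key/value heads so they line up one-to-one with the query heads.
#
#     kv = torch.randn(2, 4, 16, 64)      # (batch, num_kv_heads, seq_len, head_dim)
#     expanded = repeat_kv(kv, n_rep=8)   # -> (2, 32, 16, 64): 4 kv heads * 8 groups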


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
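

# Call sketch (hypothetical, for orientation only; in practice this function is invoked
# via Olmo2Attention below). `attn` stands for any module exposing the attributes read
# here (num_key_value_groups, training):
#
#     q = torch.randn(1, 32, 8, 64)        # (batch, q_heads, seq, head_dim)
#     k = v = torch.randn(1, 8, 8, 64)     # (batch, kv_heads, seq, head_dim)
#     out, w = eager_attention_forward(attn, q, k, v, None, scaling=64**-0.5)
#     # out: (1, 8, 32, 64) after the final transpose; w: (1, 32, 8, 8)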


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    q_type, k_type = q.dtype, k.dtype
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed.to(q_type), k_embed.to(k_type)
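

# Math sketch (illustrative): with the half-split convention used by rotate_half below,
# q' = q * cos + rotate_half(q) * sin applies the rotation e^{i * m * theta_j} at
# position m to each feature pair, so the dot product q'_m . k'_n depends only on the
# relative offset m - n, which is the point of rotary embeddings.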


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


class Olmo2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Olmo2Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        self.q_norm = Olmo2RMSNorm(config.num_attention_heads * self.head_dim, config.rms_norm_eps)
        self.k_norm = Olmo2RMSNorm(config.num_key_value_heads * self.head_dim, config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_norm(self.q_proj(hidden_states))
        key_states = self.k_norm(self.k_proj(hidden_states))
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Olmo2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class Olmo2DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Olmo2Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Olmo2Attention(config=config, layer_idx=layer_idx)
        self.mlp = Olmo2MLP(config)
        self.post_attention_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_feedforward_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_feedforward_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs
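

# Architecture note: OLMo2 uses a "norm-after" residual layout rather than Llama's
# pre-norm, i.e. h = x + norm(attn(x)) and out = h + norm(mlp(h)), with the norms
# named post_attention_layernorm and post_feedforward_layernorm above.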
zOlmo2DecoderLayer.forward)NNNFFNN)r>   r?   r@   r   r   r   r"   r   r   r   r   boolr:   FloatTensorr9   rA   r+   r+   r)   r,   r      s8    	r   c                       s8   e Zd Zddef fddZe edd Z  Z	S )Olmo2RotaryEmbeddingNrw   c                    s   t    t|dr|jd ur|jd|jd| _nd| _|j| _|j| _|| _	t
| j | _| | j	|\}| _| jd|dd | j| _d S )Nrope_scaling	rope_typetypedefaultinv_freqF)
persistent)r   r   hasattrr   getr   max_position_embeddingsmax_seq_len_cachedoriginal_max_seq_lenrw   r   rope_init_fnattention_scalingregister_bufferr   original_inv_freq)r&   rw   devicer   r)   r+   r,   r     s   
zOlmo2RotaryEmbedding.__init__c           
      C   s   | j d d d d f  |jd dd|j}|d d d d d f  }t|jjtr6|jjdkr6|jjnd}t	j
|dd/ | |  dd}t	j||fdd	}| | j }| | j }	||	fW  d    S 1 sqw   Y  d S )
Nr   r.   r   mpscpuF)device_typeenabledr-   rq   )r   floatrD   r;   r1   r   
isinstancer   strr"   autocastrY   rr   rh   r   ri   )
r&   rs   rj   inv_freq_expandedposition_ids_expandedr   freqsembrh   ri   r+   r+   r,   r9     s   0&$zOlmo2RotaryEmbedding.forwardr   )
r>   r?   r@   r   r   r"   no_gradr   r9   rA   r+   r+   r)   r,   r     s
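

# Usage sketch (illustrative): the rotary module turns position ids into the (cos, sin)
# tables that Olmo2Attention hands to apply_rotary_pos_emb.
#
#     rope = Olmo2RotaryEmbedding(config)
#     position_ids = torch.arange(seq_len)[None, :]    # (1, seq_len)
#     cos, sin = rope(hidden_states, position_ids)     # each (1, seq_len, head_dim)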


@auto_docstring
class Olmo2PreTrainedModel(PreTrainedModel):
    config_class = Olmo2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Olmo2DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_3 = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, Olmo2RMSNorm):
            module.weight.data.fill_(1.0)


@auto_docstring
class Olmo2Model(Olmo2PreTrainedModel):
    def __init__(self, config: Olmo2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Olmo2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Olmo2RotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
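

# Usage sketch (illustrative): the bare backbone returns final hidden states, not
# logits; Olmo2ForCausalLM below adds the LM head and the loss.
#
#     model = Olmo2Model(config)
#     out = model(input_ids=torch.tensor([[1, 2, 3]]))
#     out.last_hidden_state.shape    # (1, 3, config.hidden_size)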
  e Zd ZdgZddiZddgdgfiZ fddZdd	 Zd
d Zdd Z	dd Z
dd Zdd Zee											d%deej deej deej dee deej deej dee dee dee deej d eeejf d!ee d"efd#d$Z  ZS )&Olmo2ForCausalLMzlm_head.weightlm_headcolwise_repr6   logitsc                    s@   t  | t|| _|j| _tj|j|jdd| _| 	  d S r   )
r   r   r   r   r   r    r   r'   r  r   r   r)   r+   r,   r     s
   
zOlmo2ForCausalLM.__init__c                 C   s   | j jS r   r   r   r<   r+   r+   r,   r     s   z%Olmo2ForCausalLM.get_input_embeddingsc                 C   s   || j _d S r   r  r   r+   r+   r,   r     s   z%Olmo2ForCausalLM.set_input_embeddingsc                 C   r   r   r  r<   r+   r+   r,   get_output_embeddings  r   z&Olmo2ForCausalLM.get_output_embeddingsc                 C   r   r   r  )r&   new_embeddingsr+   r+   r,   set_output_embeddings  r   z&Olmo2ForCausalLM.set_output_embeddingsc                 C   r   r   r   )r&   decoderr+   r+   r,   set_decoder  r   zOlmo2ForCausalLM.set_decoderc                 C   r   r   r  r<   r+   r+   r,   get_decoder  r   zOlmo2ForCausalLM.get_decoderNr   r   rP   rj   r   r   labelsr   r   r   r   logits_to_keepr]   rC   c                 K   s   |dur|n| j j}|	dur|	n| j j}	| jd||||||||	|
d	|}|j}t|tr4t| dn|}| |dd|ddf }d}|durX| j	d||| j j
d|}t|||j|j|jdS )at  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Olmo2ForCausalLM

        >>> model = Olmo2ForCausalLM.from_pretrained("meta-olmo2/Olmo2-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-olmo2/Olmo2-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```N)	r   rP   rj   r   r   r   r   r   r   )r  r  r   )lossr  r   r6   r  r+   )rw   r   r   r   r  r   r   slicer  loss_functionr   r   r   r6   r  )r&   r   rP   rj   r   r   r  r   r   r   r   r  r]   r   r6   slice_indicesr  r  r+   r+   r,   r9     s:   '
zOlmo2ForCausalLM.forward)NNNNNNNNNNr   )r>   r?   r@   _tied_weights_keys_tp_plan_pp_planr   r   r   r  r  r  r  r   r   r   r"   r   r   r   r   r   r   r   r   r  r   r9   rA   r+   r+   r)   r,   r    sf    		


__all__ = ["Olmo2ForCausalLM", "Olmo2Model", "Olmo2PreTrainedModel"]