from typing import Callable, Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, logging
from .configuration_olmo import OlmoConfig


logger = logging.get_logger(__name__)

class OlmoLayerNorm(nn.Module):
    """LayerNorm but with no learnable weight or bias."""

    def __init__(self, hidden_size: int) -> None:
        super().__init__()
        self.normalized_shape = (hidden_size,)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        orig_dtype = hidden_states.dtype
        return F.layer_norm(hidden_states.to(dtype=torch.float32), self.normalized_shape, None, None, eps=1e-5).to(
            orig_dtype
        )


class OlmoMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj

def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights

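# Illustrative sketch, not part of the original module (sizes are hypothetical):
# `repeat_kv` tiles each key/value head `n_rep` times so the eager matmul above
# sees matching query and key/value head counts under grouped-query attention.
#
#   kv = torch.randn(2, 2, 10, 64)      # (batch, num_key_value_heads, seq, head_dim)
#   repeat_kv(kv, 8 // 2).shape         # -> torch.Size([2, 8, 10, 64])
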
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    q_type, k_type = q.dtype, k.dtype
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed.to(q_type), k_embed.to(k_type)

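# Illustrative sketch, not part of the original module (shapes are hypothetical):
# `cos`/`sin` arrive as (batch, seq_len, head_dim) from OlmoRotaryEmbedding while
# q/k are (batch, num_heads, seq_len, head_dim), so the default unsqueeze_dim=1
# inserts the missing heads axis and the elementwise products broadcast.
#
#   q = torch.randn(1, 8, 5, 64)
#   k = torch.randn(1, 8, 5, 64)
#   cos = torch.randn(1, 5, 64)
#   sin = torch.randn(1, 5, 64)
#   q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)   # both (1, 8, 5, 64)
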
qkcossinposition_idsunsqueeze_dimq_typek_typeq_embedk_embedr$   r$   r%   apply_rotary_pos_embi   s   

class OlmoAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: OlmoConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        if self.config.clip_qkv is not None:
            query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
            key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
            value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)

        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights

class OlmoDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: OlmoConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = OlmoAttention(config=config, layer_idx=layer_idx)
        self.mlp = OlmoMLP(config)
        self.input_layernorm = OlmoLayerNorm(config.hidden_size)
        self.post_attention_layernorm = OlmoLayerNorm(config.hidden_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs

class OlmoRotaryEmbedding(nn.Module):
    def __init__(self, config: OlmoConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos, sin

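# Illustrative sketch, not part of the original module (names are hypothetical):
# OlmoModel evaluates this rotary embedding once per forward pass and shares the
# resulting (cos, sin) pair across every decoder layer.
#
#   rope = OlmoRotaryEmbedding(config)
#   position_ids = torch.arange(seq_len)[None, :]   # (1, seq_len)
#   cos, sin = rope(hidden_states, position_ids)    # each (1, seq_len, head_dim)
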
@auto_docstring
class OlmoPreTrainedModel(PreTrainedModel):
    config_class = OlmoConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["OlmoDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_3 = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


@auto_docstring
class OlmoModel(OlmoPreTrainedModel):
    def __init__(self, config: OlmoConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [OlmoDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = OlmoLayerNorm(config.hidden_size)
        self.rotary_emb = OlmoRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...

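# Illustrative sketch, not part of the original module ("allenai/OLMo-1B-hf" is
# an assumed checkpoint name): the bare OlmoModel returns hidden states rather
# than logits; OlmoForCausalLM below adds the lm_head projection on top.
#
#   model = OlmoModel.from_pretrained("allenai/OLMo-1B-hf")
#   out = model(input_ids=torch.tensor([[1, 2, 3]]))
#   out.last_hidden_state.shape                     # -> (1, 3, config.hidden_size)
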
  e Zd ZdgZddiZddgdgfiZ fddZdd	 Zd
d Zdd Z	dd Z
dd Zdd Zee											d%deej deej deej dee deej deej dee dee dee deej d eeejf d!ee d"efd#d$Z  ZS )&OlmoForCausalLMzlm_head.weightlm_headcolwise_repr&   logitsc                    s@   t  | t|| _|j| _tj|j|jdd| _| 	  d S r8   )
r   r   r   r   r   r=   r>   r   r  r   rD   r"   r$   r%   r     s
   
zOlmoForCausalLM.__init__c                 C   s   | j jS r   r   r   r   r$   r$   r%   r     s   z$OlmoForCausalLM.get_input_embeddingsc                 C   s   || j _d S r   r  r   r$   r$   r%   r     s   z$OlmoForCausalLM.set_input_embeddingsc                 C   r   r   r  r   r$   r$   r%   get_output_embeddings  r   z%OlmoForCausalLM.get_output_embeddingsc                 C   r   r   r  )r!   new_embeddingsr$   r$   r%   set_output_embeddings  r   z%OlmoForCausalLM.set_output_embeddingsc                 C   r   r   r   )r!   decoderr$   r$   r%   set_decoder  r   zOlmoForCausalLM.set_decoderc                 C   r   r   r  r   r$   r$   r%   get_decoder  r   zOlmoForCausalLM.get_decoderNr   r   r\   rt   r   r   labelsr   r   r   r   logits_to_keeprh   r   c                 K   s   |dur|n| j j}|	dur|	n| j j}	| jd||||||||	|
d	|}|j}t|tr4t| dn|}| |dd|ddf }d}|durX| j	d||| j j
d|}t|||j|j|jdS )an  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, OlmoForCausalLM

        >>> model = OlmoForCausalLM.from_pretrained("meta-olmo/Olmo-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-olmo/Olmo-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```N)	r   r\   rt   r   r   r   r   r   r   )r
  r  r   )lossr
  r   r&   r   r$   )r;   r   r   r   r   r   r4   slicer  loss_functionr   r   r   r&   r   )r!   r   r\   rt   r   r   r  r   r   r   r   r  rh   r   r&   slice_indicesr
  r  r$   r$   r%   r/     s:   '
zOlmoForCausalLM.forward)NNNNNNNNNNr   )r0   r1   r2   _tied_weights_keys_tp_plan_pp_planr   r   r   r  r  r  r  r   r   r   r,   r   r5   r   r   r   r   r4   r   r  r   r/   r6   r$   r$   r"   r%   r    sf    		
__all__ = ["OlmoForCausalLM", "OlmoModel", "OlmoPreTrainedModel"]