  d dl mZmZmZ d dlZd dlmZ ddlmZ ddlm	Z	m
Z
 ddlmZ ddlmZ dd	lmZ dd
lmZ ddlmZ ddlmZmZ ddlmZmZ ddlmZmZ ddlmZ ddl m!Z!m"Z"m#Z#m$Z$ ddl%m&Z& e$'e(Z)dd Z*d:ddZ+dej,de-dej,fddZ.	d;dej/dej,dej,d ej,d!eej, d"e0d#e0fd$d%Z1G d&d' d'ej/Z2ed(G d)d* d*ej/Z3G d+d, d,ej/Z4G d-d. d.eZ5e"G d/d0 d0eZ6G d1d2 d2ej/Z7e"G d3d4 d4e6Z8G d5d6 d6ee!Z9e"G d7d8 d8e6eZ:g d9Z;dS )<    )CallableOptionalUnionN)nn   )ACT2FN)CacheDynamicCache)GenerationMixin)use_kernel_forward_from_hub)create_causal_mask)FlashAttentionKwargs)GradientCheckpointingLayer)BaseModelOutputWithPastCausalLMOutputWithPast)ROPE_INIT_FUNCTIONSdynamic_rope_update)ALL_ATTENTION_FUNCTIONSPreTrainedModel)Unpack)
LossKwargsauto_docstringcan_return_tuplelogging   )GraniteConfigc                 C   sH   | dd| j d d f }| d| j d d df }tj| |fddS )z*Rotates half the hidden dims of the input..N   dim)shapetorchcat)xx1x2 r&   i/home/ubuntu/sommelier/.venv/lib/python3.10/site-packages/transformers/models/granite/modeling_granite.pyrotate_half-   s   r(   c                 C   sD   | |}| |}| | t| |  }|| t||  }||fS )a  Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
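
    Example (illustrative only; the tensor shapes below are assumptions chosen for demonstration,
    not requirements of this function):

    ```python
    >>> import torch
    >>> q = torch.randn(2, 8, 16, 64)  # (batch, num_heads, seq_len, head_dim)
    >>> k = torch.randn(2, 8, 16, 64)
    >>> cos = torch.randn(2, 16, 64)   # (batch, seq_len, head_dim)
    >>> sin = torch.randn(2, 16, 64)
    >>> q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)  # cos/sin are unsqueezed to (2, 1, 16, 64)
    >>> q_embed.shape
    torch.Size([2, 8, 16, 64])
    ```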
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class GraniteAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GraniteConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = config.attention_multiplier
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


@use_kernel_forward_from_hub("RMSNorm")
class GraniteRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        GraniteRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class GraniteMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class GraniteDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GraniteConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = GraniteAttention(config=config, layer_idx=layer_idx)

        self.mlp = GraniteMLP(config)
        self.input_layernorm = GraniteRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GraniteRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.residual_multiplier = config.residual_multiplier

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
                into the model
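        Example (illustrative shapes only; a `hidden_size` of 4096 with 32 attention heads, hence
            `head_dim=128`, is an assumption): a `hidden_states` tensor of shape `(2, 16, 4096)` together
            with `position_embeddings` of shapes `((2, 16, 128), (2, 16, 128))` yields an output tuple whose
            first element again has shape `(2, 16, 4096)`.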
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states * self.residual_multiplier

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states * self.residual_multiplier

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class GranitePreTrainedModel(PreTrainedModel):
    config_class = GraniteConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GraniteDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_3 = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, GraniteRMSNorm):
            module.weight.data.fill_(1.0)


class GraniteRotaryEmbedding(nn.Module):
    def __init__(self, config: GraniteConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@auto_docstring
class GraniteModel(GranitePreTrainedModel):
    def __init__(self, config: GraniteConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GraniteDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GraniteRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = GraniteRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        self.embedding_multiplier = config.embedding_multiplier

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        inputs_embeds = inputs_embeds * self.embedding_multiplier

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class GraniteForCausalLM(GranitePreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = GraniteModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GraniteForCausalLM

        >>> model = GraniteForCausalLM.from_pretrained("meta-granite/Granite-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-granite/Granite-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits / self.config.logits_scaling

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["GraniteForCausalLM", "GraniteModel", "GranitePreTrainedModel"]
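
# Minimal usage sketch (illustrative only). The checkpoint id below is an assumption and is not defined by
# this module; any Hugging Face Hub repository whose config maps to `GraniteForCausalLM` works the same way.
#
#     from transformers import AutoTokenizer, GraniteForCausalLM
#
#     tokenizer = AutoTokenizer.from_pretrained("ibm-granite/granite-3.0-2b-base")
#     model = GraniteForCausalLM.from_pretrained("ibm-granite/granite-3.0-2b-base")
#     inputs = tokenizer("The capital of France is", return_tensors="pt")
#     generated = model.generate(**inputs, max_new_tokens=20)
#     print(tokenizer.decode(generated[0], skip_special_tokens=True))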