from collections.abc import Callable
from typing import Optional

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub, use_kernel_func_from_hub, use_kernelized_func
from ...masking_utils import create_causal_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_granite import GraniteConfig


logger = logging.get_logger(__name__)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed

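# Illustrative sketch (added for exposition; not part of the original transformers
# source): exercising `apply_rotary_pos_emb` with the shapes described in the
# docstring above. The tensors are hypothetical placeholders; in the model, `cos`
# and `sin` come from `GraniteRotaryEmbedding` below.
#
#   batch, heads, seq_len, head_dim = 2, 8, 16, 64
#   q = torch.randn(batch, heads, seq_len, head_dim)
#   k = torch.randn(batch, heads, seq_len, head_dim)
#   cos = torch.randn(batch, seq_len, head_dim)
#   sin = torch.randn(batch, seq_len, head_dim)
#   q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)  # unsqueeze_dim=1 broadcasts over heads
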
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights

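# Illustrative sketch (exposition only, not in the original file): with 8 query
# heads and 2 key/value heads, `num_key_value_groups` is 4, and `repeat_kv` tiles
# each KV head 4x so `eager_attention_forward` can use plain batched matmuls.
#
#   kv = torch.randn(1, 2, 16, 64)       # (batch, num_key_value_heads, seq_len, head_dim)
#   expanded = repeat_kv(kv, n_rep=4)    # -> shape (1, 8, 16, 64)
#   assert torch.equal(expanded[:, 0], expanded[:, 3])  # query heads 0-3 share KV head 0
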
@use_kernelized_func(apply_rotary_pos_emb)
class GraniteAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GraniteConfig, layer_idx: int | None = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = config.attention_multiplier
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights

@use_kernel_forward_from_hub("RMSNorm")
class GraniteRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps: float = 1e-6) -> None:
        """
        GraniteRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class GraniteMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class GraniteDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GraniteConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = GraniteAttention(config=config, layer_idx=layer_idx)
        self.mlp = GraniteMLP(config)
        self.input_layernorm = GraniteRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GraniteRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.residual_multiplier = config.residual_multiplier

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        output_attentions: bool | None = False,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.FloatTensor, torch.FloatTensor] | None = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_values (`Cache`, *optional*): cached past key and value projection states
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
                into the model
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states * self.residual_multiplier

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states * self.residual_multiplier

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class GranitePreTrainedModel(PreTrainedModel):
    config: GraniteConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GraniteDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": GraniteDecoderLayer,
        "attentions": GraniteAttention,
    }


class GraniteRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor

    def __init__(self, config: GraniteConfig, device=None):
        super().__init__()
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings
        self.config = config

        self.rope_type = self.config.rope_parameters["rope_type"]
        rope_init_fn = self.compute_default_rope_parameters
        if self.rope_type != "default":
            rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)

        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.register_buffer("original_inv_freq", self.inv_freq.clone(), persistent=False)

    @staticmethod
    def compute_default_rope_parameters(
        config=None,
        device: Optional["torch.device"] = None,
        seq_len: int | None = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads

        attention_factor = 1.0  # Unused in this type of RoPE

        # Compute the inverse frequencies
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
        )
        return inv_freq, attention_factor

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@auto_docstring
class GraniteModel(GranitePreTrainedModel):
    def __init__(self, config: GraniteConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GraniteDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GraniteRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = GraniteRotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        self.embedding_multiplier = config.embedding_multiplier

        # Initialize weights and apply final processing
        self.post_init()

    @merge_with_config_defaults
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        inputs_embeds = inputs_embeds * self.embedding_multiplier

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


@auto_docstring
class GraniteForCausalLM(GranitePreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
    _tp_plan = {"lm_head": "colwise_gather_output"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = GraniteModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:

        ```python
        >>> from transformers import AutoTokenizer, GraniteForCausalLM

        >>> model = GraniteForCausalLM.from_pretrained("meta-granite/Granite-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-granite/Granite-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits / self.config.logits_scaling

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["GraniteForCausalLM", "GraniteModel", "GranitePreTrainedModel"]