#           🚨🚨🚨 This file was automatically generated from src/transformers/models/gemma/modular_gemma.py.
#               Do NOT edit this file manually as any edits will be overwritten by the generation of
#             the file from the modular. If any change should be done, please apply the change to the
#                          modular_gemma.py file directly.
# coding=utf-8
# Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from typing import Optional

import torch
from torch import nn

from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_func_from_hub, use_kernelized_func
from ...masking_utils import create_causal_mask
from ...modeling_layers import (
    GenericForSequenceClassification,
    GenericForTokenClassification,
    GradientCheckpointingLayer,
)
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_gemma import GemmaConfig


class GemmaRMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.zeros(dim))

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        output = self._norm(x.float())
        # Llama does x.to(float16) * w whilst Gemma is (x * w).to(float16)
        # See https://github.com/huggingface/transformers/pull/29402
        output = output * (1.0 + self.weight.float())
        return output.type_as(x)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.eps}"
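

# Illustrative sketch (not part of the upstream module): GemmaRMSNorm keeps its
# learned scale as an offset from 1 (`weight` is zero-initialized), so a freshly
# constructed layer applies a pure RMS normalization. `_demo_rms_norm` is a
# hypothetical helper that checks this invariant.
def _demo_rms_norm() -> None:
    norm = GemmaRMSNorm(dim=8)
    x = torch.randn(2, 4, 8)
    y = norm(x)
    # With weight == 0, forward reduces to x / sqrt(mean(x**2, dim=-1) + eps),
    # so every position of the output has (approximately) unit RMS.
    rms = y.pow(2).mean(-1).sqrt()
    assert torch.allclose(rms, torch.ones_like(rms), atol=1e-3)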


class GemmaMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class GemmaRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, config: GemmaConfig, device=None):
        super().__init__()
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings
        self.config = config
        self.rope_type = self.config.rope_parameters["rope_type"]
        rope_init_fn = self.compute_default_rope_parameters
        if self.rope_type != "default":
            rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)

    @staticmethod
    def compute_default_rope_parameters(
        config=None,
        device: Optional["torch.device"] = None,
        seq_len: Optional[int] = None,
    ) -> tuple["torch.Tensor", float]:
        """
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        """
        base = config.rope_parameters["rope_theta"]
        dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        attention_factor = 1.0  # Unused in this type of RoPE

        # Compute the inverse frequencies
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim))
        return inv_freq, attention_factor

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
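

# Illustrative sketch (not part of the upstream module): how the pieces above fit
# together for grouped-query attention. The config values are hypothetical, and
# this assumes GemmaConfig accepts the keyword arguments shown.
def _demo_rope_and_gqa() -> None:
    config = GemmaConfig(hidden_size=64, num_attention_heads=8, num_key_value_heads=2, head_dim=8)
    rope = GemmaRotaryEmbedding(config)
    batch, seq, n_heads, n_kv_heads, head_dim = 2, 5, 8, 2, 8
    position_ids = torch.arange(seq)[None].expand(batch, -1)
    q = torch.randn(batch, n_heads, seq, head_dim)
    k = torch.randn(batch, n_kv_heads, seq, head_dim)  # fewer key/value heads than query heads
    cos, sin = rope(q, position_ids)  # each [batch, seq, head_dim]
    # unsqueeze_dim=1 broadcasts cos/sin over the head dimension of q and k.
    q, k = apply_rotary_pos_emb(q, k, cos, sin)
    # Expand the 2 KV heads to match the 8 query heads for the attention matmul.
    k = repeat_kv(k, n_rep=n_heads // n_kv_heads)
    assert q.shape == k.shape == (batch, n_heads, seq, head_dim)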


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
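

# Illustrative sketch (not part of the upstream module): `eager_attention_forward`
# expects an *additive* float mask -- zeros where attention is allowed and large
# negative values on masked positions -- broadcastable to [batch, heads, q_len,
# kv_len]. `module_like` is any object exposing `num_key_value_groups` and
# `training` (e.g. a GemmaAttention instance); this helper is hypothetical.
def _demo_eager_attention(module_like, q, k, v):
    seq = q.shape[-2]
    causal_bias = torch.full((1, 1, seq, seq), float("-inf")).triu(diagonal=1)
    return eager_attention_forward(module_like, q, k, v, causal_bias, scaling=q.shape[-1] ** -0.5)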


@use_kernelized_func(apply_rotary_pos_emb)
class GemmaAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GemmaConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = not getattr(config, "use_bidirectional_attention", False)

        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
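

# Illustrative note (not part of the upstream module): with Gemma-2B-like numbers
# (hidden_size=2048, num_attention_heads=8, num_key_value_heads=1, head_dim=256),
# q_proj maps 2048 -> 8 * 256 = 2048 while k_proj and v_proj map 2048 -> 1 * 256 = 256.
# Shrinking num_key_value_heads therefore shrinks only the KV projections and the
# KV cache, not the query path; repeat_kv restores the head count at compute time.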


class GemmaDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GemmaConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = GemmaAttention(config=config, layer_idx=layer_idx)
        self.mlp = GemmaMLP(config)
        self.input_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


@auto_docstring
class GemmaPreTrainedModel(PreTrainedModel):
    config: GemmaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GemmaDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": GemmaDecoderLayer,
        "attentions": GemmaAttention,
    }

    @torch.no_grad()
    def _init_weights(self, module):
        super()._init_weights(module)
        if "RMSNorm" in module.__class__.__name__:
            init.zeros_(module.weight)


@auto_docstring
class GemmaModel(GemmaPreTrainedModel):
    def __init__(self, config: GemmaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GemmaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = GemmaRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)

        # normalized
        # Gemma downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5
        # See https://github.com/huggingface/transformers/pull/29402
        normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
        hidden_states = hidden_states * normalizer

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )


@auto_docstring
class GemmaForCausalLM(GemmaPreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
    _tp_plan = {"lm_head": "colwise_gather_output"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = GemmaModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:

        ```python
        >>> from transformers import AutoTokenizer, GemmaForCausalLM

        >>> model = GemmaForCausalLM.from_pretrained("google/gemma-7b")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")

        >>> prompt = "What is your favorite condiment?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "What is your favorite condiment?"
        ```"""
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class GemmaForSequenceClassification(GenericForSequenceClassification, GemmaPreTrainedModel):
    pass


class GemmaForTokenClassification(GenericForTokenClassification, GemmaPreTrainedModel):
    pass


__all__ = [
    "GemmaForCausalLM",
    "GemmaModel",
    "GemmaForSequenceClassification",
    "GemmaForTokenClassification",
    "GemmaPreTrainedModel",
]