from typing import Callable, Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, logging
from .configuration_gemma import GemmaConfig


logger = logging.get_logger(__name__)


class GemmaRMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.zeros(dim))

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        output = self._norm(x.float())
        # The weight is stored as an offset from 1.0 (initialized to zeros), and
        # the product is computed in float32 before casting back to the input dtype.
        output = output * (1.0 + self.weight.float())
        return output.type_as(x)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.eps}"


class GemmaMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class GemmaRotaryEmbedding(nn.Module):
    def __init__(self, config: GemmaConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)
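

# Concretely, for head_dim == 4, rotate_half maps [x0, x1, x2, x3] to
# [-x2, -x3, x0, x1]. Together with the duplicated cos/sin layout emitted by
# GemmaRotaryEmbedding (emb = cat((freqs, freqs), dim=-1)), this rotates the
# pairs (x_i, x_{i + head_dim // 2}) by the per-frequency angles.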


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
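

# A minimal, self-contained sanity check for the two helpers above. It is
# illustrative only (not part of the Transformers API) and executes only under
# `python -m transformers.models.gemma.modeling_gemma`, never on plain import.
if __name__ == "__main__":
    batch, heads, seq_len, head_dim = 1, 2, 4, 8

    # Build cos/sin of shape (batch, seq_len, head_dim), the layout produced by
    # GemmaRotaryEmbedding.forward (two stacked copies of the frequencies).
    inv_freq = 1.0 / (10000.0 ** (torch.arange(0, head_dim, 2).float() / head_dim))
    freqs = torch.outer(torch.arange(seq_len).float(), inv_freq)
    emb = torch.cat((freqs, freqs), dim=-1)[None]

    q = torch.randn(batch, heads, seq_len, head_dim)
    k = torch.randn(batch, heads, seq_len, head_dim)
    q_embed, k_embed = apply_rotary_pos_emb(q, k, emb.cos(), emb.sin())
    # The rotation is norm-preserving, so attention logits depend only on
    # relative positions.
    assert torch.allclose(q_embed.norm(dim=-1), q.norm(dim=-1), atol=1e-5)

    # repeat_kv matches torch.repeat_interleave along the key/value-head axis,
    # exactly as its docstring claims.
    kv = torch.randn(2, 3, 5, 4)  # (batch, num_key_value_heads, slen, head_dim)
    assert torch.equal(repeat_kv(kv, 2), torch.repeat_interleave(kv, repeats=2, dim=1))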


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class GemmaAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GemmaConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class GemmaDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GemmaConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = GemmaAttention(config=config, layer_idx=layer_idx)
        self.mlp = GemmaMLP(config)
        self.input_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class GemmaPreTrainedModel(PreTrainedModel):
    config_class = GemmaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GemmaDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_3 = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, GemmaRMSNorm):
            module.weight.data.fill_(1.0)


@auto_docstring
class GemmaModel(GemmaPreTrainedModel):
    def __init__(self, config: GemmaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GemmaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = GemmaRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # normalized
        # Gemma downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5
        # See https://github.com/huggingface/transformers/pull/29402
        normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
        hidden_states = hidden_states * normalizer

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class GemmaForCausalLM(GemmaPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = GemmaModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GemmaForCausalLM

        >>> model = GemmaForCausalLM.from_pretrained("google/gemma-7b")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")

        >>> prompt = "What is your favorite condiment?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "What is your favorite condiment?"
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The Gemma Model transformer with a sequence classification head on top (linear layer).

    [`GemmaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class GemmaForSequenceClassification(GemmaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = GemmaModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right-padding, take the rightmost token that is not a pad token
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring
class GemmaForTokenClassification(GemmaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = GemmaModel(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "GemmaModel",
    "GemmaForCausalLM",
    "GemmaForSequenceClassification",
    "GemmaForTokenClassification",
    "GemmaPreTrainedModel",
]