from typing import Callable, Optional, Union

import torch
import torch.nn as nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import auto_docstring, can_return_tuple, logging
from ...utils.deprecation import deprecate_kwarg
from .configuration_gemma2 import Gemma2Config


logger = logging.get_logger(__name__)


class Gemma2RMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        # Gemma2 stores a zero-centered scale and applies (1 + weight) in forward.
        self.weight = nn.Parameter(torch.zeros(dim))

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        output = self._norm(x.float())
        output = output * (1.0 + self.weight.float())
        return output.type_as(x)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.eps}"


class Gemma2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_activation]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    dropout: float = 0.0,
    scaling: Optional[float] = None,
    softcap: Optional[float] = None,
    **kwargs,
) -> tuple[torch.Tensor, torch.Tensor]:
    if scaling is None:
        scaling = module.head_dim**-0.5

    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling

    if softcap is not None:
        attn_weights = attn_weights / softcap
        attn_weights = torch.tanh(attn_weights)
        attn_weights = attn_weights * softcap
    if attention_mask is not None:  # no matter the length, we just slice it
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    # upcast attention to fp32
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()
    return attn_output, attn_weights


class Gemma2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Gemma2Config, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = config.query_pre_attn_scalar**-0.5
        self.attention_dropout = self.config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        self.attn_logit_softcapping = self.config.attn_logit_softcapping
        self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == "sliding_attention" else None

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=self.attention_dropout if self.training else 0.0,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            softcap=self.attn_logit_softcapping,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Gemma2DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Gemma2Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.config = config
        self.attention_type = config.layer_types[layer_idx]
        self.self_attn = Gemma2Attention(config=config, layer_idx=layer_idx)
        self.mlp = Gemma2MLP(config)
        self.input_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.pre_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    @deprecate_kwarg("last_cache_position", version="4.53.0")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.pre_feedforward_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_feedforward_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)
        return outputs

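# The following self-contained sketch is illustrative only and is not part of the
# original transformers module. It exercises the helpers defined above with dummy
# tensors (every shape and value below is made up for the example) to show the
# documented contracts: RoPE broadcasting via `unsqueeze_dim`, the
# `repeat_kv` / `torch.repeat_interleave` equivalence, and how tanh soft-capping
# keeps attention logits inside [-softcap, softcap].
def _gemma2_helper_sketch():  # hypothetical name, not used anywhere in the library
    batch, heads, kv_heads, seq, head_dim = 2, 8, 4, 5, 16
    q = torch.randn(batch, heads, seq, head_dim)
    k = torch.randn(batch, kv_heads, seq, head_dim)

    # cos/sin come out of the rotary embedding with shape [batch, seq, head_dim];
    # the default unsqueeze_dim=1 makes them broadcast over the head dimension.
    cos = torch.randn(batch, seq, head_dim)
    sin = torch.randn(batch, seq, head_dim)
    q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)
    assert q_embed.shape == q.shape and k_embed.shape == k.shape

    # repeat_kv duplicates each key/value head `n_rep` times, which is exactly
    # torch.repeat_interleave along the head dimension (dim=1).
    n_rep = heads // kv_heads
    assert torch.equal(repeat_kv(k, n_rep), torch.repeat_interleave(k, n_rep, dim=1))

    # Soft-capping squashes raw scores through tanh so they cannot exceed +/- softcap.
    softcap = 50.0
    raw_scores = torch.randn(batch, heads, seq, seq) * 1000
    capped = torch.tanh(raw_scores / softcap) * softcap
    assert capped.abs().max() <= softcap
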
class Gemma2RotaryEmbedding(nn.Module):
    def __init__(self, config: Gemma2Config, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@auto_docstring
class Gemma2PreTrainedModel(PreTrainedModel):
    config_class = Gemma2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Gemma2DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_3 = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, Gemma2RMSNorm):
            module.weight.data.fill_(1.0)


@auto_docstring
class Gemma2Model(Gemma2PreTrainedModel):
    def __init__(self, config: Gemma2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Gemma2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Gemma2RotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None and not self.training:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            mask_kwargs = {
                "config": self.config,
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
                "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
            }

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # normalized
        # Gemma2 scales the embeddings by sqrt(hidden_size) in the embedding dtype.
        normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
        hidden_states = hidden_states * normalizer

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                position_embeddings=position_embeddings,
                attention_mask=causal_mask_mapping[decoder_layer.attention_type],
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

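# Illustrative sketch only, not part of the original module: Gemma2 mixes
# sliding-window and full attention, `Gemma2Model.forward` builds one mask per
# attention type, and each layer picks its mask through `layer.attention_type`.
# The tiny config values below are made up solely so the example stays cheap.
def _gemma2_layer_type_sketch():  # hypothetical helper, not a transformers API
    config = Gemma2Config(
        vocab_size=128,
        hidden_size=32,
        intermediate_size=64,
        num_hidden_layers=4,
        num_attention_heads=4,
        num_key_value_heads=2,
        head_dim=8,
        sliding_window=8,
    )
    model = Gemma2Model(config)

    # Every decoder layer advertises which of the two masks it needs.
    types = [layer.attention_type for layer in model.layers]
    assert set(types) <= {"full_attention", "sliding_attention"}

    # forward() keys the per-layer mask lookup on exactly these names:
    # causal_mask_mapping = {"full_attention": ..., "sliding_attention": ...}
    return types
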
@auto_docstring
class Gemma2ForCausalLM(Gemma2PreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = Gemma2Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **loss_kwargs,
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Gemma2ForCausalLM

        >>> model = Gemma2ForCausalLM.from_pretrained("google/gemma-2-9b")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")

        >>> prompt = "What is your favorite condiment?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "What is your favorite condiment?"
        ```"""
        if self.training and self.config._attn_implementation != "eager":
            logger.warning_once(
                "It is strongly recommended to train Gemma2 models with the `eager` attention implementation "
                f"instead of `{self.config._attn_implementation}`. Use `eager` with "
                "`AutoModelForCausalLM.from_pretrained('<path-to-checkpoint>', attn_implementation='eager')`."
            )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        if self.config.final_logit_softcapping is not None:
            logits = logits / self.config.final_logit_softcapping
            logits = torch.tanh(logits)
            logits = logits * self.config.final_logit_softcapping

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

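# Illustrative sketch only (hypothetical helper, not part of the original module):
# `Gemma2ForCausalLM.forward` keeps just the last `logits_to_keep` positions before
# projecting to the vocabulary, then applies the same tanh soft-cap to the final
# logits that the attention layers apply to attention scores. The default cap value
# below is an assumption for the example, not a library constant.
def _final_logits_sketch(hidden_states: torch.Tensor, lm_head: nn.Linear, softcap: float = 30.0):
    logits_to_keep = 1  # during generation only the last position is needed
    slice_indices = slice(-logits_to_keep, None)
    logits = lm_head(hidden_states[:, slice_indices, :])
    return torch.tanh(logits / softcap) * softcap
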
@auto_docstring(
    custom_intro="""
    The Gemma2 Model transformer with a sequence classification head on top (linear layer).

    [`Gemma2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class Gemma2ForSequenceClassification(Gemma2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Gemma2Model(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right-padding, take the rightmost token that is not a padding token
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring
class Gemma2ForTokenClassification(Gemma2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Gemma2Model(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "Gemma2ForCausalLM",
    "Gemma2Model",
    "Gemma2PreTrainedModel",
    "Gemma2ForSequenceClassification",
    "Gemma2ForTokenClassification",
]
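

# Illustrative usage sketch, not part of the original module. The training-time
# warning in `Gemma2ForCausalLM.forward` recommends the eager attention
# implementation; passing `attn_implementation="eager"` at load time is how that
# is selected. The checkpoint name reuses the one from the docstring example above;
# the helper name itself is hypothetical.
def _load_for_training_sketch():
    from transformers import AutoModelForCausalLM

    return AutoModelForCausalLM.from_pretrained("google/gemma-2-9b", attn_implementation="eager")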