from typing import Callable, Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, logging
from .configuration_starcoder2 import Starcoder2Config


logger = logging.get_logger(__name__)


class Starcoder2MLP(nn.Module):
    def __init__(self, config: Starcoder2Config):
        super().__init__()
        embed_dim = config.hidden_size
        self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias)
        self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias)
        self.act = ACT2FN[config.hidden_act]
        self.residual_dropout = config.residual_dropout

    def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        hidden_states = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.c_proj(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training)
        return hidden_states


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
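
# Illustrative sketch (not part of the original module): a quick shape check for the two
# helpers above. The batch size, head counts, and dimensions are arbitrary assumptions, and
# the cos/sin tables below are stand-ins; the real tables come from Starcoder2RotaryEmbedding
# defined later in this file. Guarded so importing the module never runs it.
if __name__ == "__main__":
    batch, n_heads, n_kv_heads, seq_len, head_dim = 2, 8, 2, 5, 16
    q = torch.randn(batch, n_heads, seq_len, head_dim)
    k = torch.randn(batch, n_kv_heads, seq_len, head_dim)
    # cos/sin have shape (batch, seq_len, head_dim); unsqueeze_dim=1 broadcasts them over heads.
    angles = torch.outer(torch.arange(seq_len, dtype=torch.float), torch.ones(head_dim))
    cos, sin = angles.cos().expand(batch, -1, -1), angles.sin().expand(batch, -1, -1)
    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
    assert q_rot.shape == q.shape and k_rot.shape == k.shape
    # repeat_kv expands the 2 KV heads to match the 8 query heads (4-way grouped-query attention).
    k_expanded = repeat_kv(k_rot, n_heads // n_kv_heads)
    assert k_expanded.shape == (batch, n_heads, seq_len, head_dim)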
|d ur3|d d d d d d d |jd f }|
| }
tjj|
dtj	d
|j}
tjj|
|| jd}
t|
|	}|dd }||
fS )NrC   r   rB   )rE   dtyper5   r   )r]   num_key_value_groupsr?   matmul	transposerF   r   r8   softmaxfloat32torf   r9   r7   
contiguous)r_   r`   ra   rb   rc   rd   r9   kwargs
key_statesvalue_statesattn_weightscausal_maskattn_outputr1   r1   r2   eager_attention_forwardu   s   
&rt   c                       s   e Zd ZdZddedee f fddZ		ddej	de
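
# Illustrative sketch (not part of the original module): calling the eager attention path
# directly with a stand-in "module" object. Only the two attributes the function actually
# reads (`num_key_value_groups` and `training`) are provided; all sizes are assumptions.
if __name__ == "__main__":
    from types import SimpleNamespace

    batch, n_heads, n_kv_heads, seq_len, head_dim = 1, 4, 2, 6, 8
    fake_module = SimpleNamespace(num_key_value_groups=n_heads // n_kv_heads, training=False)
    q = torch.randn(batch, n_heads, seq_len, head_dim)
    k = torch.randn(batch, n_kv_heads, seq_len, head_dim)
    v = torch.randn(batch, n_kv_heads, seq_len, head_dim)
    # Additive causal mask: 0 on and below the diagonal, -inf strictly above it.
    causal = torch.full((seq_len, seq_len), float("-inf")).triu(1)[None, None, :, :]
    out, weights = eager_attention_forward(
        fake_module, q, k, v, attention_mask=causal, scaling=head_dim**-0.5
    )
    assert out.shape == (batch, seq_len, n_heads, head_dim)  # transposed back to (b, s, h, d)
    assert torch.allclose(weights.sum(-1), torch.ones(batch, n_heads, seq_len))  # rows sum to 1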
ej	ej	f d	eej	 d
ee deej dee de
ej	eej	 ee
ej	  f fddZ  ZS )Starcoder2Attentionz=Multi-headed attention from 'Attention Is All You Need' paperNr   	layer_idxc                    s   t    || _|| _t|dd p|j|j | _|j|j | _	| jd | _
|j| _d| _tj|j|j| j |jd| _tj|j|j| j |jd| _tj|j|j| j |jd| _tj|j| j |j|jd| _|j| _d S )Nr\   g      Tr    )r"   r#   r   rv   getattrr$   num_attention_headsr\   rZ   rg   rd   attention_dropout	is_causalr   r%   r'   q_projk_projv_projo_projr,   r-   r   rv   r/   r1   r2   r#      s   
zStarcoder2Attention.__init__r3   position_embeddingsrc   past_key_valuecache_positionrn   r4   c                 K   sF  |j d d }g |d| jR }| ||dd}	| ||dd}
| ||dd}|\}}t|	|
||\}	}
|d urW|||d}||
|| j	|\}
}t
}| jjdkret| jj }|| |	|
||f| jsqdn| j| jt| jdd d|\}}|jg |dR   }| |}tjj|| j| jd	}||fS )
NrB   r   rC   )rP   rO   r   eagerr^   sliding_window)r9   rd   r   r5   )rF   r\   r{   viewri   r|   r}   rU   updaterv   rt   r   _attn_implementationr   r7   ry   rd   rw   rX   rm   r~   r   r8   r9   r,   )r-   r3   r   rc   r   r   rn   input_shapehidden_shapequery_statesro   rp   rO   rP   cache_kwargsattention_interfacers   rq   r1   r1   r2   r:      s@   		


zStarcoder2Attention.forwardN)NN)r;   r<   r=   __doc__r   r   intr#   r?   Tensorr>   r   
LongTensorr   r   r:   rA   r1   r1   r/   r2   ru      s(    ru   c                       s   e Zd Zdedef fddZ							ddejdeej d	eej	 d
ee
 dee dee deej	 deeejejf  dee deejeeejejf  f fddZ  ZS )Starcoder2DecoderLayerr   rv   c                    sV   t    |j| _t||d| _t|| _tj|j|j	d| _
tj|j|j	d| _d S )N)r   rv   eps)r"   r#   r$   ru   	self_attnr   mlpr   	LayerNormnorm_epsiloninput_layernormpost_attention_layernormr   r/   r1   r2   r#      s   

zStarcoder2DecoderLayer.__init__NFr3   rc   rQ   r   output_attentions	use_cacher   r   rn   r4   c	                 K   st   |}
|  |}| jd||||||||d|	\}}|
| }|}
| |}| |}|
| }|f}|r8||f7 }|S )N)r3   rc   rQ   r   r   r   r   r   r1   )r   r   r   r   )r-   r3   rc   rQ   r   r   r   r   r   rn   residualself_attn_weightsoutputsr1   r1   r2   r:      s.   
	



zStarcoder2DecoderLayer.forward)NNNFFNN)r;   r<   r=   r   r   r#   r?   r   r   r   r   boolr>   r   r   r@   r:   rA   r1   r1   r/   r2   r      s<    	
r   c                       s8   e Zd Zddef fddZe edd Z  Z	S )Starcoder2RotaryEmbeddingNr   c                    s   t    t|dr|jd ur|jd|jd| _nd| _|j| _|j| _|| _	t
class Starcoder2RotaryEmbedding(nn.Module):
    def __init__(self, config: Starcoder2Config, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
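
# Illustrative sketch (not part of the original module): producing the cos/sin tables that the
# attention layers consume. The tiny Starcoder2Config values below are assumptions chosen only
# so the example runs quickly without pretrained weights; only dtype/device are read from the
# dummy hidden states tensor.
if __name__ == "__main__":
    tiny_config = Starcoder2Config(
        vocab_size=128,
        hidden_size=64,
        intermediate_size=128,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,
        max_position_embeddings=64,
    )
    rotary = Starcoder2RotaryEmbedding(tiny_config)
    dummy_hidden = torch.zeros(1, 10, tiny_config.hidden_size)
    position_ids = torch.arange(10)[None, :]
    cos, sin = rotary(dummy_hidden, position_ids)
    # One (cos, sin) pair per position, with head_dim entries each.
    assert cos.shape == (1, 10, tiny_config.hidden_size // tiny_config.num_attention_heads)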
@auto_docstring
class Starcoder2PreTrainedModel(PreTrainedModel):
    config_class = Starcoder2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Starcoder2DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_3 = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
@auto_docstring
class Starcoder2Model(Starcoder2PreTrainedModel):
    def __init__(self, config: Starcoder2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Starcoder2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
        self.rotary_emb = Starcoder2RotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        self.embedding_dropout = config.embedding_dropout

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
        causal_mask = mask_function(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        hidden_states = nn.functional.dropout(hidden_states, p=self.embedding_dropout, training=self.training)

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
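
# Illustrative sketch (not part of the original module): running the bare decoder stack with
# randomly initialized weights. The tiny config values are assumptions chosen so the example
# needs no checkpoint; real usage would go through `from_pretrained` instead.
if __name__ == "__main__":
    tiny_config = Starcoder2Config(
        vocab_size=128,
        hidden_size=64,
        intermediate_size=128,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,
        max_position_embeddings=64,
    )
    tiny_model = Starcoder2Model(tiny_config)
    input_ids = torch.randint(0, tiny_config.vocab_size, (1, 12))
    with torch.no_grad():
        output = tiny_model(input_ids=input_ids)
    # One hidden state per input token, each of size hidden_size.
    assert output.last_hidden_state.shape == (1, 12, tiny_config.hidden_size)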
  e Zd ZdgZddiZddgdgfiZ fddZdd	 Zd
d Zdd Z	dd Z
dd Zdd Zee											d%deej deej deej dee deej deej dee dee dee deej d eeejf d!ee d"efd#d$Z  ZS )&Starcoder2ForCausalLMzlm_head.weightlm_headcolwise_repr3   logitsc                    s@   t  | t|| _|j| _tj|j|jdd| _| 	  d S NFr    )
r"   r#   r   r   r   r   r%   r$   r  r   r   r/   r1   r2   r#     s
   
zStarcoder2ForCausalLM.__init__c                 C      | j jS r   r   r   r   r1   r1   r2   r        z*Starcoder2ForCausalLM.get_input_embeddingsc                 C      || j _d S r   r  r   r1   r1   r2   r        z*Starcoder2ForCausalLM.set_input_embeddingsc                 C   r   r   r  r   r1   r1   r2   get_output_embeddings  r   z+Starcoder2ForCausalLM.get_output_embeddingsc                 C   r   r   r  )r-   new_embeddingsr1   r1   r2   set_output_embeddings  r   z+Starcoder2ForCausalLM.set_output_embeddingsc                 C   r   r   r   )r-   decoderr1   r1   r2   set_decoder  r   z!Starcoder2ForCausalLM.set_decoderc                 C   r   r   r  r   r1   r1   r2   get_decoder  r   z!Starcoder2ForCausalLM.get_decoderNr   r   rc   rQ   r   r   labelsr   r   r   r   logits_to_keeprn   r4   c                 K   s   |dur|n| j j}|	dur|	n| j j}	| jd||||||||	|
d	|}|j}t|tr4t| dn|}| |dd|ddf }d}|durX| j	d||| j j
d|}t|||j|j|jdS )a  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Starcoder2ForCausalLM

        >>> model = Starcoder2ForCausalLM.from_pretrained("meta-starcoder2/Starcoder2-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-starcoder2/Starcoder2-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The Starcoder2 Model transformer with a sequence classification head on top (linear layer).

    [`Starcoder2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class Starcoder2ForSequenceClassification(Starcoder2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Starcoder2Model(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right-padding, take the rightmost token that is not equal to pad_token_id
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring
class Starcoder2ForTokenClassification(Starcoder2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Starcoder2Model(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "Starcoder2ForCausalLM",
    "Starcoder2Model",
    "Starcoder2PreTrainedModel",
    "Starcoder2ForSequenceClassification",
    "Starcoder2ForTokenClassification",
]
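
# Illustrative sketch (not part of the original module): last-token pooling as done by the
# sequence classification head above, again with assumed tiny config values and random weights.
# The `pad_token_id` and `num_labels` settings are assumptions for the demonstration only.
if __name__ == "__main__":
    tiny_config = Starcoder2Config(
        vocab_size=128,
        hidden_size=64,
        intermediate_size=128,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,
        max_position_embeddings=64,
        pad_token_id=0,
        num_labels=3,
    )
    classifier = Starcoder2ForSequenceClassification(tiny_config)
    # Two right-padded sequences; the head pools the logits of the last non-pad token of each row.
    input_ids = torch.tensor([[5, 6, 7, 0, 0], [8, 9, 10, 11, 12]])
    attention_mask = (input_ids != tiny_config.pad_token_id).long()
    with torch.no_grad():
        out = classifier(input_ids=input_ids, attention_mask=attention_mask)
    assert out.logits.shape == (2, tiny_config.num_labels)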