from typing import Callable, Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.deprecation import deprecate_kwarg
from ...utils.generic import check_model_inputs
from .configuration_cohere2 import Cohere2Config


class Cohere2RotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor

    def __init__(self, config: Cohere2Config, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq
zCohere2RotaryEmbedding.__init__c           
      C   s   | j d d d d f  |jd dd}|d d d d d f  }t|jjtr2|jjdkr2|jjnd}tj	|dd* | |  
dd}tj|ddd	}| | j }| | j }	W d    n1 shw   Y  |j|jd
|	j|jd
fS )Nr   r   mpscpuF)device_typeenabled   dimdtype)r   floatexpandshaper'   r2   r!   strtorchautocast	transposerepeat_interleavecosr.   sintor@   )
r1   xposition_idsinv_freq_expandedposition_ids_expandedr:   freqsembrI   rJ   r5   r5   r6   forward?   s   (&zCohere2RotaryEmbedding.forwardN)__name__
__module____qualname__rE   Tensor__annotations__r   r%   no_gradr   rR   __classcell__r5   r5   r3   r6   r   +   s   
 
r   c                       s&   e Zd Zd fdd	Zdd Z  ZS )	Cohere2LayerNormNh㈵>Fc                    s&   t    tt|| _|| _dS )zcThe hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dimN)r$   r%   nn	ParameterrE   onesweightvariance_epsilon)r1   hidden_sizeepsbiasr3   r5   r6   r%   P   s   

zCohere2LayerNorm.__init__c                 C   sl   |j }|tj}|jddd}|| djddd}|| t|| j  }| jtj| }||S )Nr7   T)keepdimr<   )	r@   rK   rE   float32meanpowrsqrtra   r`   )r1   hidden_statesinput_dtyperg   variancer5   r5   r6   rR   V   s   
zCohere2LayerNorm.forward)Nr\   FrT   rU   rV   r%   rR   rZ   r5   r5   r3   r6   r[   O   s    r[   rj   n_repreturnc                 C   s^   | j \}}}}|dkr| S | dddddddddf |||||} | ||| ||S )z
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def rotate_half(x):
    # Split and rotate over interleaved even/odd channels (differs from the split-in-half rotation used elsewhere).
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2)
    return rot_x


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    dtype = q.dtype
    q = q.float()
    k = k.float()
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype)


class Cohere2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Cohere2Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == "sliding_attention" else None

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        if self.sliding_window is not None:
            # Rotary position embeddings are only applied in the sliding-window layers.
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
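# Illustrative sketch, not part of the upstream module: it shows how the interleaved rotary embedding used
# above pairs even/odd channels, and how `repeat_kv` expands grouped key/value heads so queries with more
# heads can attend over them. The toy shapes and the 10000.0 base below are assumptions for the demo only.
def _rope_and_gqa_shape_sketch():
    batch, num_heads, num_kv_heads, seq_len, head_dim = 2, 8, 2, 5, 16
    q = torch.randn(batch, num_heads, seq_len, head_dim)
    k = torch.randn(batch, num_kv_heads, seq_len, head_dim)

    # Build interleaved cos/sin the same way Cohere2RotaryEmbedding does (repeat_interleave, not cat).
    inv_freq = 1.0 / (10000.0 ** (torch.arange(0, head_dim, 2).float() / head_dim))
    freqs = torch.arange(seq_len).float()[:, None] * inv_freq[None, :]
    emb = torch.repeat_interleave(freqs, 2, dim=-1)
    cos, sin = emb.cos()[None, :, :], emb.sin()[None, :, :]

    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)   # broadcast over heads via unsqueeze_dim=1
    k_full = repeat_kv(k_rot, num_heads // num_kv_heads)  # (2, 2, 5, 16) -> (2, 8, 5, 16)
    return q_rot.shape, k_full.shape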
class Cohere2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class Cohere2DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Cohere2Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Cohere2Attention(config=config, layer_idx=layer_idx)
        self.mlp = Cohere2MLP(config)
        self.input_layernorm = Cohere2LayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps)
        self.attention_type = config.layer_types[layer_idx]

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_values (`Cache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states_attention, _ = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        # Fully Connected
        hidden_states_mlp = self.mlp(hidden_states)

        # Parallel residual: attention and MLP read the same normalized input and are summed together.
        hidden_states = residual + hidden_states_attention + hidden_states_mlp

        return hidden_states
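# Illustrative sketch with toy shapes and stand-in sub-layers: Cohere2DecoderLayer above uses a *parallel*
# residual, feeding one normalized activation to both attention and the MLP and adding both outputs to the
# residual stream in a single step, rather than the sequential attn -> add -> norm -> mlp -> add arrangement.
def _parallel_residual_sketch():
    residual = torch.randn(1, 4, 8)
    normed = nn.functional.layer_norm(residual, residual.shape[-1:])  # stand-in for input_layernorm
    attention_branch = torch.tanh(normed)                             # stand-in for self_attn(...)
    mlp_branch = torch.relu(normed)                                   # stand-in for mlp(...)
    return residual + attention_branch + mlp_branch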
zCohere2DecoderLayer.forward)NNFN)rT   rU   rV   r   r   r%   r   rE   rW   r   r   r   boolr   r   r   FloatTensorrR   rZ   r5   r5   r3   r6   r     s0    	r   c                   @   sH   e Zd ZU eed< dZdZdgZdgZdZ	dZ
dZdZdZeedZdS )Cohere2PreTrainedModelr   modelTr   r   )rj   
attentionsN)rT   rU   rV   r   rX   base_model_prefixsupports_gradient_checkpointing_no_split_modules_skip_keys_device_placement_supports_flash_attn_supports_sdpa_supports_flex_attn_can_compile_fullgraph_supports_attention_backendr   r   _can_record_outputsr5   r5   r5   r6   r   =  s   
 
r   c                       s   e Zd Zdef fddZee							ddeej	 deej
 deej	 dee d	eej d
ee deej	 dee defddZ  ZS )Cohere2Modelr   c                    s   t     j| _ j| _t j j| j| _t	 fddt
 jD | _t j jd| _t d| _d| _|   d S )Nc                    s   g | ]}t  |qS r5   )r   ).0r   r   r5   r6   
<listcomp>Y  s    z)Cohere2Model.__init__.<locals>.<listcomp>r   r   F)r$   r%   pad_token_idpadding_idx
vocab_sizer]   	Embeddingrb   embed_tokens
ModuleListrangenum_hidden_layerslayersr[   r   normr   
rotary_embgradient_checkpointing	post_initr   r3   r   r6   r%   R  s   zCohere2Model.__init__N	input_idsr{   rM   r   inputs_embedsr   r   r~   ro   c              	   K   s&  |d u |d uA rt d|d u r| |}|r$|d u r$| js$t| jd}|d u r@|d ur0| nd}	tj|	|	|jd  |j	d}|d u rI|
d}t| }
tsi| j|||||d}td
i |td
i |d}
|}| ||}| jD ]}||f||
|j |||d|}qt| |}t||d	S )Nz:You must specify exactly one of input_ids or inputs_embedsr   r   r   )r2   )r   input_embedsr{   r   r   rM   )full_attentionr   )r   r{   r   r   r   )last_hidden_stater   r5   )
ValueErrorr   r   r   r   get_seq_lengthrE   arangerC   r2   r   r'   r(   r
   r   r   r   r   r   r   )r1   r   r{   rM   r   r   r   r   r~   past_seen_tokenscausal_mask_mappingmask_kwargsrj   r   decoder_layerr5   r5   r6   rR   b  sV   

	


zCohere2Model.forward)NNNNNNN)rT   rU   rV   r   r%   r   r   r   rE   r   rW   r   r   r   r   r   r   rR   rZ   r5   r5   r3   r6   r   P  s<    	
r   c                       s   e Zd ZdgZddiZddgdgfiZ fddZee												dd
e	e
j de	e
j de	e
j de	eeee
j f  de	e
j de	e
j de	e de	e de	e de	e
j deee
jf dee defddZ  ZS )Cohere2ForCausalLMzlm_head.weightlm_headcolwise_reprj   logitsc                    sP   t  | t|| _|j| _tj|j|jdd| _|j	| _	|j
| _
|   d S r   )r$   r%   r   r   r   r]   r   rb   r  logit_scaletie_word_embeddingsr   r   r3   r5   r6   r%     s   
zCohere2ForCausalLM.__init__Nr   r   r{   rM   r   r   labelsr   output_attentionsoutput_hidden_statesr   logits_to_keepr~   ro   c                 K   s   |dur|n| j j}|	dur|	n| j j}	| jd||||||||	|
d	|}|j}t|tr4t| dn|}| |dd|ddf }|| j	 }d}|dur]| j
d||| j jd|}t|||j|j|jdS )a~  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Cohere2ForCausalLM

        >>> model = Cohere2ForCausalLM.from_pretrained("Cohere2ForAI/c4ai-command-r-v01")
        >>> tokenizer = AutoTokenizer.from_pretrained("Cohere2ForAI/c4ai-command-r-v01")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits * self.logit_scale

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["Cohere2ForCausalLM", "Cohere2Model", "Cohere2PreTrainedModel"]