from typing import Callable, Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, logging
from .configuration_cohere import CohereConfig


logger = logging.get_logger(__name__)


class CohereLayerNorm(nn.Module):
    def __init__(self, hidden_size=None, eps=1e-5, bias=False):
        """The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim"""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        mean = hidden_states.mean(-1, keepdim=True)
        variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
        hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
        hidden_states = self.weight.to(torch.float32) * hidden_states
        return hidden_states.to(input_dtype)


class CohereRotaryEmbedding(nn.Module):
    def __init__(self, config: CohereConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            # Cohere interleaves the frequencies instead of concatenating two halves as Llama does
            emb = torch.repeat_interleave(freqs, 2, dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class CohereMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
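
# Editor's note: illustrative sketch, not part of the original module. With grouped-query
# attention the key/value projections carry fewer heads than the queries; repeat_kv expands
# them so that the eager matmul above sees matching head counts.
def _repeat_kv_example():
    batch, kv_heads, n_rep, seq_len, head_dim = 1, 2, 4, 5, 8
    kv = torch.randn(batch, kv_heads, seq_len, head_dim)
    expanded = repeat_kv(kv, n_rep)
    # (1, 2, 5, 8) -> (1, 8, 5, 8): each key/value head is repeated n_rep times along dim=1.
    assert expanded.shape == (batch, kv_heads * n_rep, seq_len, head_dim)
    return expanded
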
&r   c                 C   sB   | dd d df }| ddd df }t j| |gddd}|S )N.r.   r   r,   rT   r   )r    stackflatten)ra   x1x2rot_xr*   r*   r+   rotate_half   s   r   c           	      C   sj   | j }|  } | }||}||}| | t| |  }|| t||  }|j|d|j|dfS )a  Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    dtype = q.dtype
    q = q.float()
    k = k.float()
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype)

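
# Editor's note: illustrative sketch, not part of the original module. Because
# CohereRotaryEmbedding interleaves the frequencies (repeat_interleave) and rotate_half
# slices even/odd channels, each adjacent channel pair (2i, 2i+1) is rotated by the same
# angle, so the rotation preserves vector norms.
def _rotary_example():
    config = CohereConfig(hidden_size=64, num_attention_heads=4, num_key_value_heads=4)
    rope = CohereRotaryEmbedding(config=config)
    x = torch.randn(1, 3, 64)  # only dtype/device of x are read by the rotary module
    position_ids = torch.arange(3)[None, :]
    cos, sin = rope(x, position_ids)  # each of shape (1, 3, head_dim)
    q = torch.randn(1, 4, 3, 16)  # (batch, heads, seq_len, head_dim)
    k = torch.randn(1, 4, 3, 16)
    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
    # A rotation does not change the norm of the per-head vectors.
    assert torch.allclose(q_rot.norm(dim=-1), q.norm(dim=-1), atol=1e-5)
    return q_rot, k_rot
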
class CohereAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: CohereConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)
        self.use_qk_norm = config.use_qk_norm
        if self.use_qk_norm:
            # When sharding the model using Tensor Parallelism, be careful to use n_local_heads
            self.q_norm = CohereLayerNorm(hidden_size=(config.num_attention_heads, self.head_dim), eps=config.layer_norm_eps)
            self.k_norm = CohereLayerNorm(hidden_size=(config.num_key_value_heads, self.head_dim), eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape)
        key_states = self.k_proj(hidden_states).view(hidden_shape)
        value_states = self.v_proj(hidden_states).view(hidden_shape)

        if self.use_qk_norm:  # main diff from Llama
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
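
# Editor's note: illustrative sketch, not part of the original module. When `use_qk_norm`
# is enabled in the config, queries and keys are LayerNorm-ed per head *before* the rotary
# embedding is applied; this is one of the main differences from the Llama attention block.
def _cohere_attention_example():
    config = CohereConfig(hidden_size=64, num_attention_heads=4, num_key_value_heads=4, use_qk_norm=True)
    attn = CohereAttention(config, layer_idx=0)
    rope = CohereRotaryEmbedding(config=config)
    hidden_states = torch.randn(1, 3, 64)
    position_ids = torch.arange(3)[None, :]
    position_embeddings = rope(hidden_states, position_ids)
    attn_output, attn_weights = attn(hidden_states, position_embeddings, attention_mask=None)
    assert attn_output.shape == (1, 3, 64)
    return attn_output
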
ee
 dee dee deej	 deeejejf  dee deejeeejejf  f fddZ  ZS )CohereDecoderLayerr?   r   c                    s@   t    |j| _t||d| _t|| _t|j|jd| _	d S )N)r?   r   r   )
r   r   r%   r   	self_attnri   mlpr   r   input_layernormr   r(   r*   r+   r     s
   

zCohereDecoderLayer.__init__NFr5   r   rb   r   output_attentions	use_cacher   r   r   ru   c	                 K   sb   |}
|  |}| jd||||||||d|	\}}| |}|
| | }|f}|r/||f7 }|S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states_attention, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )

        # Fully Connected
        hidden_states_mlp = self.mlp(hidden_states)

        # Add everything together: attention and MLP branches are applied in parallel to the
        # same normalized input and summed onto the residual stream.
        hidden_states = residual + hidden_states_attention + hidden_states_mlp

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs
@auto_docstring
class CoherePreTrainedModel(PreTrainedModel):
    config_class = CohereConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["CohereDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_3 = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, CohereLayerNorm):
            module.weight.data.fill_(1.0)
@auto_docstring
class CohereModel(CoherePreTrainedModel):
    def __init__(self, config: CohereConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
        self.rotary_emb = CohereRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class CohereForCausalLM(CoherePreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = CohereModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.logit_scale = config.logit_scale
        self.tie_word_embeddings = config.tie_word_embeddings

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, CohereForCausalLM

        >>> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
        >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits * self.logit_scale  # main diff from Llama

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["CohereForCausalLM", "CohereModel", "CoherePreTrainedModel"]
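
# Editor's note: illustrative sketch, not part of the original module. It builds a tiny
# randomly initialized model directly from a config (the real checkpoint to use with
# `from_pretrained` is the one shown in the docstring example above) and demonstrates that
# the language-modeling head scales the logits by `config.logit_scale`, which is the main
# difference from the Llama head.
def _tiny_cohere_forward_example():
    config = CohereConfig(
        vocab_size=128,
        hidden_size=64,
        intermediate_size=128,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=4,
        max_position_embeddings=64,
    )
    model = CohereForCausalLM(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    with torch.no_grad():
        out = model(input_ids=input_ids)
    # One row of logits per input position, already multiplied by config.logit_scale.
    assert out.logits.shape == (1, 8, config.vocab_size)
    return out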