# coding=utf-8
"""PyTorch Dots1 model."""

from collections.abc import Callable
from typing import Optional

import torch
import torch.nn.functional as F
from torch import nn

from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import (
    use_experts_implementation,
    use_kernel_forward_from_hub,
    use_kernel_func_from_hub,
    use_kernelized_func,
)
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_grouped_mm_available
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_dots1 import Dots1Config

@use_kernel_forward_from_hub("RMSNorm")
class Dots1RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps: float = 1e-6) -> None:
        """
        Dots1RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
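
# The forward pass above is plain RMS normalization, computed in float32 for
# numerical stability and cast back to the input dtype; as a one-line formula
# (notation only, not extra functionality):
#
#     y = weight * x / sqrt(mean(x**2, dim=-1) + variance_epsilon)
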
dB d	ed
ef fddZe edd Z  ZS )Dots1RotaryEmbeddinginv_freqNconfigc                    s   t    |j| _|j| _|| _| jjd | _| j}| jdkr$t	| j }|| j|\}| _
| jd|dd | jd| dd d S )N	rope_typedefaultrL   F)
persistentoriginal_inv_freq)r(   r)   max_position_embeddingsmax_seq_len_cachedoriginal_max_seq_lenrM   rope_parametersrN   compute_default_rope_parametersr   attention_scalingregister_bufferclone)r/   rM   devicerope_init_fnrL   r1   r3   r4   r)   J   s   


zDots1RotaryEmbedding.__init__rZ   ztorch.deviceseq_lenr'   ztorch.Tensorc                 C   sZ   | j d }t| ddp| j| j }d}d|tjd|dtjdj|tjd|   }||fS )	a  
        Computes the inverse frequencies according to the original RoPE implementation
        Args:
            config ([`~transformers.PreTrainedConfig`]):
                The model configuration.
            device (`torch.device`):
                The device to use for initialization of the inverse frequencies.
            seq_len (`int`, *optional*):
                The current sequence length. Unused for this type of RoPE.
        Returns:
            Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
            post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
        
rope_thetahead_dimNg      ?r   r6   r9   )rZ   r9   )	rU   getattrr0   num_attention_headsr+   arangeint64r:   rH   )rM   rZ   r\   basedimattention_factorrL   r3   r3   r4   rV   Z   s   
&z4Dots1RotaryEmbedding.compute_default_rope_parametersc           
      C   s   | j d d d d f  |jd dd|j}|d d d d d f  }t|jjtr6|jjdkr6|jjnd}t	|dd+ | |  
dd}tj||fdd	}| | j }| | j }	W d    n1 slw   Y  |j|jd
|	j|jd
fS )Nr   r7   r!   mpscpuF)device_typeenabledr6   re   r_   )rL   rH   expandrC   r:   rZ   
isinstancetypestrr   	transposer+   catcosrW   sinr9   )
r/   xposition_idsinv_freq_expandedposition_ids_expandedri   freqsembrr   rs   r3   r3   r4   rA   x   s   0&zDots1RotaryEmbedding.forwardN)NNN)rE   rF   rG   r+   rI   __annotations__r"   r)   staticmethodr   intrB   rH   rV   no_gradr   rA   rJ   r3   r3   r1   r4   rK   G   s&   
 

rK   c                 C   sH   | dd| j d d f }| d| j d d df }tj| |fddS )z*Rotates half the hidden dims of the input..Nr7   r6   rk   )rC   r+   rq   )rt   x1x2r3   r3   r4   rotate_half   s   r   rotary_pos_embc                 C   sD   | |}| |}| | t| |  }|| t||  }||fS )a  Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    )	unsqueezer   )qkrr   rs   unsqueeze_dimq_embedk_embedr3   r3   r4   apply_rotary_pos_emb   s
   

r   r5   n_repr'   c                 C   s^   | j \}}}}|dkr| S | dddddddddf |||||} | ||| ||S )z
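
# A minimal shape sketch for `apply_rotary_pos_emb` (illustrative comment only;
# the variable names are hypothetical): q and k arrive as
# (batch, num_heads, seq_len, head_dim), while `Dots1RotaryEmbedding` returns
# cos/sin of shape (batch, seq_len, head_dim). The default unsqueeze_dim=1
# turns cos/sin into (batch, 1, seq_len, head_dim) so they broadcast over the
# heads dimension:
#
#     cos, sin = rotary_emb(hidden_states, position_ids)   # (B, S, D) each
#     q, k = apply_rotary_pos_emb(q, k, cos, sin)          # (B, H, S, D) each
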
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
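
# Worked numbers for the grouped-query expansion above (illustrative, not tied
# to a specific checkpoint): with num_attention_heads=32 and
# num_key_value_heads=8, `module.num_key_value_groups` is 4, so `repeat_kv`
# expands K/V from (B, 8, S, D) to (B, 32, S, D) before the scaled
# query @ key^T product and the float32 softmax.
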
class Dots1Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Dots1Config, layer_idx: int):
        super().__init__()
        self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)
        self.q_norm = Dots1RMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.k_norm = Dots1RMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.sliding_window = config.sliding_window if self.layer_type == "sliding_attention" else None

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Dots1MLP(nn.Module):
    def __init__(self, config, intermediate_size=None):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class Dots1TopkRouter(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.n_routed_experts = config.n_routed_experts
        self.weight = nn.Parameter(torch.empty((self.n_routed_experts, config.hidden_size)))
        self.register_buffer("e_score_correction_bias", torch.zeros(self.n_routed_experts))

    def forward(self, hidden_states):
        hidden_states = hidden_states.view(-1, self.config.hidden_size)
        router_logits = F.linear(hidden_states.type(torch.float32), self.weight.type(torch.float32))
        return router_logits
@use_experts_implementation
class Dots1NaiveMoe(nn.Module):
    """Collection of expert weights stored as 3D tensors."""

    def __init__(self, config):
        super().__init__()
        self.num_experts = config.num_local_experts
        self.hidden_dim = config.hidden_size
        self.intermediate_dim = config.moe_intermediate_size
        self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim))
        self.down_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_dim, self.intermediate_dim))
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.Tensor, top_k_index: torch.Tensor, top_k_weights: torch.Tensor) -> torch.Tensor:
        final_hidden_states = torch.zeros_like(hidden_states)
        with torch.no_grad():
            expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts)
            expert_mask = expert_mask.permute(2, 1, 0)
            expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
        for expert_idx in expert_hit:
            expert_idx = expert_idx[0]
            if expert_idx == self.num_experts:
                continue
            top_k_pos, token_idx = torch.where(expert_mask[expert_idx])
            current_state = hidden_states[token_idx]
            gate, up = F.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
            current_hidden_states = self.act_fn(gate) * up
            current_hidden_states = F.linear(current_hidden_states, self.down_proj[expert_idx])
            current_hidden_states = current_hidden_states * top_k_weights[token_idx, top_k_pos, None]
            final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(hidden_states.dtype))
        return final_hidden_states
class Dots1MoE(nn.Module):
    """
    A mixed expert module containing shared experts.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.experts = Dots1NaiveMoe(config)
        self.gate = Dots1TopkRouter(config)
        self.shared_experts = Dots1MLP(config=config, intermediate_size=config.moe_intermediate_size * config.n_shared_experts)
        self.n_routed_experts = config.n_routed_experts
        self.n_group = config.n_group
        self.topk_group = config.topk_group
        self.norm_topk_prob = config.norm_topk_prob
        self.routed_scaling_factor = config.routed_scaling_factor
        self.top_k = config.num_experts_per_tok

    def route_tokens_to_experts(self, router_logits):
        scores = router_logits.sigmoid()
        router_logits_for_choice = scores + self.gate.e_score_correction_bias
        group_scores = (
            router_logits_for_choice.view(-1, self.n_group, self.n_routed_experts // self.n_group)
            .topk(2, dim=-1)[0]
            .sum(dim=-1)
        )
        group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1]
        group_mask = torch.zeros_like(group_scores)
        group_mask.scatter_(1, group_idx, 1)
        score_mask = (
            group_mask.unsqueeze(-1)
            .expand(-1, self.n_group, self.n_routed_experts // self.n_group)
            .reshape(-1, self.n_routed_experts)
        )
        scores_for_choice = router_logits_for_choice.masked_fill(~score_mask.bool(), 0.0)
        topk_indices = torch.topk(scores_for_choice, k=self.top_k, dim=-1, sorted=False)[1]
        topk_weights = scores.gather(1, topk_indices)
        if self.norm_topk_prob:
            denominator = topk_weights.sum(dim=-1, keepdim=True) + 1e-20
            topk_weights /= denominator
        topk_weights = topk_weights * self.routed_scaling_factor
        return topk_indices, topk_weights

    def forward(self, hidden_states):
        residuals = hidden_states
        orig_shape = hidden_states.shape
        router_logits = self.gate(hidden_states)
        topk_indices, topk_weights = self.route_tokens_to_experts(router_logits)
        hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
        hidden_states = self.experts(hidden_states, topk_indices, topk_weights).view(*orig_shape)
        hidden_states = hidden_states + self.shared_experts(residuals)
        return hidden_states
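
# Worked example of the group-limited top-k routing above in
# `Dots1MoE.route_tokens_to_experts` (illustrative numbers, not the shipped
# config): with n_routed_experts=64, n_group=8 and topk_group=4, the
# bias-corrected sigmoid scores are viewed as (tokens, 8, 8); each group is
# ranked by the sum of its two best experts, only the top 4 groups keep their
# scores, and the final top_k experts are drawn from those survivors. The
# returned weights are gathered from the *uncorrected* sigmoid scores,
# optionally renormalized to sum to 1, then scaled by routed_scaling_factor.
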
class Dots1DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Dots1Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Dots1Attention(config=config, layer_idx=layer_idx)

        if layer_idx >= config.first_k_dense_replace:
            self.mlp = Dots1MoE(config)
        else:
            self.mlp = Dots1MLP(config)

        self.input_layernorm = Dots1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Dots1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.attention_type = config.layer_types[layer_idx]

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = False,
        cache_position: torch.LongTensor | None = None,
        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


@auto_docstring
class Dots1PreTrainedModel(PreTrainedModel):
    config: Dots1Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Dots1DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": Dots1DecoderLayer,
        "attentions": Dots1Attention,
    }

    def _init_weights(self, module):
        super()._init_weights(module)
        if isinstance(module, Dots1TopkRouter):
            init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            init.zeros_(module.e_score_correction_bias)
        elif isinstance(module, Dots1NaiveMoe):
            init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
            init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)


@auto_docstring
class Dots1Model(Dots1PreTrainedModel):
    def __init__(self, config: Dots1Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Dots1DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Dots1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Dots1RotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        self.has_sliding_layers = "sliding_attention" in self.config.layer_types

        # Initialize weights and apply final processing
        self.post_init()

    @merge_with_config_defaults
    @capture_outputs
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            mask_kwargs = {
                "config": self.config,
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }
            causal_mask_mapping = {"full_attention": create_causal_mask(**mask_kwargs)}
            if self.has_sliding_layers:
                causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs)

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            # Each layer picks the mask matching its attention type, so full and
            # sliding-window layers can coexist in one forward pass.
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask_mapping[decoder_layer.attention_type],
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )


@auto_docstring
class Dots1ForCausalLM(Dots1PreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
    _tp_plan = {"lm_head": "colwise_gather_output"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = Dots1Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        cache_position: torch.LongTensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Dots1ForCausalLM

        >>> model = Dots1ForCausalLM.from_pretrained("rednote-hilab/dots1.llm1.inst")
        >>> tokenizer = AutoTokenizer.from_pretrained("rednote-hilab/dots1.llm1.inst")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["Dots1PreTrainedModel", "Dots1Model", "Dots1ForCausalLM"]