from typing import Callable, Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import (
    GenericForQuestionAnswering,
    GenericForSequenceClassification,
    GenericForTokenClassification,
    GradientCheckpointingLayer,
)
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.deprecation import deprecate_kwarg
from ...utils.generic import check_model_inputs
from .configuration_qwen3 import Qwen3Config


@use_kernel_forward_from_hub("RMSNorm")
class Qwen3RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps: float = 1e-6) -> None:
        """
        Qwen3RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


class Qwen3MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class Qwen3Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Qwen3Config, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        self.q_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps)  # unlike olmo, only on the head dim!
        self.k_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps)  # thus post q_norm does not need reshape
        self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == "sliding_attention" else None

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Qwen3DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Qwen3Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Qwen3Attention(config=config, layer_idx=layer_idx)
        self.mlp = Qwen3MLP(config)
        self.input_layernorm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.attention_type = config.layer_types[layer_idx]

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


@auto_docstring
class Qwen3PreTrainedModel(PreTrainedModel):
    config: Qwen3Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Qwen3DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": Qwen3DecoderLayer,
        "attentions": Qwen3Attention,
    }


class Qwen3RotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor  # fix linting for `register_buffer`

    def __init__(self, config: Qwen3Config, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@auto_docstring
class Qwen3Model(Qwen3PreTrainedModel):
    def __init__(self, config: Qwen3Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Qwen3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Qwen3RotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        self.has_sliding_layers = "sliding_attention" in self.config.layer_types

        # Initialize weights and apply final processing
        self.post_init()

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # It may already have been prepared by e.g. `generate`
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            # Prepare mask arguments
            mask_kwargs = {
                "config": self.config,
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }
            # Create the masks
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
            }
            # The sliding window alternating layers are not always activated depending on the config
            if self.has_sliding_layers:
                causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs)

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask_mapping[decoder_layer.attention_type],
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )


@auto_docstring
class Qwen3ForCausalLM(Qwen3PreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = Qwen3Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Qwen3ForCausalLM

        >>> model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-8B")
        >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class Qwen3ForSequenceClassification(GenericForSequenceClassification, Qwen3PreTrainedModel):
    pass


class Qwen3ForTokenClassification(GenericForTokenClassification, Qwen3PreTrainedModel):
    pass


class Qwen3ForQuestionAnswering(GenericForQuestionAnswering, Qwen3PreTrainedModel):
    base_model_prefix = "transformer"  # For BC, where `transformer` was used instead of `model`


__all__ = [
    "Qwen3ForCausalLM",
    "Qwen3ForQuestionAnswering",
    "Qwen3Model",
    "Qwen3PreTrainedModel",
    "Qwen3ForSequenceClassification",
    "Qwen3ForTokenClassification",
]