"""PyTorch Zamba model."""

import math
from typing import Any, Callable, Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...cache_utils import Cache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import auto_docstring, logging
from ...utils.deprecation import deprecate_kwarg
from ...utils.import_utils import is_causal_conv1d_available, is_mamba_ssm_available
from .configuration_zamba import ZambaConfig


if is_mamba_ssm_available():
    from mamba_ssm.ops.selective_scan_interface import mamba_inner_fn, selective_scan_fn
    from mamba_ssm.ops.triton.selective_state_update import selective_state_update
else:
    selective_state_update, selective_scan_fn, mamba_inner_fn = None, None, None

if is_causal_conv1d_available():
    from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
else:
    causal_conv1d_update, causal_conv1d_fn = None, None

is_fast_path_available = all(
    (selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)
)

logger = logging.get_logger(__name__)


class ZambaRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        ZambaRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
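# Illustrative sanity check (a sketch, not part of the original module): a
# unit-weight ZambaRMSNorm rescales features so their root-mean-square along
# the last dimension is ~1, regardless of the input scale.
#
#   norm = ZambaRMSNorm(hidden_size=8)
#   x = torch.randn(2, 4, 8) * 10.0
#   y = norm(x)
#   print(y.pow(2).mean(-1).sqrt())  # ~1.0 everywhere
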
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


class ZambaHybridDynamicCache:
    """
    A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
    (which has a constant shape regardless of seq_len).

    This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
    and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape of each tensor depends on the layer type:
    For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
    while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
    For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
    while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
    and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
    """

    def __init__(self, config: ZambaConfig, batch_size, dtype=torch.float16, device=None):
        self.dtype = dtype
        self.is_compileable = False
        self.layers_block_type = config.layers_block_type
        self.has_previous_state = False
        self.intermediate_size = config.mamba_expand * config.hidden_size
        self.ssm_state_size = config.mamba_d_state
        self.conv_kernel_size = config.mamba_d_conv
        self.n_mamba_heads = config.n_mamba_heads
        self.conv_states = []
        self.ssm_states = []
        self.transformer_layers = []
        self._modules = {}
        self._parameters = {}
        self._buffers = {}
        for i in range(config.num_hidden_layers):
            self.conv_states += [
                torch.zeros(batch_size, self.intermediate_size, self.conv_kernel_size, device=device, dtype=dtype)
            ]
            cache_shape = (
                batch_size,
                self.n_mamba_heads,
                self.intermediate_size // self.n_mamba_heads,
                self.ssm_state_size,
            )
            self.ssm_states += [torch.zeros(cache_shape, device=device, dtype=dtype)]
            if self.layers_block_type[i] == "hybrid":
                self.transformer_layers.append(i)
        self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
        self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]

    def __len__(self):
        return len(self.key_cache)

    def __getitem__(self, layer_idx: int) -> tuple[torch.Tensor, torch.Tensor]:
        return self.key_cache[layer_idx], self.value_cache[layer_idx]

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: Optional[dict[str, Any]] = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        if self.key_cache[layer_idx].shape[-1] == 0:
            self.key_cache[layer_idx] = key_states
            self.value_cache[layer_idx] = value_states
        else:
            self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2)
            self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2)
        return self.key_cache[layer_idx], self.value_cache[layer_idx]

    def reorder_cache(self, beam_idx: torch.LongTensor):
        """Reorders the cache for beam search, given the selected beam indices."""
        for layer_idx in range(len(self.key_cache)):
            device = self.key_cache[layer_idx].device
            self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
            device = self.value_cache[layer_idx].device
            self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
            device = self.conv_states[layer_idx].device
            self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device))
            device = self.ssm_states[layer_idx].device
            self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))

    def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
        """Returns the sequence length of the cached states. A layer index can be optionally passed."""
        layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
        if len(self.key_cache) <= layer_idx:
            return 0
        return self.key_cache[layer_idx].shape[-2]
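# Illustrative usage of the hybrid cache (a sketch, not part of the original
# module; `model` is a hypothetical loaded Zamba model). `generate()` normally
# builds this object itself; constructing it by hand is only needed for manual
# decoding loops:
#
#   cache = ZambaHybridDynamicCache(model.config, batch_size=1, dtype=model.dtype, device=model.device)
#   out = model(input_ids, past_key_values=cache, use_cache=True)
#   # attention layers grow `key_cache`/`value_cache` along seq_len, while
#   # mamba layers keep constant-shape `conv_states`/`ssm_states`.
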
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class ZambaAttention(nn.Module):
    """
    Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
    and "Generating Long Sequences with Sparse Transformers".

    Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
    The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
    The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
    (see fig. 2 in https://huggingface.co/papers/2405.16712).
    Additionally, replaced
    attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
    attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
    """

    def __init__(self, config: ZambaConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.attention_hidden_size = config.attention_hidden_size
        self.head_dim = config.attention_head_dim
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.scaling = (self.head_dim / 2) ** -0.5
        self.is_causal = True
        self.attention_dropout = config.attention_dropout

        self.q_proj = nn.Linear(config.attention_hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        layer_idx: int,
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[ZambaHybridDynamicCache] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        if past_key_values is not None:
            key_states, value_states = past_key_values.update(key_states, value_states, layer_idx)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
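# Worked example of the modified scaling described in the class docstring
# (illustrative only, with made-up sizes): the attention input is a
# concatenation of two hidden states, so attention_hidden_size = 2 * hidden_size
# and each head effectively carries head_dim / 2 features per stream. With a
# hypothetical hidden_size = 2048 and num_attention_heads = 16:
#
#   attention_hidden_size = 2 * 2048 = 4096
#   head_dim              = 4096 / 16 = 256
#   scaling               = 1 / sqrt(256 / 2) = 1 / sqrt(128) ~= 0.0884
#
# i.e. attention logits are normalized as if each head had its original width.
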
class ZambaMambaMixer(nn.Module):
    """
    Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
    A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
    ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
    and is why Mamba is called **selective** state spaces)

    This module differs from `transformers.models.mamba.modeling_mamba.MambaMixer` in two ways:
    - Added multi-head: the output of `self.in_proj` is split into `self.n_mamba_heads` heads, and each head
    undergoes an independent forward pass, identical to the original `MambaMixer`, up until the pre-activations of
    `self.out_proj`. The pre-activations, coming from different mamba heads, are then concatenated and fed into `self.out_proj`.
    """

    def __init__(self, config: ZambaConfig, layer_idx):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.hidden_size = config.hidden_size
        self.ssm_state_size = config.mamba_d_state
        self.conv_kernel_size = config.mamba_d_conv
        self.intermediate_size = config.mamba_expand * config.hidden_size
        self.time_step_rank = config.mamba_dt_rank
        self.n_mamba_heads = config.n_mamba_heads
        self.mamba_head_dim = self.intermediate_size // self.n_mamba_heads
        self.use_conv_bias = config.mamba_conv_bias
        self.use_bias = config.mamba_proj_bias
        self.conv1d = nn.Conv1d(
            in_channels=self.intermediate_size,
            out_channels=self.intermediate_size,
            bias=self.use_conv_bias,
            kernel_size=self.conv_kernel_size,
            groups=self.intermediate_size,
            padding=self.conv_kernel_size - 1,
        )
        self.activation = config.hidden_mamba_act
        self.act = ACT2FN[config.hidden_mamba_act]
        self.use_fast_kernels = config.use_mamba_kernels

        # projection of the input hidden states
        self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=self.use_bias)
        # weight associated to the selective projection used to make dt, B and C input dependent
        self.x_proj_weight = nn.Parameter(
            torch.rand(
                self.n_mamba_heads,
                self.time_step_rank + self.ssm_state_size * 2,
                self.mamba_head_dim,
            )
        )
        # time step projection (discretization)
        self.dt_proj_weight = nn.Parameter(
            (torch.rand(self.n_mamba_heads, self.mamba_head_dim, self.time_step_rank) - 0.5)
            * 2
            / self.time_step_rank**0.5
        )
        self.dt_proj_bias = nn.Parameter(torch.zeros(self.n_mamba_heads, self.mamba_head_dim))

        # S4D real initialization. These are not discretized!
        A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :]
        A = A.expand(self.intermediate_size, -1).contiguous()
        self.A_log = nn.Parameter(torch.log(A.reshape(self.n_mamba_heads, self.mamba_head_dim, -1)))
        self.D = nn.Parameter(torch.ones(self.n_mamba_heads, self.mamba_head_dim))
        self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias)

        if not is_fast_path_available:
            logger.warning_once(
                "The fast path is not available because one of `(selective_state_update, selective_scan_fn,"
                " causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)` is None. To install follow"
                " https://github.com/state-spaces/mamba/#installation and"
                " https://github.com/Dao-AILab/causal-conv1d. If you want to use the naive implementation,"
                " set `use_mamba_kernels=False` in the model config"
            )

    def cuda_kernels_forward(
        self,
        hidden_states: torch.Tensor,
        cache_params: Optional[ZambaHybridDynamicCache] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        batch_size, seq_len, _ = hidden_states.shape
        use_precomputed_states = cache_params is not None and cache_params.has_previous_state and seq_len == 1

        # 1. Gated MLP's linear projection
        projected_states = self.in_proj(hidden_states).transpose(1, 2)

        hidden_states, gate = projected_states.view(batch_size, -1, 2, seq_len).chunk(2, dim=2)
        hidden_states = hidden_states.squeeze(2)
        gate = gate.squeeze(2)
        gate = gate.reshape(batch_size, self.n_mamba_heads, -1, seq_len).transpose(0, 1)

        # 2. Convolution sequence transformation
        conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2))
        if use_precomputed_states:
            hidden_states = causal_conv1d_update(
                hidden_states.squeeze(-1),
                cache_params.conv_states[self.layer_idx],
                conv_weights,
                self.conv1d.bias,
                self.activation,
            )
            hidden_states = hidden_states.unsqueeze(-1)
        else:
            if attention_mask is not None and not torch.all(attention_mask == 1):
                hidden_states = hidden_states * attention_mask.unsqueeze(1)
            if cache_params is not None:
                conv_states = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
                cache_params.conv_states[self.layer_idx].copy_(conv_states)
            hidden_states = causal_conv1d_fn(hidden_states, conv_weights, self.conv1d.bias, activation=self.activation)
            if attention_mask is not None and not torch.all(attention_mask == 1):
                hidden_states = hidden_states * attention_mask.unsqueeze(1)

        # 3. SSM sequence transformation
        hidden_states = hidden_states.reshape(-1, self.n_mamba_heads, self.mamba_head_dim, seq_len).transpose(0, 1)
        ssm_parameters = (self.x_proj_weight[:, None, :, :] @ hidden_states).transpose(-1, -2)

        time_step, B, C = torch.split(
            ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
        )

        discrete_time_step = self.dt_proj_weight[:, None] @ time_step.transpose(-1, -2)

        A = -torch.exp(self.A_log.float())

        # 3.c perform the recurrence y <- SSM(A, B, C)(x), one mamba head at a time
        time_proj_bias = self.dt_proj_bias.float() if self.dt_proj_bias is not None else None
        scan_outputs = torch.empty((batch_size, 0, seq_len), device=hidden_states.device, dtype=hidden_states.dtype)
        if use_precomputed_states:
            for n in range(self.n_mamba_heads):
                scan_outputs_ = selective_state_update(
                    cache_params.ssm_states[self.layer_idx][:, n],
                    hidden_states[n, ..., 0],
                    discrete_time_step[n, ..., 0],
                    A[n],
                    B[n, :, 0],
                    C[n, :, 0],
                    self.D[n],
                    gate[n, ..., 0],
                    time_proj_bias[n],
                    dt_softplus=True,
                ).unsqueeze(-1)
                scan_outputs = torch.cat((scan_outputs, scan_outputs_), dim=1)
        else:
            ssm_state = torch.empty(
                (batch_size, 0, self.mamba_head_dim, self.ssm_state_size),
                device=hidden_states.device,
                dtype=hidden_states.dtype,
            )
            for n in range(self.n_mamba_heads):
                scan_outputs_, ssm_state_ = selective_scan_fn(
                    hidden_states[n],
                    discrete_time_step[n],
                    A[n],
                    B[n].transpose(1, 2),
                    C[n].transpose(1, 2),
                    self.D[n].float(),
                    gate[n],
                    time_proj_bias[n],
                    delta_softplus=True,
                    return_last_state=True,
                )
                scan_outputs = torch.cat((scan_outputs, scan_outputs_), dim=1).contiguous()
                ssm_state = torch.cat((ssm_state, ssm_state_.unsqueeze(1)), dim=1)
            if ssm_state is not None and cache_params is not None:
                cache_params.ssm_states[self.layer_idx].copy_(ssm_state)

        # 4. Final linear projection
        contextualized_states = self.out_proj(scan_outputs.transpose(1, 2))
        return contextualized_states

    def slow_forward(
        self,
        input_states,
        cache_params: Optional[ZambaHybridDynamicCache] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        batch_size, seq_len, _ = input_states.shape
        dtype = input_states.dtype
        # 1. Gated MLP's linear projection
        projected_states = self.in_proj(input_states).transpose(1, 2)
        hidden_states, gate = projected_states.view(batch_size, -1, 2, seq_len).chunk(2, dim=2)
        hidden_states = hidden_states.squeeze(2)
        gate = gate.squeeze(2)
        gate = gate.reshape(batch_size, self.n_mamba_heads, -1, seq_len).transpose(0, 1)

        use_cache = isinstance(cache_params, ZambaHybridDynamicCache)
        # 2. Convolution sequence transformation
        if use_cache and cache_params.ssm_states[self.layer_idx].shape[0] == batch_size:
            if self.training:
                # In training mode, we don't want to perform in-place operations on ssm_state so we can compute the backwards pass
                ssm_state = cache_params.ssm_states[self.layer_idx].clone()
            else:
                ssm_state = cache_params.ssm_states[self.layer_idx]
            ssm_state = ssm_state.to(hidden_states.device)

            if (
                cache_params.has_previous_state
                and seq_len == 1
                and cache_params.conv_states[self.layer_idx].shape[0] == batch_size
            ):
                conv_state = cache_params.conv_states[self.layer_idx]
                conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
                conv_state[:, :, -1] = hidden_states[:, :, 0]
                cache_params.conv_states[self.layer_idx] = conv_state
                hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
                if self.use_conv_bias:
                    hidden_states += self.conv1d.bias
                hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1)
            else:
                if attention_mask is not None and not torch.all(attention_mask == 1):
                    hidden_states = hidden_states * attention_mask[:, -hidden_states.shape[-1] :].unsqueeze(1)
                conv_state = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
                cache_params.conv_states[self.layer_idx] = conv_state
                hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])
                if attention_mask is not None and not torch.all(attention_mask == 1):
                    hidden_states = hidden_states * attention_mask[:, -hidden_states.shape[-1] :].unsqueeze(1)
        else:
            ssm_state = torch.zeros(
                (batch_size, self.n_mamba_heads, self.mamba_head_dim, self.ssm_state_size),
                device=hidden_states.device,
                dtype=dtype,
            )
            if attention_mask is not None and not torch.all(attention_mask == 1):
                hidden_states = hidden_states * attention_mask.unsqueeze(1)
            hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])
            if attention_mask is not None and not torch.all(attention_mask == 1):
                hidden_states = hidden_states * attention_mask.unsqueeze(1)

        hidden_states = hidden_states.reshape(-1, self.n_mamba_heads, self.mamba_head_dim, seq_len).transpose(0, 1)

        # 3. State Space Model sequence transformation
        # 3.a. Selection of dt, B and C from the input-dependent projection
        ssm_parameters = (self.x_proj_weight[:, None, :, :] @ hidden_states).transpose(-1, -2)
        time_step, B, C = torch.split(
            ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
        )

        discrete_time_step = (self.dt_proj_weight[:, None] @ time_step.transpose(-1, -2)) + self.dt_proj_bias[
            :, None, :, None
        ]
        discrete_time_step = nn.functional.softplus(discrete_time_step)

        # 3.b. Discretization: A_bar = exp(dt * A), B_bar * u = dt * B * u
        A = -torch.exp(self.A_log.float())
        discrete_A = torch.exp(A[:, None, :, None, :] * discrete_time_step[:, :, :, :, None])
        discrete_B = discrete_time_step[:, :, :, :, None] * B[:, :, None, :, :].float()
        deltaB_u = discrete_B * hidden_states[:, :, :, :, None].float()

        # 3.c perform the recurrence y <- SSM(A, B, C)(x)
        scan_outputs = []
        for i in range(seq_len):
            ssm_state = discrete_A[:, :, :, i, :].transpose(0, 1) * ssm_state + deltaB_u[:, :, :, i, :].transpose(0, 1)
            scan_output = torch.matmul(ssm_state.transpose(0, 1).to(dtype), C[:, :, i, :].unsqueeze(-1))
            scan_outputs.append(scan_output[:, :, :, 0])
        scan_output = torch.stack(scan_outputs, dim=-1)
        scan_output = scan_output + (hidden_states * self.D[:, None, :, None])
        scan_output = scan_output * self.act(gate)

        if use_cache:
            cache_params.ssm_states[self.layer_idx] = ssm_state

        # 4. Final linear projection
        contextualized_states = self.out_proj(
            scan_output.transpose(0, 1).reshape(batch_size, -1, seq_len).transpose(1, 2)
        )
        return contextualized_states

    def forward(
        self,
        hidden_states,
        cache_params: Optional[ZambaHybridDynamicCache] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        if self.use_fast_kernels:
            if not is_fast_path_available or "cuda" not in self.x_proj_weight.device.type:
                raise ValueError(
                    "Fast Mamba kernels are not available. Make sure they are installed and that the mamba module"
                    " is on a CUDA device. Please run 'pip install causal-conv1d>=1.2.0' and 'pip install mamba-ssm',"
                    " or set use_mamba_kernels=False in the model's config"
                )
            return self.cuda_kernels_forward(hidden_states, cache_params, attention_mask=attention_mask)
        return self.slow_forward(hidden_states, cache_params, attention_mask=attention_mask)
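# Reference sketch of the per-timestep recurrence implemented in `slow_forward`
# above (illustrative only: toy sizes, a single head, no convolution or gating).
# It mirrors the discretization h_t = exp(dt*A) * h_{t-1} + (dt*B_t) * x_t and
# the readout y_t = C_t . h_t + D * x_t:
#
#   d_inner, d_state, T = 4, 16, 8
#   A = -torch.exp(torch.randn(d_inner, d_state))
#   x, dt = torch.randn(T, d_inner), torch.rand(T, d_inner)
#   B, C = torch.randn(T, d_state), torch.randn(T, d_state)
#   D = torch.ones(d_inner)
#   h = torch.zeros(d_inner, d_state)
#   ys = []
#   for t in range(T):
#       h = torch.exp(dt[t, :, None] * A) * h + (dt[t, :, None] * B[t][None, :]) * x[t, :, None]
#       ys.append(h @ C[t] + D * x[t])
#   y = torch.stack(ys)  # (T, d_inner)
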
class ZambaMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class ZambaAttentionDecoderLayer(nn.Module):
    def __init__(self, config: ZambaConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.self_attn = ZambaAttention(config, layer_idx)
        self.feed_forward = ZambaMLP(config)
        self.input_layernorm = ZambaRMSNorm(config.attention_hidden_size, eps=config.rms_norm_eps)
        self.pre_ff_layernorm = ZambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        original_hidden_states: torch.Tensor,
        layer_idx: int,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[ZambaHybridDynamicCache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        **kwargs,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)`
            original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`.
                This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The
                concatenated tensor is then used as input of the pre-attention RMSNorm
                (see fig. 2 in https://huggingface.co/papers/2405.16712).
            layer_idx (`int`): layer_idx in the forward pass. Used to distinguish Zamba's tied transformer layers.
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            past_key_values (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence.
        r4   r   )r<   r}   r   r   r,  r  Nr1   )r(   concatenater)  r&  r*  r'  )r,   r<   r+  r}   r   r   r,  r  r   self_attn_weightsoutputsr1   r1   r2   r?   O  s$    





z"ZambaAttentionDecoderLayer.forwardrz   )NNFF)rD   rE   rF   r   r   r   r&   r   r(   r   rQ   boolr   r   r@   FloatTensorr?   rG   r1   r1   r/   r2   r$  F  s4    	
r$  c                       s   e Zd Zdedef fddZedddd											
	
				ddejde	ej de	e de	ej de	ej de	e
 de	e de	e de	ej de	ej deeje	eejejf  f fddZ  ZS )ZambaMambaDecoderLayerrw   r}   c                    s4   t    t||d| _t|j|jd| _|| _d S )N)rw   r}   r%  )	r%   r&   r   mambar#   r-   r(  r)  r}   r   r/   r1   r2   r&     s   

zZambaMambaDecoderLayer.__init__r   r   r   r   NFr<   r+  r   r   r,  r  cache_positiontransformer_hidden_statesrI   c                 K   sd   |}|
dur
||
 n|}|  |}| j|||d}d}|| }|f}|r)||f7 }|r0||f7 }|S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            past_key_values (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence.
        """
        residual = hidden_states

        # `transformer_hidden_states` is the output of the shared transformer + linear layer
        # (see fig. 2 in https://huggingface.co/papers/2405.16712); it is added to the input
        # of the mamba layer below.
        hidden_states = (
            hidden_states + transformer_hidden_states if transformer_hidden_states is not None else hidden_states
        )
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states = self.mamba(
            hidden_states=hidden_states,
            cache_params=past_key_values,
            attention_mask=attention_mask,
        )

        self_attn_weights = None

        # residual connection after mamba
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)
        if use_cache:
            outputs += (past_key_values,)
        return outputs


class ZambaHybridLayer(nn.Module):
    def __init__(self, shared_transf: ZambaAttentionDecoderLayer, linear: nn.Linear, mamba: ZambaMambaDecoderLayer):
        super().__init__()
        self.shared_transf = shared_transf
        self.linear = linear
        self.mamba_decoder = mamba

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        original_hidden_states: Optional[torch.Tensor] = None,
        layer_idx: Optional[int] = None,
        attention_mask: Optional[torch.Tensor] = None,
        causal_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[ZambaHybridDynamicCache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with
            hidden activations to form the input of the shared transformer layer.
            layer_idx (`int`): layer number.
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            past_key_values (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence.
        """
        layer_outputs = self.shared_transf(
            hidden_states,
            original_hidden_states=original_hidden_states,
            layer_idx=layer_idx,
            attention_mask=causal_mask,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
        )

        transformer_hidden_states = layer_outputs[0]
        if output_attentions:
            self_attn_weights = layer_outputs[1]

        transformer_hidden_states = self.linear(transformer_hidden_states)

        layer_outputs = self.mamba_decoder(
            hidden_states,
            transformer_hidden_states=transformer_hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
        )

        if output_attentions:
            layer_outputs = (layer_outputs[0], self_attn_weights) + layer_outputs[2:]

        return layer_outputs


@auto_docstring
class ZambaPreTrainedModel(PreTrainedModel):
    config: ZambaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["ZambaAttentionDecoderLayer", "ZambaMambaDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn = True
    _supports_sdpa = False
    _is_stateful = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, ZambaRMSNorm):
            module.weight.data.fill_(1.0)
        elif isinstance(module, ZambaMambaMixer):
            module.x_proj_weight.data.normal_(mean=0.0, std=std)
            dt_init_std = self.config.mamba_dt_rank**-0.5
            nn.init.uniform_(module.dt_proj_weight, -dt_init_std, dt_init_std)

            mamba_head_dim = self.config.mamba_expand * self.config.hidden_size // self.config.n_mamba_heads
            dt = torch.exp(
                torch.rand(self.config.n_mamba_heads, mamba_head_dim)
                * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
                + math.log(self.config.time_step_min)
            ).clamp(min=self.config.time_step_floor)
            # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
            inv_dt = dt + torch.log(-torch.expm1(-dt))
            with torch.no_grad():
                module.dt_proj_bias.copy_(inv_dt)

            A = torch.arange(1, self.config.mamba_d_state + 1, dtype=torch.float32)[None, :]
            A = A.expand(self.config.mamba_expand * self.config.hidden_size, -1).contiguous()
            module.A_log.data.copy_(torch.log(A).reshape(self.config.n_mamba_heads, mamba_head_dim, -1))
            module.D.data.fill_(1.0)


@auto_docstring
class ZambaModel(ZambaPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`ZambaDecoderLayer`]

    Args:
        config: ZambaConfig
    """

    def __init__(self, config: ZambaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        block = ZambaAttentionDecoderLayer(config)
        mamba_layers = []
        linear_layers = []
        self.layers_block_type = config.layers_block_type
        for i in range(config.num_hidden_layers):
            if config.layers_block_type[i] == "mamba":
                mamba_layers.append(ZambaMambaDecoderLayer(config, layer_idx=i))
            elif config.layers_block_type[i] == "hybrid":
                linear_layers.append(nn.Linear(self.config.hidden_size, self.config.hidden_size, bias=False))
                mamba_layers.append(ZambaMambaDecoderLayer(config, layer_idx=i))
        mamba_layers = iter(mamba_layers)
        linear_layers = iter(linear_layers)
        layers = []
        self._tied_weights_keys = []
        for layer_id, layer_type in enumerate(self.layers_block_type):
            if layer_type == "hybrid":
                prefix_name = f"layers.{layer_id}."
                tied_keys = [
                    "shared_transf.self_attn.q_proj.weight",
                    "shared_transf.self_attn.k_proj.weight",
                    "shared_transf.self_attn.v_proj.weight",
                    "shared_transf.self_attn.o_proj.weight",
                    "shared_transf.feed_forward.gate_proj.weight",
                    "shared_transf.feed_forward.up_proj.weight",
                    "shared_transf.feed_forward.down_proj.weight",
                    "shared_transf.input_layernorm.weight",
                    "shared_transf.pre_ff_layernorm.weight",
                ]
                self._tied_weights_keys = [*self._tied_weights_keys, *[prefix_name + key for key in tied_keys]]
                layers.append(ZambaHybridLayer(block, next(linear_layers), next(mamba_layers)))
            else:
                layers.append(next(mamba_layers))
        self.layers = nn.ModuleList(layers)

        self._attn_implementation = config._attn_implementation
        self.final_layernorm = ZambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[ZambaHybridDynamicCache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
            )

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        hidden_states = inputs_embeds

        # the word embedding output is kept aside: it is concatenated with the running
        # hidden states to form the input of the shared transformer layer
        original_hidden_states = torch.clone(inputs_embeds)

        if use_cache and past_key_values is None:
            logger.warning_once(
                "Zamba requires an initialized `ZambaHybridDynamicCache` to return a cache. None was provided, so no "
                "cache will be returned."
            )

        if cache_position is None:
            cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device)
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for layer_idx, layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    original_hidden_states,
                    layer_idx,
                    attention_mask,
                    causal_mask,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                )
            else:
                layer_outputs = layer(
                    hidden_states,
                    original_hidden_states=original_hidden_states,
                    layer_idx=layer_idx,
                    attention_mask=attention_mask,
                    causal_mask=causal_mask,
                    past_key_values=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                )
            hidden_states = layer_outputs[0]

            if output_attentions:
                if layer_outputs[1] is not None:
                    all_self_attns += (layer_outputs[1],)

        hidden_states = self.final_layernorm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if past_key_values and not past_key_values.has_previous_state:
            past_key_values.has_previous_state = True

        output = BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
        return output if return_dict else output.to_tuple()

    def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        dtype, device = input_tensor.dtype, input_tensor.device
        min_dtype = torch.finfo(dtype).min
        sequence_length = input_tensor.shape[1]
        target_length = cache_position[-1] + 1

        causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
        if sequence_length != 1:
            causal_mask = torch.triu(causal_mask, diagonal=1)
        causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
        causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
        if attention_mask is not None:
            causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
            if attention_mask.dim() == 2:
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
                causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask


class ZambaForCausalLM(ZambaPreTrainedModel, GenerationMixin):
    def __init__(self, config: ZambaConfig):
        super().__init__(config)
        self.model = ZambaModel(config)
        self._tied_weights_keys = ["lm_head.weight", *self.model._tied_weights_keys]
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[ZambaHybridDynamicCache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs,
    ) -> Union[tuple, CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, ZambaForCausalLM

        >>> model = ZambaForCausalLM.from_pretrained("Zyphra/Zamba-7B-v1")
        >>> tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba-7B-v1")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```N)
rr  r   rs  r   rt  r  r,  ru  r4  rv  r   r   losslogitsr   r<   rx  )rw   r,  ru  ry  r=  r
  r   slicer  loss_functionra  r   r   r<   rx  )r,   rr  r   rs  r   rt  r  r  r,  ru  rv  r4  r  r   r/  r<   slice_indicesr  r  r  r1   r1   r2   r?     s@   (zZambaForCausalLM.forwardTc              	   K   s<  |d u }	|	s5|d us|d |j d kr"|d d |j d  d f }n!|j d |j d kr4|d d |f }nt| j|j d | j| jd}|d url|d u rl| dd }||dkd |	sl|d d |j d  d f }|d urw|	rwd|i}
nd| i}
|
	||||| jj
|d | D ]\}}||
vr||
|< q|
S )Nr4   r   r   )r6   rS   rt  rr  )rs  r   r  r   r  r4  )rA   rQ   rw   r6   rS   longcumsummasked_fill_r   r   num_logits_to_keepitems)r,   rr  r   r   rt  r4  rs  r  r   empty_past_kvmodel_inputsr   r   r1   r1   r2   prepare_inputs_for_generationj  sB   
z.ZambaForCausalLM.prepare_inputs_for_generation)NNNNNNNNNNNr   )NNNNNT)rD   rE   rF   r   r&   r   r   r(   r   r   rQ   r1  r0  r   r   r@   r   r?   r  rG   r1   r1   r/   r2   r    sb    
	

Tr  a  
    The Zamba Model with a sequence classification head on top (linear layer).

    [`ZambaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class ZambaForSequenceClassification(ZambaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = ZambaModel(config)
        self._tied_weights_keys = self.model._tied_weights_keys
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right-padding, take the rightmost token that is not a pad token
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


__all__ = ["ZambaForCausalLM", "ZambaForSequenceClassification", "ZambaModel", "ZambaPreTrainedModel"]
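

# ---------------------------------------------------------------------------
# Hedged smoke test (not part of the original module): builds a tiny, randomly
# initialized Zamba and runs one forward pass on the pure-PyTorch slow path.
# The config sizes below are illustrative assumptions, not the released model's,
# and very small depths may need adjusting for the default layer layout.
if __name__ == "__main__":
    config = ZambaConfig(
        vocab_size=128,
        hidden_size=64,
        num_hidden_layers=4,
        num_attention_heads=4,
        num_key_value_heads=4,
        use_mamba_kernels=False,  # force `slow_forward`; no CUDA kernels needed
    )
    model = ZambaForCausalLM(config)
    input_ids = torch.randint(0, config.vocab_size, (1, 10))
    with torch.no_grad():
        out = model(input_ids)
    print(out.logits.shape)  # expected: (1, 10, vocab_size)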