o
    eiP                     @   s  d Z ddlmZ ddlZddlmZ ddlmZ ddlmZm	Z	 ddl
mZmZmZmZmZmZ ddlmZmZmZmZmZ d	d
lmZ d	dlmZ d	dlmZ d	dlmZm Z  d	dl!m"Z" d	dl#m$Z$ d	dl%m&Z&m'Z'm(Z(m)Z) ddl*m+Z+ e),e-Z.G dd deddZ/G dd deZG dd deZ0d0ddZ1G dd deZ2G dd  d eZ3G d!d" d"ej4Z5G d#d$ d$eZ6G d%d& d&eZ7G d'd( d(e	Z8e&G d)d* d*e"Z9e&G d+d, d,e9Z:G d-d. d.eZ;g d/Z<dS )1zPyTorch Bamba model.    )	TypedDictN)nn)ACT2FN) HybridMambaAttentionDynamicCacheJambaAttentionDecoderLayer)LlamaAttentionLlamaForCausalLMLlamaMLPLlamaRMSNormLlamaRotaryEmbeddingrotate_half)MambaRMSNormGatedapply_mask_to_padding_statespad_tensor_by_sizereshape_into_chunkssegment_sum   )initialization)lazy_load_kernel)create_causal_mask)BaseModelOutputWithPastCausalLMOutputWithPast)PreTrainedModel)Unpack)auto_docstringcan_return_tupleis_torchdynamo_compilinglogging   )BambaConfigc                   @   s@   e Zd ZU dZejed< ejed< eed< eed< ejed< dS )BambaFlashAttentionKwargsaU  
    Keyword arguments for advanced Flash Attention, causal-conv1d, and mamba_ssm kernel usage.
    Use cases include padding-free training and fewer `torch.compile` graph breaks.

    cu_seq_lens_q (`torch.LongTensor`):
        Gets cumulative sequence length for query state.
    cu_seq_lens_k (`torch.LongTensor`):
        Gets cumulative sequence length for key state.
    max_length_q (`int`):
        Maximum sequence length for query state.
    max_length_k (`int`):
        Maximum sequence length for key state.
    seq_idx (`torch.IntTensor`):
        Index of each packed sequence.
    cu_seq_lens_qcu_seq_lens_kmax_length_qmax_length_kseq_idxN)	__name__
__module____qualname____doc__torch
LongTensor__annotations__int	IntTensor r/   r/   e/home/ubuntu/transcripts/venv/lib/python3.10/site-packages/transformers/models/bamba/modular_bamba.pyr    9   s   
 

r    F)totalc                   @   s&   e Zd ZdZejdfdefddZdS )r   a  
    A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
    (which has a constant shape regardless of seq_len).

    This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
    and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor
    For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
    while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
    For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
    while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
    and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
    Nconfigc                    s0  |j | _ d| _|j}|j}g | _g | _g | _t|jD ]^}| j | dkrS|  jt	j
 |j|j d|j |  ||dg7  _|  jt	j
 |j|j||dg7  _q|  jt	jg g  dg7  _|  jt	jg g  dg7  _| j| q fddt|jD | _ fddt|jD | _d S )	NFmamba   devicedtyper6   c                        g | ]}t jg g  d qS r8   r*   tensor.0_
batch_sizer6   r/   r0   
<listcomp>        z=HybridMambaAttentionDynamicCache.__init__.<locals>.<listcomp>c                    r9   r:   r;   r=   r@   r/   r0   rB      rC   )layers_block_typehas_previous_statemamba_d_convmamba_d_stateconv_states
ssm_statestransformer_layersrangenum_hidden_layersr*   zerosmamba_expandhidden_sizemamba_n_groupsmamba_n_headsmamba_d_headr<   append	key_cachevalue_cache)selfr2   rA   r7   r6   conv_kernel_sizessm_state_sizeir/   r@   r0   __init__`   sB   	
   z)HybridMambaAttentionDynamicCache.__init__)r&   r'   r(   r)   r*   float16r   rZ   r/   r/   r/   r0   r   R   s    r   c                   @      e Zd ZdS )BambaRotaryEmbeddingNr&   r'   r(   r/   r/   r/   r0   r]          r]   c                 C   s   | |}| |}|jd }| dd|f | d|df }}|dd|f |d|df }}	|| t||  }
|| t||  }tj|
|gdd}
tj||	gdd}|
|fS )a  Applies Rotary Position Embedding to the query and key tensors.

    Removes the interleaving of cos and sin from GLM

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    .Ndim)	unsqueezeshaper   r*   cat)qkcossinunsqueeze_dim
rotary_dimq_rotq_passk_rotk_passq_embedk_embedr/   r/   r0   apply_rotary_pos_emb   s   


""rr   c                   @   r\   )BambaAttentionNr^   r/   r/   r/   r0   rs      r_   rs   c                   @   r\   )BambaRMSNormGatedNr^   r/   r/   r/   r0   rt      r_   rt   c                       s   e Zd ZdZdedef fddZ				ddejde	dB d	ej
dB d
ejdB dejdB f
ddZ			dde	dB d	ej
dB d
ejdB fddZ				dde	dB d	ej
dB d
ejdB dejdB fddZ  ZS )
BambaMixeruP  
    Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
    A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
    ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
    and is why Mamba is called **selective** state spaces)

    The are a few differences between this and Mamba2Mixer:
    - The variable use_precomputed_states is slightly different due to the hybrid cache structure
    - There's a few non-obvious bugs fixed with batching in the slow path that exist in main
    - Some extra variables that our layer doesn't need have been removed
    - We ported most of the refactors in https://github.com/huggingface/transformers/pull/35154, which is (as of Dec 18, 2024) unmerged
    r2   	layer_idxc                    s  t    |j| _|j| _|j| _|j| _t	|j
| j | _|| _|j| _|j| _t|j | _|j| _|j| _|j| _|j| _|j| _|j| _|j| _|j| _| jd| j | j  | _ t!j"| j | j |j| j| j | jd d| _#| j| j  | j }t!j$| j|| jd| _%t!&t'(| j| _)t'*d| jd }t!&t'+|| _,t-| j| jd| _.t!&t'(| j| _/t!j$| j| j| jd| _0t1d}t2|dd a3t2|dd a4t1d	}t2|d
d a5t2|dd a6t2|dd a7t8t5t4t3fa9t9st:;d d S t:;d d S )Nr4   r   )in_channelsout_channelsbiaskernel_sizegroupspadding)ry   epszcausal-conv1dcausal_conv1d_updatecausal_conv1d_fnz	mamba-ssmselective_state_updatemamba_chunk_scan_combined mamba_split_conv1d_scan_combineda  The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)` is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and https://github.com/Dao-AILab/causal-conv1dzDThe fast path for Bamba will be used when running the model on a GPU)<superrZ   rQ   	num_headsrO   rG   rX   rF   rW   r-   rN   intermediate_sizerv   mamba_conv_biasuse_conv_bias
hidden_act
activationr   actmamba_proj_biasuse_biasrms_norm_epslayer_norm_epsilonrP   n_groupsrR   head_dimmamba_chunk_size
chunk_sizetime_step_limittime_step_mintime_step_maxconv_dimr   Conv1dconv1dLinearin_proj	Parameterr*   onesdt_biasarangelogA_logrt   normDout_projr   getattrr   r   r   r   r   allis_fast_path_availableloggerwarning_once)rV   r2   rv   projection_sizeAcausal_conv1d	mamba_ssm	__class__r/   r0   rZ      sh   

	zBambaMixer.__init__Nhidden_statescache_paramscache_positionattention_maskr%   c                 C   s  t ||}| |}|j\}}}	| j| j }
|d uoD|joD|dkoD|j| j jd |j| j jd   ko8|kn  oD|d uoD|d dk}|r)|	dj
| j| j| jgdd\}}}t||j| j | jj	d| jj| j}tj
|| j|
|
gdd\}}}t| j  }|d d d df d d d d d f d| j| jjtjd}|d d d d d f dd| j}| jd d d df d| j}| jd d d df d| j}||| j|jd | j }||| j|jd | j }||| j| j}t|j| j ||||||d |dd
}||| j| j }| ||}|  |d d d df }|S t| j  }| j!d	td
fkr>i nd| j!i}| j"r||d u r|t#|| jj	d| jj| j|f| j| j$|| j| jj| jj%| j j| j j| j| jddd|}|S |j
| j| j| jgdd\}}}|d ur|&dd}t'j()|| j*|jd  df}|j| j +| | jdvr| ,| |&dddd |f &dd}nt-|&dd| jj	d| jj| j|d&dd}t ||}tj
|| j|
|
gdd\}}}t.|||d| j|||||| jd|||| jdf| j$| jd |d| jdd|\}}|d ur:|d ur:|j| j +| |||d}| ||}|  |}|S )Nr   r   r`   ra   .r7   T)zr   dt_softplusg        infdt_limitF)r   r   r%   r   rmsnorm_weightrmsnorm_epsoutproj_weightoutproj_biasheaddimngroupsnorm_before_gatereturn_final_statesr4   )siluswish)xweightry   r   r%   )r   r   r   r%   r   r   r   )/r   r   rd   r   rX   rE   rH   rv   rI   squeezesplitr   r   r   r   r   r   ry   r   r*   expr   floatexpandr   tofloat32r   r   viewr   r   r   r   trainingr   r   variance_epsilon	transposer   
functionalpadrW   copy_r   r   r   )rV   r   r   r   r   r%   projected_statesrA   seq_lenr?   groups_time_state_sizeuse_precomputed_statesgatehidden_states_B_CdtBCr   r   r   hidden_states_reshapedoutdt_limit_kwargshidden_states_B_C_transposedrH   scan_output	ssm_stater/   r/   r0   cuda_kernels_forward  s  
	




<"
^"V
$




zBambaMixer.cuda_kernels_forwardc           3   
      s  |j \}}}|j}t||}|}	|	jjjjgdd\}
}}|d uoQ|joQ|dkoQ|j	j
 j d |jj
 j d   koE|kn  oQ|d uoQ|d dk}|r|j	j
 jddd|j	j
< |d d dd d f |j	j
 j|j	j
 d d d d df< |j	j
 jjjjd}tj|jjd dd}jr|jj }|}n8|d ur|dd}tj|j|j d  df}|j	j
 | |dddd |f dd}t||}tj|jjj jj gdd\}}}tj !  }|r[|jj
 j}|d d dd d f d d d df }|dd"||j d j#}j$d	 "j$j d j#}tjj%|||j }t&|j'd j'd }|d
 "jj#jjtj(d}t|d	 | j|d}|)|jddd d d f }|"|jjj |j d * }|)|d|j d }|d	 |dd d d f  }|)|dj#}||d	  j|d}|jj
 |jj
 | |  |)|jddd d d f }|"|jjj |j d * }|)|d|j d }|jj
 j|j|jd}|+|j j#j}|+|j jd}t,||}|+|jj#}j-d	 "j-j d j#}|||  |j}|)|dd d d df }ntj%|j$ }t&|j'd j'd }|)||dj#! }|)||dj! }|)||dj! }|j.jj djd}|j.jj djd}j/|j/  j/  j-d	 t0|  }||d	  }||j| } fdd||||fD \}}}}|1dddd}tj2|dd}tt3|} |d d d d d d d d d d d f |d d d d d d d d d d d f  }!|!jdd}"|"d	 | 1dddddd	  }#|#jdd}$|$d	 |d d d d d f  jdd}%t|d d d d d d dd f | }&||&1ddddd	  }'|'dd d d f |d	  jdd}(|r|jj
 d d d df j|(jd})nt4|(d d d df })tj5|)|(gdd}(tt3tj|d d d d d d df d}*|*dd}*|*d
 |(d d d d d df  jdd}+|+d d d df |+d d df }(},t|}-|dd d d f |(d d d d d df  }.|-1dddd}/|.d|/d	  }0|%|0 }|)|djj#}|| } dkrB|d d d |d d d d f }|)||d}|,d ur\|d ur\|jj
 |, 6||
}17|1|}2|2S )Nr`   ra   r   r   )shiftsdimsr8   r4   .).N).NNr   r5   )rb   output_sizec                    s   g | ]	}t | jqS r/   )r   r   )r>   tpad_sizerV   r/   r0   rB   J  s    z,BambaMixer.torch_forward.<locals>.<listcomp>r      )r   r   )8rd   r7   r   r   r   r   r   r   rE   rH   rv   rI   rollr   r6   r   r   r*   sumr   r   ry   r   r   r   r   r   rW   r   r   rX   r   r   r   r   r   r   softplusclampr   r   reshape
contiguousr   bmmr   repeat_interleaver   r   permutecumsumr   
zeros_likere   r   r   )3rV   input_statesr   r   r   rA   r   r?   r7   r   r   r   r   r   rH   r   r   r   r   r   cache_devicer   dAdBdBxrI   ssm_states_reshaped
C_reshapedyr   
D_residualA_cumsumLG_intermediateGM_intermediateMY_diagdecay_statesB_decaystatesprevious_statesdecay_chunk
new_statesr   state_decay_outC_times_statesstate_decay_out_permutedY_offr   contextualized_statesr/   r   r0   torch_forward  s   


@,
$"$$$P&*"&0(&
*
 zBambaMixer.torch_forwardc                 K   s   t rd| jjjjv rt s| |||||S |d urtd|j}|d urC|j	d dkrC|j	d dkrC||d d d d d f  
|}| ||||S )Ncudaz\`seq_idx` support requires fast path support. Please install `mamba_ssm` and `causal_conv1d`r   r   )r   r   r   r6   typer   r   NotImplementedErrorr7   rd   r   r  )rV   r   r   r   r   r%   kwargsr7   r/   r/   r0   forward  s   	$ zBambaMixer.forward)NNNN)NNN)r&   r'   r(   r)   r   r-   rZ   r*   Tensorr   r+   r.   r   r  r  __classcell__r/   r/   r   r0   ru      sV    Q
 .
 Sru   c                   @   r\   )BambaMLPNr^   r/   r/   r/   r0   r    r_   r  c                   @   r\   )BambaRMSNormNr^   r/   r/   r/   r0   r    r_   r  c                       s   e Zd Zddededef fddZ							dd	ejd
ejdB dej	dB de
dB dedB dedB dej	dB deejejf dB dee deejeejejf dB f fddZ  ZS )BambaDecoderLayerr3   r2   rv   
layer_typec                    sp   t  || | `d}|dkrtnd }||| _|| _|dkr(t||d| _d S |dkr4t||| _d S t	d)Nr   r3   )r2   rv   	attentionzInvalid layer_type)
r   rZ   	self_attnr  feed_forwardr  ru   r3   rs   
ValueError)rV   r2   rv   r  num_expertsffn_layer_classr   r/   r0   rZ     s   
zBambaDecoderLayer.__init__NFr   r   position_idspast_key_valuesoutput_attentions	use_cacher   position_embeddingsr  returnc	                 K   s   |}
|  |}| jdkr| jd||||d|	}d}n| jdkr4| jd||||||||d|	\}}|
| }|}
| |}| |}|
| }|f}|rR||f7 }|S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            past_key_values (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence.
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs. Can be used to provide `BambaFlashAttentionKwargs` for
                padding-free training and/or improve torch.compile performance.
        r3   )r   r   r   r   Nr  )r   r   r#  r$  r%  r&  r   r'  r/   )input_layernormr  r3   r  pre_ff_layernormr  )rV   r   r   r#  r$  r%  r&  r   r'  r  residualself_attn_weightsoutputsr/   r/   r0   r    sD   "


	



zBambaDecoderLayer.forward)r3   )NNNFFNN)r&   r'   r(   r   r-   strrZ   r*   r  r+   r   booltupler   r    FloatTensorr  r  r/   r/   r   r0   r    s<    	
r  c                       sL   e Zd ZU eed< dZdZdgZdZdZ	dZ
dZe  fddZ  ZS )BambaPreTrainedModelr2   modelTr  r$  c              
      sX   t  | t|tr*t|j t|jt	
t	d|jd  t|j d S d S )Nr   )r   _init_weights
isinstanceru   initones_r   r   r   r*   r   r   r   r   )rV   moduler   r/   r0   r4    s   
"z"BambaPreTrainedModel._init_weights)r&   r'   r(   r   r,   base_model_prefixsupports_gradient_checkpointing_no_split_modules_skip_keys_device_placement_supports_flash_attn_supports_sdpa_is_statefulr*   no_gradr4  r  r/   r/   r   r0   r2    s   
 r2  c                       s   e Zd Zdef fddZee									ddejdB dej	dB dejdB de
dB d	ejdB d
edB dedB dedB dejdB dee defddZdd Z  ZS )
BambaModelr2   c                    s   t  | |j| _|j| _t|j|j| j| _g }t	|j
D ]}|t|||j| d q t|| _|j| _t|j|jd| _t|d| _d| _|   d S )N)rv   r  r}   )r2   F)r   rZ   pad_token_idpadding_idx
vocab_sizer   	EmbeddingrO   embed_tokensrK   rL   rS   r  rD   
ModuleListlayers_attn_implementationr  r   final_layernormr]   
rotary_embgradient_checkpointing	post_init)rV   r2   decoder_layersrY   r   r/   r0   rZ   &  s   zBambaModel.__init__N	input_idsr   r#  r$  inputs_embedsr&  r%  output_hidden_statesr   r  r(  c
                 K   s  |d ur|n| j j}|d ur|n| j j}|d ur|n| j j}|d u |d uA r*td| jr9| jr9|r9td d}|d u rB| 	|}|}|rO|d u rOtd |	d u r^t
j|jd |jd}	|d u rg|	d}t| j |||	||d}| ||	}| j||d	}|rd
nd }|rd
nd }| jD ]5}|jdkr|n|}|r||f7 }||f||||||	|d|
}|d }|r|d d ur||d f7 }q| |}|r||f7 }|r|jsd|_|sd n|}t||||dS )Nz:You must specify exactly one of input_ids or inputs_embedszX`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.FzBamba requires an initialized `HybridMambaAttentionDynamicCache` to return a cache. None was provided, so no cache will be returned.r   r8   r   )r2   rP  r   r   r$  r#  )r#  r/   r3   )r   r#  r$  r%  r&  r   r'  T)last_hidden_stater$  r   
attentions)r2   r%  rQ  r&  r   rL  r   r   r   rF  r*   r   rd   r6   rc   r   _update_mamba_maskrK  rH  r  rJ  rE   r   )rV   rO  r   r#  r$  rP  r&  r%  rQ  r   r  r   causal_mask
mamba_maskr'  all_hidden_statesall_self_attnsdecoder_layer
layer_masklayer_outputs
next_cacher/   r/   r0   r  9  s   



	


zBambaModel.forwardc                 C   s.   |}|d dks|durt |dkrd}|S )zv
        No need for zeroing states when
            1. Cached forward
            2. Attending to all inputs
        r   Nr   )r*   r   )rV   r   r   rV  r/   r/   r0   rT    s   "zBambaModel._update_mamba_mask)	NNNNNNNNN)r&   r'   r(   r   rZ   r   r   r*   r+   r  r   r1  r/  r   r    r   r  rT  r  r/   r/   r   r0   rA  $  sJ    	
erA  c                       s   e Zd Z fddZ											ddejdB dejdB dejdB dedB d	ejdB d
ejdB de	dB de	dB de	dB dejdB de
ejB defddZ							d fdd	Z  ZS )BambaForCausalLMc                    s    t  | |j| _|   d S )N)r   rZ   z_loss_coefficientrM  )rV   r2   r   r/   r0   rZ     s   zBambaForCausalLM.__init__Nr   rO  r   r#  r$  rP  labelsr&  r%  rQ  r   logits_to_keepr(  c                 K   s   |dur|n| j j}|	dur|	n| j j}	| jd
||||||||	|
d	|}|j}t|tr4t| dn|}| |dd|ddf }d}|durt| j	d
||| j j
d|}| jdkrt|jddj|jdd }|| j|  }t|||j|j|jd	S )aJ  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, BambaForCausalLM

        >>> model = BambaForCausalLM.from_pretrained("...")
        >>> tokenizer = AutoTokenizer.from_pretrained("...")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```N)	rO  r   r#  r$  rP  r&  r%  rQ  r   )logitsr_  rD  r   r`   ra   r   r4   )lossra  r$  r   rS  r/   )r2   r%  rQ  r3  rR  r5  r-   slicelm_headloss_functionrD  r^  	logsumexpr   r7   powmeanr   r$  r   rS  )rV   rO  r   r#  r$  rP  r_  r&  r%  rQ  r   r`  r  r-  r   slice_indicesra  rb  z_lossr/   r/   r0   r    s@   %

 zBambaForCausalLM.forwardTFc	              
      sX   |d u rt | j|jd | j| jd}| jj|	d< t j|f|||||||d|	}
|
S )Nr   r8   r`  )r$  r   rP  r   r#  r&  is_first_iteration)r   r2   rd   r7   r6   num_logits_to_keepr   prepare_inputs_for_generation)rV   rO  r$  r   rP  r   r#  r&  rk  r  model_inputsr   r/   r0   rm    s&   	z.BambaForCausalLM.prepare_inputs_for_generation)NNNNNNNNNNr   )NNNNNTF)r&   r'   r(   rZ   r*   r+   r  r   r1  r/  r-   r   r  rm  r  r/   r/   r   r0   r]    s\    		

Pr]  )rA  r]  r2  )r   )=r)   typingr   r*   r   transformers.activationsr   (transformers.models.jamba.modeling_jambar   r   (transformers.models.llama.modeling_llamar   r   r	   r
   r   r   *transformers.models.mamba2.modeling_mamba2r   r   r   r   r    r   r6  integrations.hub_kernelsr   masking_utilsr   modeling_outputsr   r   modeling_utilsr   processing_utilsr   utilsr   r   r   r   configuration_bambar   
get_loggerr&   r   r    r]   rr   rs   rt   Moduleru   r  r  r  r2  rA  r]  __all__r/   r/   r/   r0   <module>   sJ    
5
&   o` x