o
    iI                    @   sp  d dl mZmZmZmZmZ d dlZd dlm  m	Z
 d dlmZ d dlmZ ddlmZ ddlmZ ddlmZ dd	lmZ dd
lmZmZmZ ddlmZmZ ddlmZmZ ddlm Z  ddl!m"Z"m#Z#m$Z$m%Z% ddl&m'Z' ddl(m)Z)m*Z* ddl+m,Z, e* rd dl-m.Z. d dl/m0Z0m1Z1 ndZ.e) rd dl2m3Z3m4Z4 nd\Z4Z3e$ rd dl5m6Z6 ddl7m8Z8 e%9e:Z;dd Z<d\ddZ=dej>de?dej>fd d!Z@	"d]d#ejAd$ej>d%ej>d&ej>d'eej> d(eBd)eBfd*d+ZCG d,d- d-ejAZDG d.d/ d/ZEd0ej>d1e?fd2d3ZFd4d5 ZGd6d7 ZHeIe.e3e4fZJd8d9 ZKG d:d; d;ejAZLG d<d= d=ejjAZMG d>d? d?ejAZNG d@dA dAedBdCZOG dDdE dEejAZPG dFdG dGejAZQG dHdI dIejAZRG dJdK dKejAZSG dLdM dMeZTe"G dNdO dOeZUG dPdQ dQejAZVe"G dRdS dSeUZW		T	d^dUeej>eXej> df dVee? d'eej> deej>e?f fdWdXZYG dYdZ dZeUeZZg d[Z[dS )_    )AnyCallableOptional	TypedDictUnionN)nn)ACT2FN   )Cache)GenerationMixin)AttentionMaskConverter)GradientCheckpointingLayer)BaseModelOutputWithPastMoeCausalLMOutputWithPastMoeModelOutputWithPast)ROPE_INIT_FUNCTIONSdynamic_rope_update)ALL_ATTENTION_FUNCTIONSPreTrainedModel)Unpack)auto_docstringcan_return_tupleis_torch_flex_attn_availablelogging)deprecate_kwarg)is_causal_conv1d_availableis_mamba_2_ssm_available   )GraniteMoeHybridConfig)selective_state_update)mamba_chunk_scan_combined mamba_split_conv1d_scan_combined)causal_conv1d_fncausal_conv1d_updateNN)	BlockMask)make_flex_block_causal_maskc                 C   sH   | dd| j d d f }| d| j d d df }tj| |fddS )z*Rotates half the hidden dims of the input..N   dim)shapetorchcat)xx1x2 r1   {/home/ubuntu/veenaModal/venv/lib/python3.10/site-packages/transformers/models/granitemoehybrid/modeling_granitemoehybrid.pyrotate_halfA   s   r3   c                 C   sD   | |}| |}| | t| |  }|| t||  }||fS )a  Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    )	unsqueezer3   )qkcossinposition_idsunsqueeze_dimq_embedk_embedr1   r1   r2   apply_rotary_pos_embH   s
   

r=   hidden_statesn_repreturnc                 C   s^   | j \}}}}|dkr| S | dddddddddf |||||} | ||| ||S )z
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    r   N)r+   expandreshape)r>   r?   batchnum_key_value_headsslenhead_dimr1   r1   r2   	repeat_kvc   s
   0rG           modulequerykeyvalueattention_maskscalingdropoutc                 K   s   t || j}t || j}	t||dd| }
|d ur3|d d d d d d d |jd f }|
| }
tjj|
dtj	d
|j}
tjj|
|| jd}
t|
|	}|dd }||
fS )Nr(   r	   r'   )r*   dtype)ptrainingr   )rG   num_key_value_groupsr,   matmul	transposer+   r   
functionalsoftmaxfloat32torQ   rO   rS   
contiguous)rI   rJ   rK   rL   rM   rN   rO   kwargs
key_statesvalue_statesattn_weightscausal_maskattn_outputr1   r1   r2   eager_attention_forwardo   s   
&rb   c                       s   e Zd ZdZdedef fddZedddd		
	
	
		
	
ddej	de
ej	 de
ej de
e dede
ej de
eej	ej	f  deej	e
ej	 e
eej	  f fddZ  ZS )GraniteMoeHybridAttentionz=Multi-headed attention from 'Attention Is All You Need' paperconfig	layer_idxc                    s   t    || _|| _|d u rtd| jj d |j| _|j	| _	|j
| _| j	| j | _|j| _| j| j | _d| _|j| _| j| j | j	krUtd| j	 d| j dtj| j	| j| j |jd| _tj| j	| j| j |jd| _tj| j	| j| j |jd| _tj| j	| j	|jd| _d S )NzInstantiating z without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.Tz?hidden_size must be divisible by num_heads (got `hidden_size`: z and `num_heads`: z).bias)super__init__rd   re   loggerwarning_once	__class____name__attention_dropouthidden_sizenum_attention_heads	num_headsrF   rD   rT   	is_causalattention_multiplierrN   
ValueErrorr   Linearattention_biasq_projk_projv_projo_projselfrd   re   rl   r1   r2   ri      s2   

z"GraniteMoeHybridAttention.__init__past_key_valuepast_key_values4.58new_nameversionNFr>   rM   r9   	use_cachecache_positionposition_embeddingsr@   c                 K   sD  |  \}	}
}| |}| |}| |}||	|
| j| jdd}||	|
| j| jdd}||	|
| j| jdd}|d urF|nd\}}|d urWt	||||\}}|d url|||d}|
||| j|\}}t}| jjdkrzt| jj }|| ||||f| jsdn| j| jd|\}}||	|
d}| |}||fS )	Nr   r(   r$   )r8   r7   r   eagerrH   )rO   rN   r'   )sizerw   rx   ry   viewrq   rF   rV   rD   r=   updatere   rb   rd   _attn_implementationr   rS   rn   rN   rz   )r|   r>   rM   r9   r   r   r   r   r\   bszq_len_query_statesr]   r^   r7   r8   cache_kwargsattention_interfacera   r_   r1   r1   r2   forward   s>   




z!GraniteMoeHybridAttention.forward)NNNFNN)rm   
__module____qualname____doc__r   intri   r   r,   Tensorr   
LongTensorr
   booltupler   __classcell__r1   r1   r}   r2   rc      s6     
rc   c                   @   s   e Zd ZdZdZejdfdefddZ	ddej	dej	d	e
d
eeeef  deej	ej	f f
ddZdejfddZdd	ee
 de
fddZdS ) HybridMambaAttentionDynamicCachea  
    A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
    (which has a constant shape regardless of seq_len).

    This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
    and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor
    For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
    while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
    For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
    while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
    and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
    FNrd   c                    s0  |j | _ d| _|j}|j}g | _g | _g | _t|jD ]^}| j | dkrS|  jt	j
 |j|j d|j |  ||dg7  _|  jt	j
 |j|j||dg7  _q|  jt	jg g  dg7  _|  jt	jg g  dg7  _| j| q fddt|jD | _ fddt|jD | _d S )	NFmambar(   devicerQ   r   c                        g | ]}t jg g  d qS r   r,   tensor.0r   
batch_sizer   r1   r2   
<listcomp>       z=HybridMambaAttentionDynamicCache.__init__.<locals>.<listcomp>c                    r   r   r   r   r   r1   r2   r     r   )layers_block_typehas_previous_statemamba_d_convmamba_d_stateconv_states
ssm_statestransformer_layersrangenum_hidden_layersr,   zerosmamba_expandro   mamba_n_groupsmamba_n_headsmamba_d_headr   append	key_cachevalue_cache)r|   rd   r   rQ   r   conv_kernel_sizessm_state_sizeir1   r   r2   ri      sB   	
   z)HybridMambaAttentionDynamicCache.__init__r]   r^   re   r   r@   c                 C   sz   | j | jd dkr|| j |< || j|< ntj| j | |gdd| j |< tj| j| |gdd| j|< | j | | j| fS )Nr'   r   r(   r)   )r   r+   r   r,   r-   )r|   r]   r^   re   r   r1   r1   r2   r     s   
z'HybridMambaAttentionDynamicCache.updatebeam_idxc                 C   s   t t| jD ]V}| j| j}| j| d||| j|< | j| j}| j| d||| j|< | j| j}| j| d||| j|< | j| j}| j| d||| j|< qdS )zDReorders the cache for beam search, given the selected beam indices.r   N)	r   lenr   r   index_selectrZ   r   r   r   )r|   r   re   r   r1   r1   r2   reorder_cache*  s    z.HybridMambaAttentionDynamicCache.reorder_cacher   c                 C   s:   || j vr
| j d n|}t| j|krdS | j| jd S )zYReturns the sequence length of the cached states. A layer index can be optionally passed.r   rP   )r   r   r   r+   )r|   re   r1   r1   r2   get_seq_length7  s   z/HybridMambaAttentionDynamicCache.get_seq_lengthN)r   )rm   r   r   r   is_compileabler,   float16r   ri   r   r   r   dictstrr   r   r   r   r   r   r1   r1   r1   r2   r      s$    +
r   input_tensorpad_sizec                 C   sH   t | jdkrddddd|ddfnddd|ddf}tjjj| |dddS )z
    Padding x tensor with `pad_size` on the seq_len dim (dim=1)

    Assumes that we only have tensors of either size 4 or 3
       r   constant)moderL   )r   r+   r,   r   rW   pad)r   r   	pad_shaper1   r1   r2   pad_tensor_by_sizeC  s   2r   c                 C   sX   t | |} t| jdkr| | jd d|| jd S | | jd d|| jd | jd S )z
    Padding input_tensor with `pad_size` on the seq_len dim (dim=1) and
    simultaneously splitting it into chunk sequences.

    Assumes that we only have tensors of either size 4 or 3
    r	   r   r'   r(   )r   r   r+   rB   )r   r   
chunk_sizer1   r1   r2   reshape_into_chunksN  s   
r   c                 C   s   |  d}| d jg |   |R  } tjtj||| jtjddd}| | d} tj| dd}tjtj||| jtjddd}|| tj	 }|S )zo
    More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions.
    r'   .Nr   diagonalr   rP   r)   )
r   rA   r,   trilonesr   r   masked_fillcumsuminf)r   r   masktensor_segsumr1   r1   r2   segment_sumb  s   
  r   c                 C   sN   |dur%|j d dkr%|j d dkr%| j}| |dddddf  |} | S )zm
    Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66
    Nr   r   )r+   rQ   rZ   )r>   rM   rQ   r1   r1   r2   apply_mask_to_padding_statesy  s   $ r   c                       s   e Zd ZdZdedef fddZ				ddejde	e
 d	e	ej d
e	ej de	ej f
ddZ			dde	e
 d	e	ej d
e	ej fddZ				dde	e
 d	e	ej d
e	ej de	ej fddZ  ZS )GraniteMoeHybridMambaLayeruP  
    Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
    A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
    ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
    and is why Mamba is called **selective** state spaces)

    The are a few differences between this and Mamba2Mixer:
    - The variable use_precomputed_states is slightly different due to the hybrid cache structure
    - There's a few non-obvious bugs fixed with batching in the slow path that exist in main
    - Some extra variables that our layer doesn't need have been removed
    - We ported most of the refactors in https://github.com/huggingface/transformers/pull/35154, which is (as of Dec 18, 2024) unmerged
    rd   re   c                    s  t    |j| _|j| _|j| _|j| _t	|j
| j | _|| _|j| _|j| _t|j | _|j| _|j| _|j| _|j| _|j| _dtdf| _d| _d| _ | jd| j | j  | _!t"j#| j!| j!|j| j| j!| jd d| _$| j| j! | j }t"j%| j|| jd| _&t"'t()| j| _*t(+d| jd }t"'t(,|| _-t.| j| jd	| _/t"'t()| j| _0t"j%| j| j| jd| _1t2st34d
 d S t34d d S )NrH   r   gMbP?g?r(   r   )in_channelsout_channelsrg   kernel_sizegroupspaddingrf   epsa  The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)` is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and https://github.com/Dao-AILab/causal-conv1dzOThe fast path for GraniteMoeHybrid will be used when running the model on a GPU)5rh   ri   r   rq   ro   r   r   r   r   r   r   intermediate_sizere   mamba_conv_biasuse_conv_bias
hidden_act
activationr   actmamba_proj_biasuse_biasrms_norm_epslayer_norm_epsilonr   n_groupsr   rF   mamba_chunk_sizer   floattime_step_limittime_step_mintime_step_maxconv_dimr   Conv1dconv1dru   in_proj	Parameterr,   r   dt_biasarangelogA_logGraniteMoeHybridRMSNormGatednormDout_projis_fast_path_availablerj   rk   )r|   rd   re   projection_sizeAr}   r1   r2   ri     sX   

	z#GraniteMoeHybridMambaLayer.__init__Nr>   cache_paramsr   rM   seq_idxc                 C   s  t ||}| |}|j\}}}	| j| j }
|d uoD|joD|dkoD|j| j jd |j| j jd   ko8|kn  oD|d uoD|d dk}|r)|	dj
| j| j| jgdd\}}}t||j| j | jj	d| jj| j}tj
|| j|
|
gdd\}}}t| j  }|d d d df d d d d d f d| j| jjtjd}|d d d d d f dd| j}| jd d d df d| j}| jd d d df d| j}||| j|jd | j }||| j|jd | j }||| j| j}t|j| j ||||||d |dd
}||| j| j }| ||}|  |d d d df }|S t| j  }| j!d	td
fkr>i nd| j!i}| j"r||d u r|t#|| jj	d| jj| j|f| j| j$|| j| jj| jj%| j j| j j| j| jddd|}|S |j
| j| j| jgdd\}}}|d ur|&dd}t'j()|| j*|jd  df}|j| j +| | jdvr| ,| |&dddd |f &dd}nt-|&dd| jj	d| jj| j|d&dd}t ||}tj
|| j|
|
gdd\}}}t.|||d| j|||||| jd|||| jdf| j$| jd |d| jdd|\}}|d ur:|d ur:|j| j +| |||d}| ||}|  |}|S )Nr   r   r'   r)   .rQ   T)zr   dt_softplusrH   r   dt_limitF)r  r   r  r   rmsnorm_weightrmsnorm_epsoutproj_weightoutproj_biasheaddimngroupsnorm_before_gatereturn_final_statesr(   )siluswish)r.   weightrg   r   r  )r   r  r
  r  r  r   r  )/r   r   r+   r   r   r   r   re   r   squeezesplitr   r   rq   r#   r   r  rg   r   r,   expr   r   rA   rF   rZ   rY   r   r  r   r   r  r  r   rS   r!   r   variance_epsilonrV   r   rW   r   r   copy_r   r"   r    )r|   r>   r  r   rM   r  projected_statesr   seq_lenr   groups_time_state_sizeuse_precomputed_statesgatehidden_states_B_CdtBCr  r   r  hidden_states_reshapedoutdt_limit_kwargshidden_states_B_C_transposedr   scan_output	ssm_stater1   r1   r2   cuda_kernels_forward  s  
	




<"
^"V
$




z/GraniteMoeHybridMambaLayer.cuda_kernels_forwardc           3   
      s  |j \}}}|j}t||}|}	|	jjjjgdd\}
}}|d uoQ|joQ|dkoQ|j	j
 j d |jj
 j d   koE|kn  oQ|d uoQ|d dk}|r|j	j
 jddd|j	j
< |d d dd d f |j	j
 j|j	j
 d d d d df< |j	j
 jjjjd}tj|jjd dd}jr|jj }|}n8|d ur|dd}tj|j|j d  df}|j	j
 | |dddd |f dd}t||}tj|jjj jj gdd\}}}tj !  }|r[|jj
 j}|d d dd d f d d d df }|dd"||j d j#}j$d	 "j$j d j#}tjj%|||j }t&|j'd j'd }|d
 "jj#jjtj(d}t|d	 | j|d}|)|jddd d d f }|"|jjj |j d * }|)|d|j d }|d	 |dd d d f  }|)|dj#}||d	  j|d}|jj
 |jj
 | |  |)|jddd d d f }|"|jjj |j d * }|)|d|j d }|jj
 j|j|jd}|+|j j#j}|+|j jd}t,||}|+|jj#}j-d	 "j-j d j#}|||  |j}|)|dd d d df }ntj%|j$ }t&|j'd j'd }|)||dj#! }|)||dj! }|)||dj! }|j.jj djd}|j.jj djd}j/|j/  j/  j-d	 t0|  }||d	  }||j| } fdd||||fD \}}}}|1dddd}tj2|dd}tt3|} |d d d d d d d d d d d f |d d d d d d d d d d d f  }!|!jdd}"|"d	 | 1dddddd	  }#|#jdd}$|$d	 |d d d d d f  jdd}%t|d d d d d d dd f | }&||&1ddddd	  }'|'dd d d f |d	  jdd}(|r|jj
 d d d df j|(jd})nt4|(d d d df })tj5|)|(gdd}(tt3tj|d d d d d d df d}*|*dd}*|*d
 |(d d d d d df  jdd}+|+d d d df |+d d df }(},t|}-|dd d d f |(d d d d d df  }.|-1dddd}/|.d|/d	  }0|%|0 }|)|djj#}|| } dkrB|d d d |d d d d f }|)||d}|,d ur\|d ur\|jj
 |, 6||
}17|1|}2|2S )Nr'   r)   r   r   )shiftsdimsr   r(   .r   ).NNr	  r   )r*   output_sizec                    s   g | ]	}t | jqS r1   )r   r   )r   tr   r|   r1   r2   r     s    z<GraniteMoeHybridMambaLayer.torch_forward.<locals>.<listcomp>r	   r   rP   )r   r   )8r+   rQ   r   r   r  r   r   rq   r   r   re   r   rollrZ   r   r   r  r,   sumr  r   rg   r   rV   r   rW   r   r   r  r   r   r  r   r   rA   rF   r   softplusclampr   rY   rB   r[   r   bmmr  repeat_interleaver   r   permuter   r   
zeros_liker-   r  r  )3r|   input_statesr  r   rM   r   r  r   rQ   r  r!  r"  r#  r   r   r)  r>   r$  r%  r  cache_devicer   dAdBdBxr   ssm_states_reshaped
C_reshapedyr  
D_residualA_cumsumLG_intermediateGM_intermediateMY_diagdecay_statesB_decaystatesprevious_statesdecay_chunk
new_statesr+  state_decay_outC_times_statesstate_decay_out_permutedY_offr*  contextualized_statesr1   r1  r2   torch_forward~  s   


@,
$"$$$P&*"&0(&
*
 z(GraniteMoeHybridMambaLayer.torch_forwardc                 K   s   t rd| jjjjv r| |||||S |d urtd|j}|d ur@|jd dkr@|jd dkr@||d d d d d f  	|}| 
||||S )Ncudaz\`seq_idx` support requires fast path support. Please install `mamba_ssm` and `causal_conv1d`r   r   )r  r   r  r   typer,  NotImplementedErrorrQ   r+   rZ   rU  )r|   r>   r  r   rM   r  r\   rQ   r1   r1   r2   r   M  s   	$ z"GraniteMoeHybridMambaLayer.forward)NNNN)NNN)rm   r   r   r   r   r   ri   r,   r   r   r   r   	IntTensorr,  rU  r   r   r1   r1   r}   r2   r     sV    D
 .
 Sr   c                       s(   e Zd Zd fdd	ZdddZ  ZS )	r   ư>c                    s&   t    tt|| _|| _d S r   rh   ri   r   r   r,   r   r  r  r|   ro   r   r}   r1   r2   ri   e  s   

z%GraniteMoeHybridRMSNormGated.__init__Nc                 C   sj   |j }|tj}|d ur|tj|tj }|djddd}|t	|| j
  }| j|| S Nr(   r'   T)keepdim)rQ   rZ   r,   rY   r   rW   r  powmeanrsqrtr  r  )r|   r>   r!  input_dtypevariancer1   r1   r2   r   j  s   z$GraniteMoeHybridRMSNormGated.forwardrZ  r   )rm   r   r   ri   r   r   r1   r1   r}   r2   r   d  s    r   c                       s<   e Zd ZdZdef fddZdejdejfddZ  Z	S )	GraniteMoeHybridMLPz~
    MLP layer for shared experts

    Args:
        config:
            Configuration object with model hyperparameters.
    rd   c                    sZ   t    |j| _|j| _t|j | _tj	| j| jd dd| _
tj	| j| jdd| _d S )Nr(   Frf   )rh   ri   ro   
input_sizeshared_intermediate_sizer   r   r   r   ru   input_linearoutput_linearr|   rd   r}   r1   r2   ri     s   
zGraniteMoeHybridMLP.__init__r>   r@   c                 C   s<   |  |}|jddd}| |d |d  }| |}|S )Nr(   r'   r)   r   r   )rh  chunkr   ri  )r|   r>   chunked_hidden_statesr1   r1   r2   r     s
   

zGraniteMoeHybridMLP.forward)
rm   r   r   r   r   ri   r,   r   r   r   r1   r1   r}   r2   re  v  s    	re  c                   @   s@   e Zd ZU dZejed< ejed< eed< eed< ejed< dS )GraniteFlashAttentionKwargsa  
    Keyword arguments for advanced Flash Attention, causal-conv1d, and mamba_ssm kernel usage.
    Use cases include padding-free training and fewer `torch.compile` graph breaks.

    Attributes:
        cu_seq_lens_q (`torch.LongTensor`)
            Gets cumulative sequence length for query state.
        cu_seq_lens_k (`torch.LongTensor`)
            Gets cumulative sequence length for key state.
        max_length_q (`int`):
            Maximum sequence length for query state.
        max_length_k (`int`):
            Maximum sequence length for key state.
        seq_idx (`torch.IntTensor):
            Index of each packed sequence.
    cu_seq_lens_qcu_seq_lens_kmax_length_qmax_length_kr  N)	rm   r   r   r   r,   r   __annotations__r   rY  r1   r1   r1   r2   rm    s   
 

rm  F)totalc                       s.   e Zd Zd fdd	Zdd Zdd Z  ZS )	GraniteMoeHybridRMSNormrZ  c                    s&   t    tt|| _|| _dS )zF
        GraniteMoeHybridRMSNorm is equivalent to T5LayerNorm
        Nr[  r\  r}   r1   r2   ri     s   

z GraniteMoeHybridRMSNorm.__init__c                 C   sJ   |j }|tj}|djddd}|t|| j  }| j|| S r]  )	rQ   rZ   r,   rY   r_  r`  ra  r  r  )r|   r>   rb  rc  r1   r1   r2   r     s
   zGraniteMoeHybridRMSNorm.forwardc                 C   s   t | jj d| j S )Nz, eps=)r   r  r+   r  )r|   r1   r1   r2   
extra_repr  s   z"GraniteMoeHybridRMSNorm.extra_reprrd  )rm   r   r   ri   r   ru  r   r1   r1   r}   r2   rt    s    rt  c                       s6   e Zd Zdedededdf fddZdd	 Z  ZS )
GraniteMoeHybridParallelExpertsnum_expertsrf  r/  r@   Nc                    s6   t    tt|||| _|| _|| _|| _	dS )a  
        Initialize the GraniteMoeHybridParallelExperts module.
        The experts weights are stored in [num_experts, output_size, input_size] format. Such that it's compatible with
        many MoE libraries, such as [Megablock](https://github.com/databricks/megablocks) and
        [ScatterMoE](https://github.com/shawntan/scattermoe), as well as the
        [MoE kernel](https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/fused_moe/fused_moe.py)
        used in vllm.

        Args:
            num_experts (int):
                Number of experts.
            input_size (int):
                Size of the input.
            output_size (int):
                Size of the output.
        N)
rh   ri   r   r   r,   emptyr  rw  rf  r/  )r|   rw  rf  r/  r}   r1   r2   ri     s
   

z(GraniteMoeHybridParallelExperts.__init__c                 C   sP   |j |dd}g }t| jD ]}|t|| | j|  qtj|dd}|S )a  
        Forward pass of the GraniteMoeHybridParallelExperts module.

        Args:
            inputs (Tensor):
                Input tensor.
            expert_size:
                Expert size information.

        Returns:
            Tensor: Output tensor.
        r   r)   )	r  r   rw  r   Flinearr  r,   r-   )r|   inputsexpert_size
input_listoutput_listr   resultsr1   r1   r2   r     s   z'GraniteMoeHybridParallelExperts.forwardrm   r   r   r   ri   r   r   r1   r1   r}   r2   rv    s    rv  c                       s2   e Zd Zdededef fddZdd Z  ZS )GraniteMoeHybridTopKGatingrf  rw  top_kc                    s2   t    || _|| _|| _tj||dd| _dS )a  
        Initialize the top-k gating mechanism.
        Args:
            input_size (`int`):
                Size of the input.
            num_experts (`int`):
                Number of experts.
            top_k (`int`):
                Number of top experts to select.
        Frf   N)rh   ri   rw  rf  r  r   ru   layer)r|   rf  rw  r  r}   r1   r2   ri     s
   
z#GraniteMoeHybridTopKGating.__init__c                 C   s   |  | }|j| jdd\}}tj|dd|}tj|d| j	g|j
|jd}|d|d}| d}| }| }	|	d\}
}|j| jdd}| }|| }|||||fS )Nr   r)   r   rQ   r   trunc)rounding_mode)r  r   topkr  r,   rX   type_asr   r   rw  rQ   r   scatterlongr3  tolistflattensortdiv)r|   r>   logitstop_k_logitstop_k_indicestop_k_gatesr   gatesr|  top_k_expertsr   index_sorted_expertsbatch_indexbatch_gatesr1   r1   r2   r     s   z"GraniteMoeHybridTopKGating.forwardr  r1   r1   r}   r2   r    s    r  c                       s.   e Zd ZdZdef fddZdd Z  ZS )GraniteMoeHybridMoEz
    A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.

    Args:
        config:
            Configuration object with model hyperparameters.
    rd   c                    sl   t    |j| _|j| _t|j | _t|j	| j| jd | _
t|j	| j| j| _t| j|j	|jd| _d S )Nr(   )rf  rw  r  )rh   ri   ro   rf  r   r   r   r   rv  num_local_expertsrh  ri  r  num_experts_per_tokrouterrj  r}   r1   r2   ri   #  s   
zGraniteMoeHybridMoE.__init__c                 C   s   |  \}}}|d|}| |\}}}}}	|| }
| |
|}|jddd}| |d |d  }| ||}||dddf  }tj|| | j	f|j
|jd}|d||}|||| j	}||	fS )a  
        Forward pass of the mixture of experts layer.

        Args:
            layer_input (Tensor):
                Input tensor.

        Returns:
            Tensor:
                Output tensor.
            Tensor:
                Router logits.
        r'   r(   r)   r   r   Nr  )r   rB   r  rh  rk  r   ri  r,   r   rf  rQ   r   	index_addr   )r|   layer_inputr   lengthemb_sizer   r  r  r|  router_logitsexpert_inputsr>   rl  expert_outputsr   layer_outputr1   r1   r2   r   6  s   zGraniteMoeHybridMoE.forward)rm   r   r   r   r   ri   r   r   r1   r1   r}   r2   r    s    r  c                       s   e Zd Zdedef fddZedddd					
	
			
		ddejde	ej de	e
 de	e de	e de	ej de	e de	eejejf  dee deeje	eejejf  f fddZ  ZS )GraniteMoeHybridDecoderLayerrd   re   c                    s   t    |j| _d | _|jdkrt|| _t|j|jd| _	t|j|jd| _
|j| _t|| _d | _|j| dkrBt||| _nt||| _|j| | _t|dddk| _d S )Nr   r   r   r  )rh   ri   ro   	self_attnr  r  block_sparse_moert  r   input_layernormpost_attention_layernormresidual_multiplierre  
shared_mlpr   r   r   rc   
layer_typegetattrhas_expertsr{   r}   r1   r2   ri   W  s   



z%GraniteMoeHybridDecoderLayer.__init__r~   r   r   r   NFr>   rM   output_attentionsr   r   output_router_logitsr   r\   r@   c	              
   K   s   |}
|  |}| jdur| jd||||d|	}d}n| jd|||||||d|	\}}|
|| j  }|}
| |}| jrN| |\}}|| | }n| |}d}|
|| j  }|f}|rf||f7 }|rm||f7 }|S )a0  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_values (`Cache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            output_router_logits (`bool`, *optional*):
                Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
                should not be returned during inference.
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs.Can be used to provide `GraniteFlashAttentionKwargs` for
                padding-free training and/or improve torch.compile performance.
        N)r>   r   r  rM   )r>   rM   r   r  r   r   r   r1   )r  r   r  r  r  r  r  r  )r|   r>   rM   r   r  r   r   r  r   r\   residualself_attn_weightsmoe_hidden_statesr  outputsr1   r1   r2   r   n  sL   &






z$GraniteMoeHybridDecoderLayer.forward)NNFFNFN)rm   r   r   r   r   ri   r   r,   r   r   r
   r   r   r   r   rm  FloatTensorr   r   r1   r1   r}   r2   r  V  s>    	
r  c                       sJ   e Zd ZU eed< dZdZdgZdgZdZ	dZ
dZdZ fddZ  ZS )	GraniteMoeHybridPreTrainedModelrd   modelTr  r   Fc                    s   t  | t|tr|jjjd| jjd t|t	r9|j
jd ttd|jd |j_|jjd d S t|trG|jjd d S d S )NrH   )r`  stdg      ?r   )rh   _init_weights
isinstancerv  r  datanormal_rd   initializer_ranger   r   fill_r,   r   r   rq   r   r  r   )r|   rI   r}   r1   r2   r    s   


z-GraniteMoeHybridPreTrainedModel._init_weights)rm   r   r   r   rr  base_model_prefixsupports_gradient_checkpointing_no_split_modules_skip_keys_device_placement_supports_flash_attn_supports_sdpa_can_compile_fullgraph_is_statefulr  r   r1   r1   r}   r2   r    s   
 r  c                       sD   e Zd ZU ejed< ddef fddZe e	dd Z
  ZS )	GraniteMoeHybridRotaryEmbeddinginv_freqNrd   c                    s   t    t|drt|jtr|jd|jd| _nd| _|j| _	|j| _
|| _t| j | _| | j|\}| _| jd|dd | j| _d S )Nrope_scaling	rope_typerW  defaultr  F)
persistent)rh   ri   hasattrr  r  r   getr  max_position_embeddingsmax_seq_len_cachedoriginal_max_seq_lenrd   r   rope_init_fnattention_scalingregister_bufferr  original_inv_freq)r|   rd   r   r  r}   r1   r2   ri     s   
z(GraniteMoeHybridRotaryEmbedding.__init__c           
      C   s   | j d d d d f  |jd dd|j}|d d d d d f  }t|jjtr6|jjdkr6|jjnd}t	j
|dd+ | |  dd}t	j||fdd	}| | j }| | j }	W d    n1 smw   Y  |j|jd
|	j|jd
fS )Nr   r'   r   mpscpuF)device_typeenabledr(   r)   r	  )r  r   rA   r+   rZ   r   r  rW  r   r,   autocastrV   r-   r7   r  r8   rQ   )
r|   r.   r9   inv_freq_expandedposition_ids_expandedr  freqsembr7   r8   r1   r1   r2   r     s   0&z'GraniteMoeHybridRotaryEmbedding.forwardr   )rm   r   r   r,   r   rr  r   ri   no_gradr   r   r   r1   r1   r}   r2   r    s   
 
r  c                       s4  e Zd Zdef fddZee											d!deej	 deej
 deej	 deeeeej f  d	eej d
ee dee dee dee dee deej	 dee deeef fddZ	d"deej
df dej
dej
dedef
ddZedej
dededejdej
defddZdd  Z  ZS )#GraniteMoeHybridModelrd   c                    s   t     j| _ j| _t j j| j| _t	 fddt
 jD | _t j jd| _d| _ j| _ j| _ j| _| j| j | _ j| _ j| _ j| _| jdkr]t nd | _|   d S )Nc                    s   g | ]}t  |qS r1   )r  )r   re   rd   r1   r2   r         z2GraniteMoeHybridModel.__init__.<locals>.<listcomp>r   Frope)rh   ri   pad_token_idpadding_idx
vocab_sizer   	Embeddingro   embed_tokens
ModuleListr   r   layersrt  r   r  gradient_checkpointingembedding_multiplierrp   rq   rF   r  
rope_thetaposition_embedding_typer  
rotary_emb	post_initrj  r}   r  r2   ri     s$   zGraniteMoeHybridModel.__init__N	input_idsrM   r9   r   inputs_embedsr   r  output_hidden_statesr  return_dictr   r\   r@   c                 K   s2  |d ur|n| j j}|d ur|n| j j}|d ur|n| j j}|
d ur$|
n| j j}
|d u |d uA r4td| jrC| jrC|rCt	d d}|d u rL| 
|}|| j }|r\|d u r\t	d |d u rx|d urh| nd}tj|||jd  |jd}|d u r|d}| |||||}| ||}|}d }| jd ur| ||}|rdnd }|rdnd }|	rdnd }| jD ]D}|jd	kr|n|}|r||f7 }||f||||||	|d
|}|d }|r|d d ur||d f7 }|	r|d d ur||d f7 }q| |}|r||f7 }|r|jsd|_t|||||dS )Nz:You must specify exactly one of input_ids or inputs_embedszX`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.FzGraniteMoeHybrid requires an initialized `HybridMambaAttentionDynamicCache` to return a cache. Because one was not provided, no cache will be returned.r   r   r   r1   r   )rM   r   r  r   r   r  r   r'   T)last_hidden_stater   r>   
attentionsr  )rd   r  r  r   use_return_dictrt   r  rS   rj   rk   r  r  r   r,   r   r+   r   r4   _update_causal_mask_update_mamba_maskr  r  r  r  r   r   )r|   r  rM   r9   r   r  r   r  r  r  r  r   r\   past_seen_tokensr`   
mamba_maskr>   r   all_hidden_statesall_self_attnsall_router_logitsdecoder_layer
layer_masklayer_outputsr1   r1   r2   r     s   






	

zGraniteMoeHybridModel.forwardFr%   r   c                 C   s:  | j jdkr|d ur|dk r|S d S | j jdkr&t|tjr$t|}|S |d ur.| nd}|d ur7|jnd}| j jdkrO|sO|sOt	j
|||| jdrOd S |j}|jd }	|r^| }
nt|tjri|jd	 n||	 d }
| j||	|
|||jd d
}| j jdkr|d ur|jjdv r|st|j}t	||}|S )Nflash_attention_2rH   flex_attentionr   Fsdpa)r  past_key_values_lengthis_trainingr   r'   )sequence_lengthtarget_lengthrQ   r   r   )rV  xpunpu)rd   r   anyr  r,   r   r&   r   r   r   _ignore_causal_mask_sdparS   rQ   r+   get_max_cache_shape5_prepare_4d_causal_attention_mask_with_cache_positionr   rW  finfomin_unmask_unattended)r|   rM   r   r   r   r  r  using_compilable_cacherQ   r  r  r`   	min_dtyper1   r1   r2   r    sT   




z)GraniteMoeHybridModel._update_causal_maskr  r  rQ   r   c                 K   sD  | dur|   dkr| }|S t|j}tj||f|||jd}|dkr+tj|dd}|tj||jd|ddk9 }|ddddddf 	|ddd}| dur|
 }| jd }	|ddddddd|	f | ddddddf |j }
|
dk}
|ddddddd|	f |
||ddddddd|	f< |S )	aM  
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        Nr   )
fill_valuerQ   r   r   r   r   r'   r   )r*   r,   r  r  fullr   triur   rB   rA   cloner+   rZ   r   )rM   r  r  rQ   r   r   r\   r`   r  mask_lengthpadding_maskr1   r1   r2   r    s,    $
6  zKGraniteMoeHybridModel._prepare_4d_causal_attention_mask_with_cache_positionc                 C   s.   |}|d dks|durt |dkrd}|S )zv
        No need for zeroing states when
            1. Cached forward
            2. Attending to all inputs
        r   Nr   )r,   all)r|   rM   r   r  r1   r1   r2   r    s   "z(GraniteMoeHybridModel._update_mamba_mask)NNNNNNNNNNN)F)rm   r   r   r   ri   r   r   r   r,   r   r   r   r
   listr  r   r   rm  r   r   r   r  staticmethodr   rQ   r  r  r   r1   r1   r}   r2   r    s    	

{
D6r  r(   gate_logitsrw  c                    s  | du s	t | tsdS t | tr#| d j tj fdd| D dd}tjjj|dd}tj||dd\}}tjj	||}|du rStj
| dd}	tj
|dd}
nm|j\}}|jd ||  }|dddddddf |||||fd|| }tj| | ddtj|dd }	|ddddddf ||||jd fd|jd  }tj|| ddtj|dd }
|jjdur|jjnd}|jd t| }t|	dd|||jd  f |
d }|| S )a  
    Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.

    See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        gate_logits:
            Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
            shape [batch_size X sequence_length, num_experts].
        num_experts:
            Number of experts
        top_k:
            The number of experts to route per-token, can be also interpreted as the `top-k` routing
            parameter.
        attention_mask (`torch.Tensor`, *optional*):
            The attention_mask used in forward function
            shape [batch_size X sequence_length] if not None.

    Returns:
        The auxiliary loss.
    Nr   c                    s   g | ]}|  qS r1   )rZ   )r   
layer_gatecompute_devicer1   r2   r   ?  r  z,load_balancing_loss_func.<locals>.<listcomp>r)   r'   r   )r  r   r   r,   r-   r   rW   rX   r  one_hotr`  r   r+   rA   rB   rZ   r3  indexr   r4   )r  rw  r  rM   concatenated_gate_logitsrouting_weightsr   selected_expertsexpert_masktokens_per_expertrouter_prob_per_expertr   r  r   expert_attention_mask router_per_expert_attention_maskdevice_indexrankoverall_lossr1   r  r2   load_balancing_loss_func  sF   



&r-  c                        s   e Zd ZdgZdef fddZe													ddeej	 deej
 d	eej	 d
eeeeej f  deej deej	 dee dee dee dee dee deej	 deeej
f deeef fddZ						dddZ  ZS )GraniteMoeHybridForCausalLMzlm_head.weightrd   c                    sX   t  | t|| _|j| _tj|j|jdd| _|j	| _	|j
| _|j| _|   d S )NFrf   )rh   ri   r  r  r  r   ru   ro   lm_headrouter_aux_loss_coefr  rw  r  r  rj  r}   r1   r2   ri   v  s   
z$GraniteMoeHybridForCausalLM.__init__Nr   r  rM   r9   r   r  labelsr   r  r  r  r  r   logits_to_keepr@   c                 K   s  |dur|n| j j}|
dur|
n| j j}
|	dur|	n| j j}	|dur$|n| j j}| jd||||||||	|
||d|}|d }t|trKt| dn|}| 	|dd|ddf }|| j j
 }d}|dury| }| j||fd| j ji|}d}|
rt|r|jn|d | j| j|}|dur|| j||j 7 }|s|f|dd  }|
r|f| }|dur|f| S |S t||||j|j|j|jdS )	ax  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GraniteMoeHybridForCausalLM

        >>> model = GraniteMoeHybridForCausalLM.from_pretrained("ibm/PowerMoE-3b")
        >>> tokenizer = AutoTokenizer.from_pretrained("ibm/PowerMoE-3b")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```N)r  rM   r9   r   r  r   r  r  r  r  r   r   r  r'   r   )lossaux_lossr  r   r>   r  r  r1   )rd   r  r  r  r  r  r  r   slicer/  logits_scalingr   loss_functionr  r-  r  rw  r  r0  rZ   r   r   r   r>   r  )r|   r  rM   r9   r   r  r1  r   r  r  r  r  r   r2  r\   r  r>   slice_indicesr  r3  r4  outputr1   r1   r2   r     sx   (
z#GraniteMoeHybridForCausalLM.forwardTc                 K   s:  |d u }	|	s5|d us|d |j d kr"|d d |j d  d f }n#|j d |j d kr4|d d |f }n|rEt| j|j d | j| jd}|d urn|d u rn| dd }||dkd |	sn|d d |j d  d f }|d ury|	ryd|i}
nd| i}
|
	|||||d |
 D ]\}}||
vr||
|< q|
S )Nr'   r   r   r   r  r  )r9   r   r   rM   r   )r+   r   rd   rQ   r   r  r   masked_fill_r[   r   items)r|   r  r   rM   r  r   r9   r   r\   empty_past_kvmodel_inputsrK   rL   r1   r1   r2   prepare_inputs_for_generation  sB   
z9GraniteMoeHybridForCausalLM.prepare_inputs_for_generation)NNNNNNNNNNNNr   )NNNNNT)rm   r   r   _tied_weights_keysr   ri   r   r   r,   r   r   r   r
   r  r  r   r   r   r   r   r>  r   r1   r1   r}   r2   r.  s  sj    	

pr.  )r.  r  r  )Nr   )rH   )Nr(   N)\typingr   r   r   r   r   r,   torch.nn.functionalr   rW   ry  transformers.activationsr   cache_utilsr
   
generationr   modeling_attn_mask_utilsr   modeling_layersr   modeling_outputsr   r   r   modeling_rope_utilsr   r   modeling_utilsr   r   processing_utilsr   utilsr   r   r   r   utils.deprecationr   utils.import_utilsr   r   configuration_granitemoehybridr   +mamba_ssm.ops.triton.selective_state_updater   !mamba_ssm.ops.triton.ssd_combinedr    r!   causal_conv1dr"   r#   !torch.nn.attention.flex_attentionr%   integrations.flex_attentionr&   
get_loggerrm   rj   r3   r=   r   r   rG   Moduler   rb   rc   r   r   r   r   r  r  r   r   r   re  rm  rt  rv  r  r  r  r  r  r  r   r-  r.  __all__r1   r1   r1   r2   <module>   s   


W`   b-0<q$  
V ?