o
    wi                     @   sv  d dl mZmZmZ d dlZd dlm  mZ d dlmZ ddl	m
Z
 ddlmZmZ ddlmZ ddlmZ dd	lmZ dd
lmZmZmZ ddlmZmZ ddlmZmZ ddlmZm Z m!Z! ddl"m#Z# e  rvd dl$m%Z% ddl&m'Z' e!(e)Z*G dd dej+Z,G dd dej+Z-G dd dej+Z.G dd dej+Z/G dd dej+Z0dd Z1dAddZ2d ej3d!e4d"ej3fd#d$Z5	%dBd&ej+d'ej3d(ej3d)ej3d*eej3 d+e6d,e6fd-d.Z7G d/d0 d0ej+Z8G d1d2 d2eZ9eG d3d4 d4eZ:G d5d6 d6ej+Z;eG d7d8 d8e:Z<		9	dCd:eej3e=ej3 df d;ee4 d*eej3 d"eej3e4f fd<d=Z>G d>d? d?e:eZ?g d@Z@dS )D    )CallableOptionalUnionN)nn   )ACT2FN)CacheDynamicCache)GenerationMixin)AttentionMaskConverter)GradientCheckpointingLayer)BaseModelOutputWithPastMoeCausalLMOutputWithPastMoeModelOutputWithPast)ROPE_INIT_FUNCTIONSdynamic_rope_update)ALL_ATTENTION_FUNCTIONSPreTrainedModel)auto_docstringis_torch_flex_attn_availablelogging   )GraniteMoeSharedConfig)	BlockMask)make_flex_block_causal_maskc                       s<   e Zd ZdZdef fddZdejdejfddZ  Z	S )	GraniteMoeSharedMLPz~
    MLP layer for shared experts

    Args:
        config:
            Configuration object with model hyperparameters.
    configc                    sZ   t    |j| _|j| _t|j | _tj	| j| jd dd| _
tj	| j| jdd| _d S )N   Fbias)super__init__hidden_size
input_sizeshared_intermediate_sizer   
hidden_act
activationr   Linearinput_linearoutput_linearselfr   	__class__ {/home/ubuntu/sommelier/.venv/lib/python3.10/site-packages/transformers/models/granitemoeshared/modeling_granitemoeshared.pyr!   :   s   
zGraniteMoeSharedMLP.__init__hidden_statesreturnc                 C   s<   |  |}|jddd}| |d |d  }| |}|S )Nr   dimr   r   )r(   chunkr&   r)   )r+   r0   chunked_hidden_statesr.   r.   r/   forwardC   s
   

zGraniteMoeSharedMLP.forward)
__name__
__module____qualname____doc__r   r!   torchTensorr7   __classcell__r.   r.   r,   r/   r   1   s    	r   c                       s.   e Zd Zd fdd	Zdd Zdd Z  ZS )	GraniteMoeSharedRMSNormư>c                    s&   t    tt|| _|| _dS )zF
        GraniteMoeSharedRMSNorm is equivalent to T5LayerNorm
        N)r    r!   r   	Parameterr<   onesweightvariance_epsilon)r+   r"   epsr,   r.   r/   r!   L   s   

z GraniteMoeSharedRMSNorm.__init__c                 C   sJ   |j }|tj}|djddd}|t|| j  }| j|| S )Nr   r2   T)keepdim)	dtypetor<   float32powmeanrsqrtrD   rC   )r+   r0   input_dtypevariancer.   r.   r/   r7   T   s
   zGraniteMoeSharedRMSNorm.forwardc                 C   s   t | jj d| j S )Nz, eps=)tuplerC   shaperD   r+   r.   r.   r/   
extra_repr[   s   z"GraniteMoeSharedRMSNorm.extra_repr)r@   )r8   r9   r:   r!   r7   rR   r>   r.   r.   r,   r/   r?   K   s    r?   c                       s6   e Zd Zdedededdf fddZdd	 Z  ZS )
GraniteMoeSharedParallelExpertsnum_expertsr#   output_sizer1   Nc                    s6   t    tt|||| _|| _|| _|| _	dS )a  
        Initialize the GraniteMoeSharedParallelExperts module.
        The experts weights are stored in [num_experts, output_size, input_size] format. Such that it's compatible with
        many MoE libraries, such as [Megablock](https://github.com/databricks/megablocks) and
        [ScatterMoE](https://github.com/shawntan/scattermoe), as well as the
        [MoE kernel](https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/fused_moe/fused_moe.py)
        used in vllm.

        Args:
            num_experts (int):
                Number of experts.
            input_size (int):
                Size of the input.
            output_size (int):
                Size of the output.
        N)
r    r!   r   rA   r<   emptyrC   rT   r#   rU   )r+   rT   r#   rU   r,   r.   r/   r!   `   s
   

z(GraniteMoeSharedParallelExperts.__init__c                 C   sP   |j |dd}g }t| jD ]}|t|| | j|  qtj|dd}|S )a  
        Forward pass of the GraniteMoeSharedParallelExperts module.

        Args:
            inputs (Tensor):
                Input tensor.
            expert_size:
                Expert size information.

        Returns:
            Tensor: Output tensor.
        r   r3   )	splitrangerT   appendFlinearrC   r<   cat)r+   inputsexpert_size
input_listoutput_listiresultsr.   r.   r/   r7   w   s   z'GraniteMoeSharedParallelExperts.forwardr8   r9   r:   intr!   r7   r>   r.   r.   r,   r/   rS   _   s    rS   c                       s2   e Zd Zdededef fddZdd Z  ZS )GraniteMoeSharedTopKGatingr#   rT   top_kc                    s2   t    || _|| _|| _tj||dd| _dS )a  
        Initialize the top-k gating mechanism.
        Args:
            input_size (`int`):
                Size of the input.
            num_experts (`int`):
                Number of experts.
            top_k (`int`):
                Number of top experts to select.
        Fr   N)r    r!   rT   r#   rf   r   r'   layer)r+   r#   rT   rf   r,   r.   r/   r!      s
   
z#GraniteMoeSharedTopKGating.__init__c                 C   s   |  | }|j| jdd\}}tj|dd|}tj|d| j	g|j
|jd}|d|d}| d}| }| }	|	d\}
}|j| jdd}| }|| }|||||fS )Nr   r3   r   rG   devicetrunc)rounding_mode)rg   floattopkrf   r<   softmaxtype_aszerossizerT   rG   ri   scatterlongsumtolistflattensortdiv)r+   r0   logitstop_k_logitstop_k_indicestop_k_gatesrp   gatesr^   top_k_experts_index_sorted_expertsbatch_indexbatch_gatesr.   r.   r/   r7      s   z"GraniteMoeSharedTopKGating.forwardrc   r.   r.   r,   r/   re      s    re   c                       s.   e Zd ZdZdef fddZdd Z  ZS )GraniteMoeSharedMoEz
    A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.

    Args:
        config:
            Configuration object with model hyperparameters.
    r   c                    sl   t    |j| _|j| _t|j | _t|j	| j| jd | _
t|j	| j| j| _t| j|j	|jd| _d S )Nr   )r#   rT   rf   )r    r!   r"   r#   intermediate_sizer   r%   r&   rS   num_local_expertsr(   r)   re   num_experts_per_tokrouterr*   r,   r.   r/   r!      s   
zGraniteMoeSharedMoE.__init__c                 C   s   |  \}}}|d|}| |\}}}}}	|| }
| |
|}|jddd}| |d |d  }| ||}||dddf  }tj|| | j	f|j
|jd}|d||}|||| j	}||	fS )a  
        Forward pass of the mixture of experts layer.

        Args:
            layer_input (Tensor):
                Input tensor.

        Returns:
            Tensor:
                Output tensor.
            Tensor:
                Router logits.
        r2   r   r3   r   r   Nrh   )rq   reshaper   r(   r5   r&   r)   r<   rp   r#   rG   ri   	index_addview)r+   layer_inputbszlengthemb_sizer   r   r   r^   router_logitsexpert_inputsr0   r6   expert_outputsrp   layer_outputr.   r.   r/   r7      s   zGraniteMoeSharedMoE.forward)r8   r9   r:   r;   r   r!   r7   r>   r.   r.   r,   r/   r      s    r   c                 C   sH   | dd| j d d f }| d| j d d df }tj| |fddS )z*Rotates half the hidden dims of the input..Nr2   r   r3   )rP   r<   r\   )xx1x2r.   r.   r/   rotate_half   s   r   c                 C   sD   | |}| |}| | t| |  }|| t||  }||fS )a  Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    )	unsqueezer   )qkcossinposition_idsunsqueeze_dimq_embedk_embedr.   r.   r/   apply_rotary_pos_emb   s
   

r   r0   n_repr1   c                 C   s^   | j \}}}}|dkr| S | dddddddddf |||||} | ||| ||S )z
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    r   N)rP   expandr   )r0   r   batchnum_key_value_headsslenhead_dimr.   r.   r/   	repeat_kv  s
   0r           modulequerykeyvalueattention_maskscalingdropoutc                 K   s   t || j}t || j}	t||dd| }
|d ur3|d d d d d d d |jd f }|
| }
tjj|
dtj	d
|j}
tjj|
|| jd}
t|
|	}|dd }||
fS )Nr   r   r2   )r4   rG   )ptrainingr   )r   num_key_value_groupsr<   matmul	transposerP   r   
functionalrn   rI   rH   rG   r   r   
contiguous)r   r   r   r   r   r   r   kwargs
key_statesvalue_statesattn_weightscausal_maskattn_outputr.   r.   r/   eager_attention_forward&  s   
&r   c                       s   e Zd ZdZddedee f fddZ						ddej	d	eej	 d
eej
 dee dedeej
 deeej	ej	f  deej	eej	 eeej	  f fddZ  ZS )GraniteMoeSharedAttentionz=Multi-headed attention from 'Attention Is All You Need' paperNr   	layer_idxc                    s   t    || _|| _|d u rtd| jj d |j| _|j	| _	|j
| _| j	| j | _|j| _| j| j | _d| _|j| _| j| j | j	krUtd| j	 d| j dtj| j	| j| j |jd| _tj| j	| j| j |jd| _tj| j	| j| j |jd| _tj| j	| j	|jd| _d S )NzInstantiating z without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.Tz?hidden_size must be divisible by num_heads (got `hidden_size`: z and `num_heads`: z).r   )r    r!   r   r   loggerwarning_oncer-   r8   attention_dropoutr"   num_attention_heads	num_headsr   r   r   	is_causalattention_multiplierr   
ValueErrorr   r'   attention_biasq_projk_projv_projo_projr+   r   r   r,   r.   r/   r!   F  s2   

z"GraniteMoeSharedAttention.__init__Fr0   r   r   past_key_value	use_cachecache_positionposition_embeddingsr1   c                 K   sF  |  \}	}
}| |}| |}| |}||	|
| j| jdd}||	|
| j| jdd}||	|
| j| jdd}|d urF|nd\}}|d urWt	||||\}}|d url|||d}|
||| j|\}}t}| jjdkrzt| jj }|| ||||f| jsdn| j| jd|\}}||	|
d}| |}|||fS )	Nr   r   )NN)r   r   r   eagerr   )r   r   r2   )rq   r   r   r   r   r   r   r   r   r   updater   r   r   _attn_implementationr   r   r   r   r   )r+   r0   r   r   r   r   r   r   r   r   q_lenr   query_statesr   r   r   r   cache_kwargsattention_interfacer   r   r.   r.   r/   r7   f  s>   





z!GraniteMoeSharedAttention.forwardN)NNNFNN)r8   r9   r:   r;   r   r   rd   r!   r<   r=   
LongTensorr   boolrO   r7   r>   r.   r.   r,   r/   r   C  s4    #
r   c                       s   e Zd Zdedef fddZ								ddejdeej d	eej	 d
ee
 dee dee deej	 dee deeejejf  deejeeejejf  f fddZ  ZS )GraniteMoeSharedDecoderLayerr   r   c                    s   t    |j| _t||d| _|jdkrt|| _t|j|j	d| _
t|j|j	d| _|j| _|jdkr:d | _d S t|| _d S )N)r   r   r   rE   )r    r!   r"   r   	self_attnr   r   block_sparse_moer?   rms_norm_epsinput_layernormpost_attention_layernormresidual_multiplierr$   r   
shared_mlpr   r,   r.   r/   r!     s   


"z%GraniteMoeSharedDecoderLayer.__init__NFr0   r   r   r   output_attentionsr   r   output_router_logitsr   r1   c
                 K   s   |}|  |}| jd||||||||	d|
\}}}||| j  }|}| |}| |\}}| jdu r8|}n|| | }||| j  }|f}|rP||f7 }|rW||f7 }|r^||f7 }|S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            output_router_logits (`bool`, *optional*):
                Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
                should not be returned during inference.
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
                into the model
        )r0   r   r   r   r   r   r   r   Nr.   )r   r   r   r   r   r   )r+   r0   r   r   r   r   r   r   r   r   r   residualself_attn_weightspresent_key_valuemoe_hidden_statesr   outputsr.   r.   r/   r7     s<   &
	




z$GraniteMoeSharedDecoderLayer.forward)NNNFFNFN)r8   r9   r:   r   rd   r!   r<   r=   r   r   r   r   rO   FloatTensorr7   r>   r.   r.   r,   r/   r     s>    	
r   c                   @   s@   e Zd ZeZdZdZdgZdgZdZ	dZ
dZdZdZdd ZdS )	GraniteMoeSharedPreTrainedModelmodelTr   past_key_valuesFc                 C   s   t |tjr |jjjd| jjd |jd ur|jj	  d S d S t |tj
rC|jjjd| jjd |jd urA|jj|j 	  d S d S t |trQ|jjd d S t |trc|jjjd| jjd d S d S )Nr   )rK   stdg      ?)
isinstancer   r'   rC   datanormal_r   initializer_ranger   zero_	Embeddingpadding_idxr?   fill_rS   )r+   r   r.   r.   r/   _init_weights  s   



z-GraniteMoeSharedPreTrainedModel._init_weightsN)r8   r9   r:   r   config_classbase_model_prefixsupports_gradient_checkpointing_no_split_modules_skip_keys_device_placement_supports_flash_attn_2_supports_sdpa_supports_cache_class_supports_quantized_cache_supports_static_cacher   r.   r.   r.   r/   r     s    r   c                       s8   e Zd Zddef fddZe edd Z  Z	S )GraniteMoeSharedRotaryEmbeddingNr   c                    s   t    t|dr|jd ur|jd|jd| _nd| _|j| _|j| _|| _	t
| j | _| | j	|\}| _| jd|dd | j| _d S )Nrope_scaling	rope_typetypedefaultinv_freqF)
persistent)r    r!   hasattrr  getr  max_position_embeddingsmax_seq_len_cachedoriginal_max_seq_lenr   r   rope_init_fnattention_scalingregister_bufferr
  original_inv_freq)r+   r   ri   r
  r,   r.   r/   r!     s   
z(GraniteMoeSharedRotaryEmbedding.__init__c           
      C   s   | j d d d d f  |jd dd|j}|d d d d d f  }t|jjtr6|jjdkr6|jjnd}t	j
|dd+ | |  dd}t	j||fdd	}| | j }| | j }	W d    n1 smw   Y  |j|jd
|	j|jd
fS )Nr   r2   r   mpscpuF)device_typeenabledr   r3   )rG   )r
  rl   r   rP   rH   ri   r   r  strr<   autocastr   r\   r   r  r   rG   )
r+   r   r   inv_freq_expandedposition_ids_expandedr  freqsembr   r   r.   r.   r/   r7   (  s   0&z'GraniteMoeSharedRotaryEmbedding.forwardr   )
r8   r9   r:   r   r!   r<   no_gradr   r7   r>   r.   r.   r,   r/   r    s
    r  c                       s0  e Zd Zdef fddZdd Zdd Ze											d"d	ee	j
 d
ee	j dee	j
 deeeee	j f  dee	j dee dee dee dee dee dee	j
 deeef fddZ	d#d
ee	jdf de	jde	jdedef
ddZed
e	jdedede	jde	jdefd d!Z  ZS )$GraniteMoeSharedModelr   c                    s   t     j| _ j| _t j j| j| _t	 fddt
 jD | _t j jd| _d| _ j| _ j| _ j| _| j| j | _ j| _ j| _ j| _| jdkr]t nd | _|   d S )Nc                    s   g | ]}t  |qS r.   )r   ).0r   r   r.   r/   
<listcomp>A      z2GraniteMoeSharedModel.__init__.<locals>.<listcomp>r   Frope)r    r!   pad_token_idr   
vocab_sizer   r   r"   embed_tokens
ModuleListrX   num_hidden_layerslayersr?   r   normgradient_checkpointingembedding_multiplierr   r   r   r  
rope_thetaposition_embedding_typer  
rotary_emb	post_initr*   r,   r"  r/   r!   :  s$   zGraniteMoeSharedModel.__init__c                 C      | j S r   r(  rQ   r.   r.   r/   get_input_embeddingsS     z*GraniteMoeSharedModel.get_input_embeddingsc                 C   
   || _ d S r   r4  r+   r   r.   r.   r/   set_input_embeddingsV     
z*GraniteMoeSharedModel.set_input_embeddingsN	input_idsr   r   r   inputs_embedsr   r   output_hidden_statesr   return_dictr   r1   c                 C   sH  |d ur|n| j j}|d ur|n| j j}|d ur|n| j j}|
d ur$|
n| j j}
|d u |d uA r4td| jrC| jrC|rCt	d d}|d u rL| 
|}|| j }d}|rft|tsfd}t|}t	d |d u r|d urr| nd}tj|||jd  |jd}|d u r|d}| |||||}|}d }| jd ur| ||}|rd	nd }|rd	nd }|	rd	nd }d }| jD ]6}|r||f7 }|||||||||	|d
	}|d }|r||rdnd }|r||d f7 }|	r||d f7 }q| |}|r||f7 }|r|nd }|r| }|
stdd ||||fD S t|||||dS )Nz:You must specify exactly one of input_ids or inputs_embedszX`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.FTzWe detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)r   r   ri   r.   )r   r   r   r   r   r   r   r   r   r2   c                 s   s    | ]	}|d ur|V  qd S r   r.   )r!  vr.   r.   r/   	<genexpr>  s    z0GraniteMoeSharedModel.forward.<locals>.<genexpr>)last_hidden_stater   r0   
attentionsr   )r   r   r=  r   use_return_dictr   r-  r   r   r   r(  r.  r   r   r	   from_legacy_cacheget_seq_lengthr<   arangerP   ri   r   _update_causal_maskr1  r+  r,  to_legacy_cacherO   r   )r+   r;  r   r   r   r<  r   r   r=  r   r>  r   return_legacy_cachepast_seen_tokensr   r0   r   all_hidden_statesall_self_attnsall_router_logitsnext_decoder_cachedecoder_layerlayer_outputs
next_cacher.   r.   r/   r7   Y  s   









zGraniteMoeSharedModel.forwardFr   input_tensorc                 C   s:  | j jdkr|d ur|dk r|S d S | j jdkr&t|tjr$t|}|S |d ur.| nd}|d ur7|jnd}| j jdkrO|sO|sOt	j
|||| jdrOd S |j}|jd }	|r^| }
nt|tjri|jd	 n||	 d }
| j||	|
|||jd d
}| j jdkr|d ur|jjdv r|st|j}t	||}|S )Nflash_attention_2r   flex_attentionr   Fsdpa)r<  past_key_values_lengthis_trainingr   r2   )sequence_lengthtarget_lengthrG   r   
batch_size)cudaxpunpu)r   r   anyr   r<   r=   r   rF  is_compileabler   _ignore_causal_mask_sdpar   rG   rP   get_max_cache_shape5_prepare_4d_causal_attention_mask_with_cache_positionri   r  finfomin_unmask_unattended)r+   r   rS  r   r   r   rK  using_compilable_cacherG   rY  rZ  r   	min_dtyper.   r.   r/   rH    sT   




z)GraniteMoeSharedModel._update_causal_maskrY  rZ  rG   r[  c                 K   sD  | dur|   dkr| }|S t|j}tj||f|||jd}|dkr+tj|dd}|tj||jd|ddk9 }|ddddddf 	|ddd}| dur|
 }| jd }	|ddddddd|	f | ddddddf |j }
|
dk}
|ddddddd|	f |
||ddddddd|	f< |S )	aM  
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        N   )
fill_valuerG   ri   r   )diagonalr?  r2   r   )r4   r<   rd  re  fullri   triurG  r   r   clonerP   rH   masked_fill)r   rY  rZ  rG   r   r[  r   r   rh  mask_lengthpadding_maskr.   r.   r/   rc    s,    $
6  zKGraniteMoeSharedModel._prepare_4d_causal_attention_mask_with_cache_position)NNNNNNNNNNN)F)r8   r9   r:   r   r!   r5  r9  r   r   r<   r   r=   r   r   listr   r   rO   r   r7   rH  staticmethodrd   rG   rc  r>   r.   r.   r,   r/   r   8  s    	

{
Dr   r   gate_logitsrT   c                    s  | du s	t | tsdS t | tr#| d j tj fdd| D dd}tjjj|dd}tj||dd\}}tjj	||}|du rStj
| dd}	tj
|dd}
ng|j\}}|jd ||  }|dddddddf |||||fd|| }tj| | ddtj|dd }	|ddddddf ||||fd| }tj|| ddtj|dd }
t|	|
d }|| S )a  
    Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.

    See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        gate_logits:
            Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
            shape [batch_size X sequence_length, num_experts].
        num_experts:
            Number of experts
        top_k:
            The number of experts to route per-token, can be also interpreted as the `top-k` routing
            parameter.
        attention_mask (`torch.Tensor`, *optional*):
            The attention_mask used in forward function
            shape [batch_size X sequence_length] if not None.

    Returns:
        The auxiliary loss.
    Nr   c                    s   g | ]}|  qS r.   )rH   )r!  
layer_gatecompute_devicer.   r/   r#  m  r$  z,load_balancing_loss_func.<locals>.<listcomp>r3   r2   )r   rO   ri   r<   r\   r   r   rn   rm   one_hotrK   rl   rP   r   r   rH   rt   r   )rt  rT   rf   r   concatenated_gate_logitsrouting_weightsr   selected_expertsexpert_masktokens_per_expertrouter_prob_per_expertr[  rY  r*  expert_attention_mask router_per_expert_attention_maskoverall_lossr.   rv  r/   load_balancing_loss_funcK  s>   



r  c                        s"  e Zd ZdgZdef fddZdd Zdd Zd	d
 Zdd Z	dd Z
dd Ze													d%deej deej deej deeeeej f  deej deej dee dee dee dee dee deej deeejf d eeef fd!d"Zed#d$ Z  ZS )&GraniteMoeSharedForCausalLMzlm_head.weightr   c                    sX   t  | t|| _|j| _tj|j|jdd| _|j	| _	|j
| _|j| _|   d S )NFr   )r    r!   r   r   r'  r   r'   r"   lm_headrouter_aux_loss_coefr   rT   r   r2  r*   r,   r.   r/   r!     s   
z$GraniteMoeSharedForCausalLM.__init__c                 C   s   | j jS r   r   r(  rQ   r.   r.   r/   r5    s   z0GraniteMoeSharedForCausalLM.get_input_embeddingsc                 C   s   || j _d S r   r  r8  r.   r.   r/   r9    s   z0GraniteMoeSharedForCausalLM.set_input_embeddingsc                 C   r3  r   r  rQ   r.   r.   r/   get_output_embeddings  r6  z1GraniteMoeSharedForCausalLM.get_output_embeddingsc                 C   r7  r   r  )r+   new_embeddingsr.   r.   r/   set_output_embeddings  r:  z1GraniteMoeSharedForCausalLM.set_output_embeddingsc                 C   r7  r   r   )r+   decoderr.   r.   r/   set_decoder  r:  z'GraniteMoeSharedForCausalLM.set_decoderc                 C   r3  r   r  rQ   r.   r.   r/   get_decoder  r6  z'GraniteMoeSharedForCausalLM.get_decoderNr   r;  r   r   r   r<  labelsr   r   r=  r   r>  r   logits_to_keepr1   c                 K   s  |dur|n| j j}|
dur|
n| j j}
|	dur|	n| j j}	|dur$|n| j j}| j||||||||	|
||d}|d }t|trGt| dn|}| 	|dd|ddf }|| j j
 }d}|duru| }| j||fd| j ji|}d}|
rt|r|jn|d | j| j|}|dur|| j||j 7 }|s|f|dd  }|
r|f| }|dur|f| S |S t||||j|j|j|jdS )ax  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GraniteMoeSharedForCausalLM

        >>> model = GraniteMoeSharedForCausalLM.from_pretrained("ibm/PowerMoE-3b")
        >>> tokenizer = AutoTokenizer.from_pretrained("ibm/PowerMoE-3b")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```N)r;  r   r   r   r<  r   r   r=  r   r>  r   r   r'  r2   r   )lossaux_lossry   r   r0   rC  r   )r   r   r   r=  rD  r   r   rd   slicer  logits_scalingrl   loss_functionr'  r  r   rT   r   r  rH   ri   r   r   r0   rC  )r+   r;  r   r   r   r<  r  r   r   r=  r   r>  r   r  r   r   r0   slice_indicesry   r  r  outputr.   r.   r/   r7     st   (
z#GraniteMoeSharedForCausalLM.forwardc                    s.   d}| D ]}|t  fdd|D f7 }q|S )Nr.   c                 3   s$    | ]}| d  |jV  qdS )r   N)index_selectrH   ri   )r!  
past_statebeam_idxr.   r/   rA  1  s   " z=GraniteMoeSharedForCausalLM._reorder_cache.<locals>.<genexpr>)rO   )r   r  reordered_past
layer_pastr.   r  r/   _reorder_cache,  s   z*GraniteMoeSharedForCausalLM._reorder_cache)NNNNNNNNNNNNr   )r8   r9   r:   _tied_weights_keysr   r!   r5  r9  r  r  r  r  r   r   r<   r   r=   r   r   rr  r   r   rd   rO   r   r7   rs  r  r>   r.   r.   r,   r/   r    sl    	

lr  )r  r   r   )Nr   )r   )Nr   N)Atypingr   r   r   r<   torch.nn.functionalr   r   rZ   activationsr   cache_utilsr   r	   
generationr
   modeling_attn_mask_utilsr   modeling_layersr   modeling_outputsr   r   r   modeling_rope_utilsr   r   modeling_utilsr   r   utilsr   r   r   configuration_granitemoesharedr   !torch.nn.attention.flex_attentionr   integrations.flex_attentionr   
get_loggerr8   r   Moduler   r?   rS   re   r   r   r   r=   rd   r   rl   r   r   r   r   r  r   rO   r  r  __all__r.   r.   r.   r/   <module>   s   
-0<

Va"  
R 