from dataclasses import dataclass
from typing import Optional, Union

import torch
from torch import nn

from transformers.utils.generic import check_model_inputs

from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging
from ..auto import AutoModel
from ..llama.modeling_llama import (
    LlamaAttention,
    LlamaDecoderLayer,
    LlamaForCausalLM,
    LlamaMLP,
    LlamaModel,
    LlamaRMSNorm,
    LlamaRotaryEmbedding,
    TransformersKwargs,
)
from .configuration_csm import CsmConfig, CsmDepthDecoderConfig
from .generation_csm import CsmGenerationMixin


logger = logging.get_logger(__name__)


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for the model autoregressive outputs.
    """
)
class CsmOutputWithPast(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    depth_decoder_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction) of the depth decoder model.
    depth_decoder_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the depth decoder (scores for each vocabulary token before SoftMax).
    depth_decoder_past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
    depth_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
        one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

        Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    depth_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`.
    backbone_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction) of the backbone model.
    Nlosslogitspast_key_values.hidden_states
attentionsdepth_decoder_lossdepth_decoder_logitsdepth_decoder_past_key_valuesdepth_decoder_hidden_statesdepth_decoder_attentionsbackbone_loss)__name__
__module____qualname____doc__r#   r   torchFloatTensor__annotations__r$   r%   r   r&   tupler'   r(   r)   r*   r+   r,   r-    r6   r6   W/home/ubuntu/.local/lib/python3.10/site-packages/transformers/models/csm/modular_csm.pyr"   1   s   
 r"   c                   @      e Zd ZdS )
CsmRMSNormNr.   r/   r0   r6   r6   r6   r7   r9   b       r9   c                   @   r8   )CsmRotaryEmbeddingNr:   r6   r6   r6   r7   r<   f   r;   r<   c                   @   r8   )CsmMLPNr:   r6   r6   r6   r7   r=   j   r;   r=   c                   @   r8   )CsmAttentionNr:   r6   r6   r6   r7   r>   n   r;   r>   c                   @   r8   )CsmDecoderLayerNr:   r6   r6   r6   r7   r?   r   r;   r?   z[
    The bare Csm Model outputting raw hidden-states without any specific head on top.
    """
)
class CsmPreTrainedModel(PreTrainedModel):
    config: CsmConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["CsmDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": CsmDecoderLayer,
        "attentions": CsmAttention,
    }

    def _init_weights(self, module):
        super()._init_weights(module)
        if isinstance(module, CsmCodebooksHead):
            num_codebooks = module.num_codebooks
            for i in range(num_codebooks - 1):
                module.weight.data[i].normal_(mean=0.0, std=self.config.initializer_range)
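
# Big picture for the classes below: for each generated audio frame, the backbone transformer
# (`CsmBackboneModel`) reads the interleaved text/audio sequence and predicts the frame's first
# codebook token, while the depth decoder (`CsmDepthDecoderModel`) conditions on the backbone's
# last hidden state and autoregressively fills in the remaining `num_codebooks - 1` codebook
# tokens of that same frame, one codebook per position.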


class CsmDepthDecoderModel(LlamaModel):
    config: CsmDepthDecoderConfig

    def __init__(self, config):
        super().__init__(config)
        self.embed_tokens = nn.Embedding((config.num_codebooks * config.vocab_size), config.backbone_hidden_size)
        self.inputs_embeds_projector = nn.Linear(config.backbone_hidden_size, config.hidden_size, bias=False)

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        backbone_last_hidden_state: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, BaseModelOutputWithPast]:
        r"""
        backbone_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, backbone_hidden_size)`, *optional*):
            The last hidden state of the backbone model. Such input is required when the first codebook token (the one generated by the backbone model)
            is provided in the `input_ids` argument.
        """
        if position_ids is not None and not torch.compiler.is_compiling():
            logger.warning_once(
                "Custom `position_ids` were provided but will be ignored. CSM depth decoder automatically determines "
                "position_ids from `cache_position` and as it requires them to be identical across the batch, the "
                "provided position_ids will be ignored."
            )
            position_ids = None

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds.")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            inputs_seq_length = inputs_embeds.shape[1] if inputs_embeds is not None else input_ids.shape[1]
            device = inputs_embeds.device if inputs_embeds is not None else input_ids.device
            cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_seq_length, device=device)

        if inputs_embeds is None:
            # the depth decoder shares one flat embedding table over all codebooks: shift each token id
            # into the slice of the table that corresponds to its codebook (inferred from cache_position)
            codebook_idxs = torch.clamp(cache_position - 1, min=0)
            offset = codebook_idxs * self.vocab_size
            inputs_embeds = self.embed_tokens(input_ids + offset)

            input_ids_are_first_codebook = cache_position[0] == 0
            if backbone_last_hidden_state is not None:
                inputs_embeds[:, 0] = backbone_last_hidden_state
            elif not torch.compiler.is_compiling() and input_ids_are_first_codebook:
                logger.warning(
                    "When the first codebook token is provided, `backbone_last_hidden_state` should also be "
                    "provided for correct inference."
                )

        inputs_embeds = self.inputs_embeds_projector(inputs_embeds)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # position ids must be identical across the batch, so they are derived from cache_position
        position_ids = cache_position.unsqueeze(0)
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )
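
# Shape sketch for the embedding lookup above (illustrative numbers only): assuming a
# hypothetical `vocab_size=2051`, the table is flat over `num_codebooks * vocab_size` rows,
# so a token id `t` consumed at cache position `p` (codebook `p - 1`) is fetched from row
# `t + (p - 1) * 2051`. A single `nn.Embedding` thus holds one sub-table per codebook.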
zCsmDepthDecoderModel.forward)NNNNNNNN)r.   r/   r0   r   r4   ra   r   r   r   r2   
LongTensorr3   Tensorr   boolr   r   r   r5   r   r   r]   r6   r6   rR   r7   r^      sD   
 	

r^   c                       s&   e Zd Z fddZdddZ  ZS )rH   c                    s0   t    || _tt| jd ||| _d S )Nr   )rE   ra   rI   rb   	Parameterr2   emptyrK   )rO   rh   rI   rd   rR   r6   r7   ra      s   
 zCsmCodebooksHead.__init__Nc                    sf   |d u rj d }| jt|  n	|d }| j|   fddt j d D tjddS )Nr   c              	      s2   g | ]}t jd d |d d f  | jqS N)rb   
functionallinearT).0codebook_idxcodebook_weightr&   r6   r7   
<listcomp>  s    $z,CsmCodebooksHead.forward.<locals>.<listcomp>r   dim)r   rK   r2   r   rJ   stack)rO   r&   rq   
seq_lengthr   r6   r   r7   r      s   

zCsmCodebooksHead.forwardr   r.   r/   r0   ra   r   r]   r6   r6   rR   r7   rH      s    rH   a$  
    The CsmDepthDecoder Model transformer, with a [`CsmCodebooksHead`] on top,
    which can be seen as a position-specific language modeling head, allowing a different linear layer to be used for each codebook
    (e.g. position 0 is the first codebook and uses the first codebook head, etc.)
    """
)
class CsmDepthDecoderForCausalLM(LlamaForCausalLM, GenerationMixin):
    _tied_weights_keys = None
    _tp_plan = None
    _pp_plan = None

    def __init__(self, config):
        super().__init__(config)
        del self.lm_head
        self.codebooks_head = CsmCodebooksHead(config.hidden_size, config.num_codebooks, config.vocab_size)
        self.model = CsmDepthDecoderModel(config)

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[Cache] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ):
        model_inputs = super().prepare_inputs_for_generation(
            input_ids, past_key_values, attention_mask, inputs_embeds, cache_position, **kwargs
        )

        # `backbone_last_hidden_state` is only needed on the first generation step, where it
        # replaces the embedding of the first codebook token
        is_first_generation_step = model_inputs["cache_position"][0] == 0
        if not is_first_generation_step:
            model_inputs.pop("backbone_last_hidden_state")

        # position_ids are inferred from cache_position in the depth decoder forward
        model_inputs.pop("position_ids")
        return model_inputs

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        backbone_last_hidden_state: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, CausalLMOutputWithPast]:
        r"""
        backbone_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, backbone_hidden_size)`, *optional*):
            The last hidden state of the backbone model. Such input is required when the first codebook token (the one generated by the backbone model)
            is provided in the `input_ids` argument.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        outputs = self.model(
            input_ids=input_ids,
            backbone_last_hidden_state=backbone_last_hidden_state,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]

        if isinstance(logits_to_keep, int):
            if logits_to_keep == 0:
                # skip idx 0 logits: they correspond to the concatenated backbone last hidden state
                slice_indices = slice(1, None)
            else:
                slice_indices = slice(-logits_to_keep, None)
        else:
            slice_indices = logits_to_keep

        logits = self.codebooks_head(
            hidden_states[:, slice_indices, :], cache_position[slice_indices] if cache_position is not None else None
        )
        logits = logits.contiguous()

        loss = None
        if labels is not None:
            shift_labels = labels[..., 1:].contiguous()
            loss = self.loss_function(
                logits=logits, labels=None, vocab_size=self.config.vocab_size, shift_labels=shift_labels, **kwargs
            )

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class CsmBackboneModelEmbeddings(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.embed_audio_tokens = nn.Embedding((config.num_codebooks * config.vocab_size), config.hidden_size)
        self.register_buffer(
            "audio_tokens_offsets", torch.arange(config.num_codebooks) * config.vocab_size, persistent=False
        )

    def forward(self, input_ids):
        input_embeds = self.embed_audio_tokens(input_ids + self.audio_tokens_offsets)
        input_embeds = input_embeds.sum(dim=2)
        return input_embeds
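
# `CsmBackboneModelEmbeddings` applies the same flat-table trick across codebooks:
# `audio_tokens_offsets` shifts the ids of codebook `k` by `k * vocab_size` before the lookup,
# and the per-codebook embeddings of each frame are then summed, so `input_ids` of shape
# (batch_size, seq_length, num_codebooks) yields embeddings of shape (batch_size, seq_length, hidden_size).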


class CsmBackboneModel(LlamaModel):
    def __init__(self, config):
        super().__init__(config)
        self.embed_tokens = CsmBackboneModelEmbeddings(config)

    @check_model_inputs
    @auto_docstring
    def forward(self, **super_kwargs) -> Union[tuple, BaseModelOutputWithPast]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks) or (batch_size, sequence_length)`):
            1. (batch_size, sequence_length): corresponds to the input sequence prepared with the processor from the text prompt. Such input
            requires `input_values` to be provided so that audio can be encoded in codebook tokens and then merged with the text tokens.

            2. (batch_size, sequence_length, num_codebooks): codebook tokens generated during the autoregressive decoding. Such input is not meant to be used by end users.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        """
        return super().forward(**super_kwargs)


@auto_docstring(
    custom_intro="""
    The Csm model consists of two llama-like auto-regressive transformer models: a backbone model that predicts the first codebook token and a depth decoder that predicts the other codebook tokens.
    """
)
class CsmForConditionalGeneration(CsmPreTrainedModel, CsmGenerationMixin):
    _tied_weights_keys = [
        "backbone_model.embed_tokens.embed_audio_tokens.weight",
        "depth_decoder.model.embed_tokens.weight",
    ]

    def __init__(self, config):
        super().__init__(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.embed_text_tokens = nn.Embedding(config.text_vocab_size, config.hidden_size)
        self.backbone_model = CsmBackboneModel._from_config(config)
        self.depth_decoder = CsmDepthDecoderForCausalLM._from_config(config.depth_decoder_config)
        self.codec_model = AutoModel.from_config(config.codec_config)
        self.post_init()

    def get_input_embeddings(self):
        return self.backbone_model.embed_tokens

    def set_input_embeddings(self, value):
        self.backbone_model.embed_tokens = value

    def _tie_weights(self):
        if self.config.tie_codebooks_embeddings:
            self._tie_or_clone_weights(
                self.backbone_model.embed_tokens.embed_audio_tokens,
                self.depth_decoder.model.embed_tokens,
            )

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        if kwargs.get("output_loading_info", False):
            model, loading_info = super().from_pretrained(*args, **kwargs)
        else:
            model = super().from_pretrained(*args, **kwargs)

        # copy the `depth_decoder_*` attributes of the model generation config
        # onto the depth decoder's own generation config
        prefix = "depth_decoder_"
        prefix_len = len(prefix)
        depth_decoder_attrs = {
            attr[prefix_len:]: value
            for attr, value in vars(model.generation_config).items()
            if attr.startswith(prefix)
        }
        vars(model.depth_decoder.generation_config).update({"_from_model_config": False, **depth_decoder_attrs})

        # remove the prefixed attributes from the model generation config
        for attr in depth_decoder_attrs:
            delattr(model.generation_config, prefix + attr)

        if kwargs.get("output_loading_info", False):
            return model, loading_info
        return model

    def save_pretrained(self, *args, **kwargs):
        # store the depth decoder generation config attributes on the model generation config,
        # prefixed with `depth_decoder_`
        prefix = "depth_decoder_"
        depth_decoder_attrs = self.depth_decoder.generation_config.to_diff_dict()
        depth_decoder_attrs.pop("transformers_version", None)
        for attr, value in depth_decoder_attrs.items():
            setattr(self.generation_config, prefix + attr, value)

        super().save_pretrained(*args, **kwargs)

    def _merge_input_ids_with_input_values(
        self,
        input_ids: Optional[torch.Tensor] = None,
        input_values: Optional[torch.Tensor] = None,
        input_values_cutoffs: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        """
        Merges the input_ids and input_values to produce a single inputs_embeds tensor:
        1 - Runs the codec model on the `input_values` to retrieve codebook tokens.
        2 - Embeds codebook tokens and places them at the correct positions in the inputs_embeds tensor.
        3 - If labels are provided, expands them to match codebook dimensions and positions the target codebook tokens in the expanded labels tensor.

        Args:
            input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`):
                The input ids to embed.
            input_values (`torch.Tensor` of shape `(batch_size, channels, audio_sequence_length)`):
                The audio input values to embed.
            input_values_cutoffs (`torch.Tensor` of shape `(batch_size, max_num_audio)`):
                The cutoffs of the audio input values relative to its batch index, padded with -1 when no audio.
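
        Returns:
            `dict`: the merged `inputs_embeds` and, when `labels` are provided, the labels expanded to
            `(batch_size, sequence_length, num_codebooks)`.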
        """
        inputs_embeds = self.embed_text_tokens(input_ids)

        if input_values is not None:
            # infer the audio segment lengths from the cutoffs and build the corresponding mask
            input_values_cutoffs = nn.functional.pad(input_values_cutoffs, (1, 0))
            audio_lengths = input_values_cutoffs[input_values_cutoffs >= 0].diff()
            audio_lengths = audio_lengths[audio_lengths > 0]
            input_values_mask = torch.arange(input_values_cutoffs.max(), device=input_values.device).expand(
                len(audio_lengths), -1
            )
            input_values_mask = input_values_mask < audio_lengths.unsqueeze(1)

            # encode the audio segments one by one with the codec model
            with torch.no_grad():
                audio_tokens_list = []
                for batch_input_values, batch_input_values_cutoffs in zip(input_values, input_values_cutoffs):
                    batch_input_values_cutoffs = batch_input_values_cutoffs[batch_input_values_cutoffs >= 0]
                    for i in range(batch_input_values_cutoffs.shape[0] - 1):
                        start_idx = batch_input_values_cutoffs[i]
                        end_idx = batch_input_values_cutoffs[i + 1]
                        audio_batch = batch_input_values[..., start_idx:end_idx]
                        codec_outputs = self.codec_model.encode(audio_batch.unsqueeze(0))
                        codebook_ids = codec_outputs.audio_codes.transpose(1, -1)
                        audio_tokens_list.append(codebook_ids[0])

                max_audio_frames = max(el.shape[0] for el in audio_tokens_list)
                batched_audio_token_ids = torch.stack(
                    [nn.functional.pad(el, (0, 0, 0, max_audio_frames - el.shape[0])) for el in audio_tokens_list]
                )
                audio_codes_mask = self.codec_model.get_audio_codes_mask(input_values_mask)

            # embed the codebook tokens and place them at the audio token positions
            audio_token_id = self.config.audio_token_id
            audio_token_mask = input_ids == audio_token_id

            audio_embeds = self.backbone_model.embed_tokens(batched_audio_token_ids)
            inputs_embeds[audio_token_mask] = audio_embeds[audio_codes_mask]

            # same for the audio eos token
            audio_eos_frame_ids = (
                torch.ones((1, 1, self.config.num_codebooks), device=input_ids.device, dtype=torch.long)
                * self.config.codebook_eos_token_id
            )
            audio_eos_embeds = self.backbone_model.embed_tokens(audio_eos_frame_ids).squeeze(1)
            audio_eos_token_mask = input_ids == self.config.audio_eos_token_id
            inputs_embeds[audio_eos_token_mask] = audio_eos_embeds.repeat(audio_eos_token_mask.sum(), 1)

            # if labels are provided, expand them to match the codebook dimensions and
            # place the target codebook tokens at the audio token positions
            if labels is not None:
                labels_expanded = labels.unsqueeze(-1).repeat(1, 1, self.config.num_codebooks)
                labels_expanded[audio_token_mask] = batched_audio_token_ids[audio_codes_mask]
                labels_expanded[audio_eos_token_mask] = audio_eos_frame_ids
                # frames labeled with -101 are used only by the backbone: mask them for the depth decoder
                depth_decoder_ignore_frames_idxs = (labels == -101).nonzero(as_tuple=True)
                labels_expanded[depth_decoder_ignore_frames_idxs[0], depth_decoder_ignore_frames_idxs[1], 1:] = -100
                labels = labels_expanded

        return {"inputs_embeds": inputs_embeds, "labels": labels}
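
    # Note: the merge above runs the codec model, so generation performs it only on the first step,
    # while `input_ids` is still the 2D text/audio-token sequence (see the `ndim == 2` check below);
    # subsequent steps feed 3D codebook tokens that the backbone embeds directly.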

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[Cache] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ):
        model_inputs = super().prepare_inputs_for_generation(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            **kwargs,
        )

        if input_ids is not None and input_ids.ndim == 2 and model_inputs.get("inputs_embeds") is None:
            merged_inputs = self._merge_input_ids_with_input_values(
                input_ids=input_ids,
                input_values=kwargs.get("input_values"),
                input_values_cutoffs=kwargs.get("input_values_cutoffs"),
                labels=kwargs.get("labels"),
            )
            model_inputs.update(
                inputs_embeds=merged_inputs["inputs_embeds"], labels=merged_inputs["labels"], input_ids=None
            )

        return model_inputs

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        input_values: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        input_values_cutoffs: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, CsmOutputWithPast]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks) or (batch_size, sequence_length)`):
            1. (batch_size, sequence_length): corresponds to the input sequence prepared with the processor from the text prompt. Such input
            requires `input_values` to be provided so that audio can be encoded in codebook tokens and then merged with the text tokens.

            2. (batch_size, sequence_length, num_codebooks): codebook tokens generated during the autoregressive decoding. Such input is not meant to be used by end users.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        input_values_cutoffs (`torch.Tensor` of shape `(batch_size, max_num_audio)`, *optional*):
            Specify the end positions of audio segments within each batch entry, relative to the concatenated audio input.
            If a batch entry has fewer segments than the maximum, it is padded with -1. For example, in a batch of 2 sequences
            where the first contains 2 audio segments of length l1, and the second contains 1 audio segment of length l2,
            the input_values_cutoffs would be: [[l1, 2 * l1], [l2, -1]].
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[config.audio_token_id, -100, -101]`.
            Requires the targeted `input_values` to be provided, as audio tokens will be inferred from them using the `codec_model`.
            - `config.audio_token_id` indicates an audio frame (sequence-length elements are treated as frames)
            - `-100` will be ignored in the loss computation
            - `-101` indicates the audio frame will be used only for the backbone model (using the first codebook token as labels)

            Such labels can be prepared using `output_labels=True` when calling [`CsmProcessor`].
        logits_to_keep (`int` or `torch.Tensor`, *optional*):
            Kept for compatibility. Only the following values are supported:
            1. `0`, which is equivalent to keeping all logits, used in the training regime
            2. `1`, which is equivalent to keeping only the last logit, used in the generation regime

        Example:

        ```python
        >>> import torch
        >>> from transformers import CsmForConditionalGeneration, AutoProcessor
        >>> from datasets import load_dataset, Audio

        >>> model_id = "sesame/csm-1b"
        >>> torch_device = "cuda" if torch.cuda.is_available() else "cpu"

        >>> processor = AutoProcessor.from_pretrained(model_id)

        >>> ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
        >>> # ensure the audio is 24kHz
        >>> ds = ds.cast_column("audio", Audio(sampling_rate=24000))

        >>> conversation = []
        >>> # prepare a conversation with text and corresponding audio
        >>> for text, audio, speaker_id in zip(ds[:4]["text"], ds[:4]["audio"], ds[:4]["speaker_id"]):
        ...     conversation.append(
        ...         {
        ...             "role": f"{speaker_id}",
        ...             "content": [{"type": "text", "text": text}, {"type": "audio", "path": audio["array"]}],
        ...         }
        ...     )

        >>> inputs = processor.apply_chat_template(
        ...     conversation,
        ...     tokenize=True,
        ...     return_dict=True,
        ...     output_labels=True,
        ... ).to(torch_device)

        >>> model = CsmForConditionalGeneration.from_pretrained(model_id, device_map=torch_device)
        >>> output = model(**inputs)
        >>> output.loss.backward()
        ```"""
        if input_ids is not None and input_ids.ndim == 2:
            merged_inputs = self._merge_input_ids_with_input_values(
                input_ids, input_values, input_values_cutoffs, labels
            )
            inputs_embeds = merged_inputs["inputs_embeds"]
            labels = merged_inputs["labels"]
            input_ids = None

        backbone_outputs = self.backbone_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        backbone_hidden_states = backbone_outputs[0]
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        backbone_logits = self.lm_head(backbone_hidden_states[:, slice_indices, :])

        loss = None
        backbone_loss = None
        depth_decoder_loss = None
        depth_decoder_outputs = None
        if labels is not None:
            # the first codebook tokens are the labels for the backbone model
            backbone_labels = labels[:, :, 0]
            backbone_loss = self.loss_function(
                logits=backbone_logits, labels=backbone_labels, vocab_size=self.config.vocab_size, **kwargs
            )

            # the depth decoder is trained only on frames where every codebook label is provided
            train_mask = ~(labels[:, :, 1:] == -100).all(dim=-1)
            depth_decoder_input_ids = labels[train_mask][..., : self.config.num_codebooks - 1]
            # add a placeholder in position 0; it is replaced by the backbone last hidden state
            depth_decoder_input_ids = nn.functional.pad(depth_decoder_input_ids, (1, 0), value=0)

            train_idxs = train_mask.nonzero(as_tuple=True)
            backbone_last_hidden_states = backbone_hidden_states[train_idxs[0], train_idxs[1] - 1, :]
            depth_decoder_labels = labels[train_mask]

            depth_decoder_outputs = self.depth_decoder(
                input_ids=depth_decoder_input_ids,
                backbone_last_hidden_state=backbone_last_hidden_states,
                use_cache=use_cache,
                labels=depth_decoder_labels,
                return_dict=True,
                **kwargs,
            )
            depth_decoder_loss = depth_decoder_outputs.loss
            loss = backbone_loss + depth_decoder_loss

        return CsmOutputWithPast(
            loss=loss,
            backbone_loss=backbone_loss,
            depth_decoder_loss=depth_decoder_loss,
            logits=backbone_logits,
            past_key_values=backbone_outputs.past_key_values,
            hidden_states=backbone_outputs.hidden_states,
            attentions=backbone_outputs.attentions,
            depth_decoder_logits=depth_decoder_outputs.logits if depth_decoder_outputs is not None else None,
            depth_decoder_past_key_values=depth_decoder_outputs.past_key_values
            if depth_decoder_outputs is not None
            else None,
            depth_decoder_hidden_states=depth_decoder_outputs.hidden_states
            if depth_decoder_outputs is not None
            else None,
            depth_decoder_attentions=depth_decoder_outputs.attentions if depth_decoder_outputs is not None else None,
        )


__all__ = [
    "CsmPreTrainedModel",
    "CsmBackboneModel",
    "CsmDepthDecoderModel",
    "CsmDepthDecoderForCausalLM",
    "CsmForConditionalGeneration",
]