from ...modeling_outputs import CausalLMOutputWithPast
from ...processing_utils import Unpack
from ...utils import logging
from ..deepseek_v3.modeling_deepseek_v3 import (
    DeepseekV3DecoderLayer,
    DeepseekV3MLP,
    DeepseekV3MoE,
    DeepseekV3PreTrainedModel,
    DeepseekV3TopkRouter,
)
from ..qwen3.modeling_qwen3 import (
    Qwen3Attention,
    Qwen3ForCausalLM,
    Qwen3Model,
    Qwen3RMSNorm,
    Qwen3RotaryEmbedding,
    TransformersKwargs,
)
from .configuration_dots1 import Dots1Config


logger = logging.get_logger(__name__)


# Dots1 is a modular model: the subclasses below only rebrand components
# reused from Qwen3 (norm, rotary embedding, attention, model/LM head) and
# DeepSeek-V3 (MLP, MoE, router, decoder layer, pretrained base); the modular
# converter expands them into the generated modeling file.
class Dots1RMSNorm(Qwen3RMSNorm):
    pass


class Dots1RotaryEmbedding(Qwen3RotaryEmbedding):
    pass


class Dots1Attention(Qwen3Attention):
    pass


class Dots1MLP(DeepseekV3MLP):
    pass


class Dots1MoE(DeepseekV3MoE):
    pass


class Dots1TopkRouter(DeepseekV3TopkRouter):
    pass


class Dots1DecoderLayer(DeepseekV3DecoderLayer):
    def __init__(self, config: Dots1Config, layer_idx: int):
        super().__init__(config, layer_idx)
        # Record which attention variant this layer uses, as scheduled
        # per-layer in the config.
        self.attention_type = config.layer_types[layer_idx]


class Dots1PreTrainedModel(DeepseekV3PreTrainedModel):
    pass


class Dots1Model(Qwen3Model):
    pass


class Dots1ForCausalLM(Qwen3ForCausalLM):
    def forward(
        self,
        **super_kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Dots1ForCausalLM

        >>> model = Dots1ForCausalLM.from_pretrained("rednote-hilab/dots1.llm1.inst")
        >>> tokenizer = AutoTokenizer.from_pretrained("rednote-hilab/dots1.llm1.inst")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        return super().forward(**super_kwargs)


__all__ = [
    "Dots1PreTrainedModel",
    "Dots1Model",
    "Dots1ForCausalLM",
]
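

# Minimal sketch of the layer-type mechanism used by ``Dots1DecoderLayer``
# above, kept in comments so the module's behaviour is unchanged. It assumes
# ``Dots1Config`` populates ``layer_types`` with one entry per layer (e.g.
# "full_attention" or "sliding_attention"), so the per-layer attention
# schedule can be inspected without building the model:
#
#     from transformers import Dots1Config
#
#     config = Dots1Config(num_hidden_layers=4)
#     for idx, layer_type in enumerate(config.layer_types):
#         print(idx, layer_type)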