from functools import partial
from typing import List, Optional

from torchtune.data._prompt_templates import _get_prompt_template, _TemplateType
from torchtune.models.mistral._component_builders import (
    lora_mistral,
    lora_mistral_classifier,
    mistral,
    mistral_classifier,
)
from torchtune.models.mistral._tokenizer import MistralTokenizer
from torchtune.modules import TransformerDecoder
from torchtune.modules.peft import LORA_ATTN_MODULES

"""
Model builders build specific instantiations using component builders. For example
the ``mistral_7b`` model builder uses the ``mistral`` component builder to create
the Mistral 7B model.
"""


def mistral_7b() -> TransformerDecoder:
    """
    Builder for creating a Mistral 7B model initialized with the default 7B parameter values
    from https://mistral.ai/news/announcing-mistral-7b/


    Returns:
        TransformerDecoder: Instantiation of Mistral 7B model
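
    Example:
        A minimal usage sketch (note that instantiating the full model allocates
        memory for roughly 7B parameters):

        >>> model = mistral_7b()
        >>> isinstance(model, TransformerDecoder)
        True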
     }             8             h㈵>)	
vocab_size
num_layers	num_headsnum_kv_heads	embed_dimintermediate_dimmax_seq_lenattn_dropoutnorm_eps)r    r    r    \/home/ubuntu/.local/lib/python3.10/site-packages/torchtune/models/mistral/_model_builders.py
mistral_7b   s   	r"   N,torchtune.models.mistral.MistralChatTemplatepathr   prompt_templatec                 C   s"   t | ||durt|dS ddS )a  
    Tokenizer for Mistral models.

    Args:
        path (str): path to the tokenizer
        max_seq_len (Optional[int]): maximum sequence length for tokenizing a single list of messages,
            after which the input will be truncated. Default is None.
        prompt_template (Optional[_TemplateType]): optional specified prompt template.
            If a string, it is assumed to be the dotpath of a :class:`~torchtune.data.PromptTemplateInterface`
            class. If a dictionary, it is assumed to be a custom prompt template mapping role to the
            prepend/append tags. Default is :class:`~torchtune.models.mistral.MistralChatTemplate`.

    Returns:
        MistralTokenizer: Instantiation of the Mistral tokenizer
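
    Example:
        A minimal usage sketch; ``/tmp/tokenizer.model`` is a hypothetical path to
        a downloaded Mistral SentencePiece tokenizer file:

        >>> tokenizer = mistral_tokenizer("/tmp/tokenizer.model")
        >>> token_ids = tokenizer.encode("Hello world")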
    Nr$   r   r%   )r   r	   r&   r    r    r!   mistral_tokenizer3   s   "r'   Fr      r   lora_attn_modulesapply_lora_to_mlpapply_lora_to_output	lora_rank
lora_alphalora_dropoutuse_doraquantize_basec                 C   sv   t di d| d|d|ddddddd	d
ddddddddddddd|d|d|d|d|S )ao  
    Builder for creating a Mistral 7B model with LoRA enabled.

    Args:
        lora_attn_modules (List[LORA_ATTN_MODULES]): list of which linear layers
            LoRA should be applied to in each self-attention block. Options are
            ``{"q_proj", "k_proj", "v_proj", "output_proj"}``.
        apply_lora_to_mlp (bool): whether to apply LoRA to the MLP in each transformer layer.
            Default: False
        apply_lora_to_output (bool): whether to apply LoRA to the model's final output projection.
            Default: False
        lora_rank (int): rank of each low-rank approximation
        lora_alpha (float): scaling factor for the low-rank approximation
        lora_dropout (float): dropout probability for the low-rank approximation. Default: 0.0
        use_dora (bool): Decompose the LoRA weight into magnitude and direction, as
            introduced in "DoRA: Weight-Decomposed Low-Rank Adaptation" (https://arxiv.org/abs/2402.09353).
        quantize_base (bool): Whether to quantize base model weights

    Returns:
        TransformerDecoder: Instantiation of Mistral 7B model with LoRA applied
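
    Example:
        A minimal usage sketch applying rank-8 LoRA to the query and value
        projections only:

        >>> model = lora_mistral_7b(
        ...     lora_attn_modules=["q_proj", "v_proj"], lora_rank=8, lora_alpha=16
        ... )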
    """
    return lora_mistral(
        lora_attn_modules=lora_attn_modules,
        apply_lora_to_mlp=apply_lora_to_mlp,
        apply_lora_to_output=apply_lora_to_output,
        vocab_size=32_000,
        num_layers=32,
        num_heads=32,
        num_kv_heads=8,
        embed_dim=4096,
        intermediate_dim=14336,
        max_seq_len=32768,
        attn_dropout=0.0,
        norm_eps=1e-5,
        rope_base=10_000,
        lora_rank=lora_rank,
        lora_alpha=lora_alpha,
        lora_dropout=lora_dropout,
        use_dora=use_dora,
        quantize_base=quantize_base,
    )


qlora_mistral_7b = partial(lora_mistral_7b, quantize_base=True)

qlora_mistral_7b.__doc__ = """
Builder for creating a Mistral model with QLoRA enabled. Base model weights in linear layers
that LoRA is applied to are quantized per the QLoRA paper: https://arxiv.org/abs/2305.14314.
Please see `lora_mistral_7b` for full API arguments.
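
Example (an illustrative sketch; base linear weights are quantized when the model
is built, so the call is otherwise identical to ``lora_mistral_7b``):
    >>> model = qlora_mistral_7b(lora_attn_modules=["q_proj", "v_proj"])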
"""


def mistral_reward_7b() -> TransformerDecoder:
    """
    Builder for creating a Mistral 7B model initialized with the default 7B
    parameter values from:
    https://huggingface.co/Ray2333/reward-model-Mistral-7B-instruct-Unified-Feedback
    where the output layer is a classification layer projecting to a single class for reward modelling.

    Returns:
        TransformerDecoder: Instantiation of Mistral 7B classifier model
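
    Example:
        A minimal usage sketch; the classification head projects hidden states to
        a single reward logit:

        >>> model = mistral_reward_7b()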
       r   r   r   r   r   r   r   r   )
num_classesr   r   r   r   r   r   r   r   r   )r   r    r    r    r!   mistral_reward_7b   s   
r7   c                 C   s|   t di d| d|d|dddddd	d
d	ddddddddddddddd|d|d|d|d|S ) av  
    Builder for creating a Mistral reward 7B model with LoRA enabled.

    Args:
        lora_attn_modules (List[LORA_ATTN_MODULES]): list of which linear layers
            LoRA should be applied to in each self-attention block. Options are
            ``{"q_proj", "k_proj", "v_proj", "output_proj"}``.
        apply_lora_to_mlp (bool): whether to apply LoRA to the MLP in each transformer layer.
            Default: False
        apply_lora_to_output (bool): whether to apply LoRA to the model's final output projection.
            Default: False
        lora_rank (int): rank of each low-rank approximation
        lora_alpha (float): scaling factor for the low-rank approximation
        lora_dropout (float): dropout probability for the low-rank approximation. Default: 0.0
        use_dora (bool): Decompose the LoRA weight into magnitude and direction, as
            introduced in "DoRA: Weight-Decomposed Low-Rank Adaptation" (https://arxiv.org/abs/2402.09353).
        quantize_base (bool): Whether to quantize base model weights

    Returns:
        TransformerDecoder: Instantiation of Mistral reward 7B model with LoRA applied
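
    Example:
        A minimal usage sketch mirroring ``lora_mistral_7b``:

        >>> model = lora_mistral_reward_7b(lora_attn_modules=["q_proj", "v_proj"])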
    """
    return lora_mistral_classifier(
        lora_attn_modules=lora_attn_modules,
        apply_lora_to_mlp=apply_lora_to_mlp,
        apply_lora_to_output=apply_lora_to_output,
        num_classes=1,
        vocab_size=32_000,
        num_layers=32,
        num_heads=32,
        num_kv_heads=8,
        embed_dim=4096,
        intermediate_dim=14336,
        max_seq_len=32768,
        attn_dropout=0.0,
        norm_eps=1e-5,
        rope_base=10_000,
        lora_rank=lora_rank,
        lora_alpha=lora_alpha,
        lora_dropout=lora_dropout,
        use_dora=use_dora,
        quantize_base=quantize_base,
    )


qlora_mistral_reward_7b = partial(lora_mistral_reward_7b, quantize_base=True)

qlora_mistral_reward_7b.__doc__ = """
Builder for creating a Mistral reward 7B model with QLoRA enabled. Base model weights in linear layers
that LoRA is applied to are quantized per the QLoRA paper: https://arxiv.org/abs/2305.14314.
Please see `lora_mistral_reward_7b` for full API arguments.
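
Example (an illustrative sketch, identical in form to ``qlora_mistral_7b``):
    >>> model = qlora_mistral_reward_7b(lora_attn_modules=["q_proj", "v_proj"])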
"""