from typing import List, Optional
from functools import partial

from torchtune.models.llama2._component_builders import (
    llama2,
    lora_llama2,
    llama2_classifier,
    lora_llama2_classifier,
)
from torchtune.modules import TransformerDecoder
from torchtune.models.llama2._tokenizer import Llama2Tokenizer
from torchtune.modules.peft import LORA_ATTN_MODULES
from torchtune.data._prompt_templates import _TemplateType, _get_prompt_template


def llama2_7b() -> TransformerDecoder:
    """
    Builder for creating a Llama2 model initialized w/ the default 7B parameter values
    from https://arxiv.org/abs/2307.09288

    Returns:
        TransformerDecoder: Instantiation of Llama2 7B model
    """
    return llama2(
        vocab_size=32_000,
        num_layers=32,
        num_heads=32,
        num_kv_heads=32,
        embed_dim=4096,
        max_seq_len=4096,
        attn_dropout=0.0,
        norm_eps=1e-5,
    )
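

# Example usage (illustrative, not part of the public API): the builder returns a
# randomly initialized TransformerDecoder; pretrained weights are loaded separately,
# e.g. by a torchtune checkpointer or recipe.
#
#   model = llama2_7b()
#   n_params = sum(p.numel() for p in model.parameters())  # roughly 6.7B parameters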


def llama2_tokenizer(
    path: str,
    max_seq_len: Optional[int] = None,
    prompt_template: Optional[_TemplateType] = "torchtune.models.llama2.Llama2ChatTemplate",
) -> Llama2Tokenizer:
    """
    Tokenizer for Llama2.

    Args:
        path (str): path to the tokenizer
        max_seq_len (Optional[int]): maximum sequence length for tokenizing a single list of messages,
            after which the input will be truncated. Default is None.
        prompt_template (Optional[_TemplateType]): optional specified prompt template.
            If a string, it is assumed to be the dotpath of a :class:`~torchtune.data.PromptTemplateInterface`
            class. If a dictionary, it is assumed to be a custom prompt template mapping role to the
            prepend/append tags. Default is :class:`~torchtune.models.llama2.Llama2ChatTemplate`.

    Returns:
        Llama2Tokenizer: Instantiation of the Llama2 tokenizer
    """
    return Llama2Tokenizer(
        path=path,
        max_seq_len=max_seq_len,
        prompt_template=_get_prompt_template(prompt_template)
        if prompt_template is not None
        else None,
    )
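

# Example usage (illustrative; the path below is a placeholder for a local
# SentencePiece tokenizer.model file):
#
#   tokenizer = llama2_tokenizer("/path/to/tokenizer.model", max_seq_len=4096)
#   # Passing prompt_template=None skips the default Llama2ChatTemplate entirely.
#   plain_tokenizer = llama2_tokenizer("/path/to/tokenizer.model", prompt_template=None)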


def lora_llama2_7b(
    lora_attn_modules: List[LORA_ATTN_MODULES],
    apply_lora_to_mlp: bool = False,
    apply_lora_to_output: bool = False,
    lora_rank: int = 8,
    lora_alpha: float = 16,
    lora_dropout: float = 0.0,
    use_dora: bool = False,
    quantize_base: bool = False,
) -> TransformerDecoder:
    """
    Builder for creating a Llama2 7B model with LoRA enabled.

    The Llama2 defaults are the same as in :func:`~torchtune.models.llama2.llama2_7b`,
    while LoRA default params are based on
    https://github.com/tloen/alpaca-lora/blob/8bb8579e403dc78e37fe81ffbb253c413007323f/finetune.py#L41-L43.

    Args:
        lora_attn_modules (List[LORA_ATTN_MODULES]): list of which linear layers
            LoRA should be applied to in each self-attention block. Options are
            ``{"q_proj", "k_proj", "v_proj", "output_proj"}``.
        apply_lora_to_mlp (bool): whether to apply LoRA to the MLP in each transformer layer.
            Default: False
        apply_lora_to_output (bool): whether to apply LoRA to the model's final output projection.
            Default: False
        lora_rank (int): rank of each low-rank approximation
        lora_alpha (float): scaling factor for the low-rank approximation
        lora_dropout (float): LoRA dropout probability. Default: 0.0
        use_dora (bool): Decompose the LoRA weight into magnitude and direction, as
            introduced in "DoRA: Weight-Decomposed Low-Rank Adaptation" (https://arxiv.org/abs/2402.09353).
        quantize_base (bool): Whether to quantize base model weights
        
    Returns:
        TransformerDecoder: Instantiation of Llama2 7B model with LoRA applied
    """
    return lora_llama2(
        lora_attn_modules=lora_attn_modules,
        apply_lora_to_mlp=apply_lora_to_mlp,
        apply_lora_to_output=apply_lora_to_output,
        vocab_size=32_000,
        num_layers=32,
        num_heads=32,
        num_kv_heads=32,
        embed_dim=4096,
        max_seq_len=4096,
        attn_dropout=0.0,
        norm_eps=1e-5,
        lora_rank=lora_rank,
        lora_alpha=lora_alpha,
        lora_dropout=lora_dropout,
        use_dora=use_dora,
        quantize_base=quantize_base,
    )
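

# Example usage (illustrative): apply LoRA to the query and value projections only,
# with the default rank/alpha shown explicitly. Setting use_dora=True switches the
# same adapters to DoRA, and quantize_base=True gives the QLoRA variant (see
# qlora_llama2_7b below).
#
#   model = lora_llama2_7b(
#       lora_attn_modules=["q_proj", "v_proj"],
#       lora_rank=8,
#       lora_alpha=16,
#   )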


qlora_llama2_7b = partial(lora_llama2_7b, quantize_base=True)

qlora_llama2_7b.__doc__ = """
Builder for creating a Llama2 7B model with QLoRA enabled. Base model weights in linear layers
that LoRA is applied to are quantized per the QLoRA paper: https://arxiv.org/abs/2305.14314.
Please see `lora_llama2_7b` for full API arguments.
"""


def llama2_13b() -> TransformerDecoder:
    """
    Builder for creating a Llama2 model initialized w/ the default 13B parameter values
    from https://arxiv.org/abs/2307.09288

    Returns:
        TransformerDecoder: Instantiation of Llama2 13B model
    """
    return llama2(
        vocab_size=32_000,
        num_layers=40,
        num_heads=40,
        num_kv_heads=40,
        embed_dim=5120,
        intermediate_dim=13_824,
        max_seq_len=4096,
        attn_dropout=0.0,
        norm_eps=1e-5,
    )


def lora_llama2_13b(
    lora_attn_modules: List[LORA_ATTN_MODULES],
    apply_lora_to_mlp: bool = False,
    apply_lora_to_output: bool = False,
    lora_rank: int = 8,
    lora_alpha: float = 16,
    lora_dropout: float = 0.0,
    use_dora: bool = False,
    quantize_base: bool = False,
) -> TransformerDecoder:
    """
    Builder for creating a Llama2 13B model with LoRA enabled.

    The Llama2 defaults are the same as in :func:`~torchtune.models.llama2.llama2_13b`,
    while LoRA default params are based on
    https://github.com/tloen/alpaca-lora/blob/8bb8579e403dc78e37fe81ffbb253c413007323f/finetune.py#L41-L43.

    Args:
        lora_attn_modules (List[LORA_ATTN_MODULES]): list of which linear layers
            LoRA should be applied to in each self-attention block. Options are
            ``{"q_proj", "k_proj", "v_proj", "output_proj"}``.
        apply_lora_to_mlp (bool): whether to apply LoRA to the MLP in each transformer layer.
            Default: False
        apply_lora_to_output (bool): whether to apply LoRA to the model's final output projection.
            Default: False
        lora_rank (int): rank of each low-rank approximation
        lora_alpha (float): scaling factor for the low-rank approximation
        lora_dropout (float): LoRA dropout probability. Default: 0.0
        use_dora (bool): Decompose the LoRA weight into magnitude and direction, as
            introduced in "DoRA: Weight-Decomposed Low-Rank Adaptation" (https://arxiv.org/abs/2402.09353).
        quantize_base (bool): Whether to quantize base model weights

    Returns:
        TransformerDecoder: Instantiation of Llama2 13B model with LoRA applied
    """
    return lora_llama2(
        lora_attn_modules=lora_attn_modules,
        apply_lora_to_mlp=apply_lora_to_mlp,
        apply_lora_to_output=apply_lora_to_output,
        vocab_size=32_000,
        num_layers=40,
        num_heads=40,
        num_kv_heads=40,
        embed_dim=5120,
        intermediate_dim=13_824,
        max_seq_len=4096,
        attn_dropout=0.0,
        norm_eps=1e-5,
        lora_rank=lora_rank,
        lora_alpha=lora_alpha,
        lora_dropout=lora_dropout,
        use_dora=use_dora,
        quantize_base=quantize_base,
    )


qlora_llama2_13b = partial(lora_llama2_13b, quantize_base=True)

qlora_llama2_13b.__doc__ = """
Builder for creating a Llama2 13B model with QLoRA enabled. Base model weights in linear layers
that LoRA is applied to are quantized per the QLoRA paper: https://arxiv.org/abs/2305.14314.
Please see `lora_llama2_13b` for full API arguments.
"""
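

# Example usage (illustrative): the qlora_* builders are partials over the
# corresponding lora_* builders with quantize_base=True, so the two calls below
# construct the same model.
#
#   m1 = qlora_llama2_13b(lora_attn_modules=["q_proj", "v_proj"])
#   m2 = lora_llama2_13b(lora_attn_modules=["q_proj", "v_proj"], quantize_base=True)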


def llama2_70b() -> TransformerDecoder:
    """
    Builder for creating a Llama2 model initialized w/ the default 70B parameter values
    from https://arxiv.org/abs/2307.09288

    Returns:
        TransformerDecoder: Instantiation of Llama2 70B model
    """
    return llama2(
        vocab_size=32_000,
        num_layers=80,
        num_heads=64,
        num_kv_heads=8,
        embed_dim=8192,
        intermediate_dim=28_672,
        max_seq_len=4096,
        attn_dropout=0.0,
        norm_eps=1e-5,
    )


def lora_llama2_70b(
    lora_attn_modules: List[LORA_ATTN_MODULES],
    apply_lora_to_mlp: bool = False,
    apply_lora_to_output: bool = False,
    lora_rank: int = 8,
    lora_alpha: float = 16,
    lora_dropout: float = 0.0,
    use_dora: bool = False,
    quantize_base: bool = False,
) -> TransformerDecoder:
    """
    Builder for creating a Llama2 70B model with LoRA enabled.

    The Llama2 defaults are the same as in :func:`~torchtune.models.llama2.llama2_70b`,
    while LoRA default params are based on
    https://github.com/tloen/alpaca-lora/blob/8bb8579e403dc78e37fe81ffbb253c413007323f/finetune.py#L41-L43.

    Args:
        lora_attn_modules (List[LORA_ATTN_MODULES]): list of which linear layers
            LoRA should be applied to in each self-attention block. Options are
            ``{"q_proj", "k_proj", "v_proj", "output_proj"}``.
        apply_lora_to_mlp (bool): whether to apply LoRA to the MLP in each transformer layer.
            Default: False
        apply_lora_to_output (bool): whether to apply LoRA to the model's final output projection.
            Default: False
        lora_rank (int): rank of each low-rank approximation
        lora_alpha (float): scaling factor for the low-rank approximation
        lora_dropout (float): LoRA dropout probability. Default: 0.0
        use_dora (bool): Decompose the LoRA weight into magnitude and direction, as
            introduced in "DoRA: Weight-Decomposed Low-Rank Adaptation" (https://arxiv.org/abs/2402.09353).
        quantize_base (bool): Whether to quantize base model weights

    Returns:
        TransformerDecoder: Instantiation of Llama2 70B model with LoRA applied
    """
    return lora_llama2(
        lora_attn_modules=lora_attn_modules,
        apply_lora_to_mlp=apply_lora_to_mlp,
        apply_lora_to_output=apply_lora_to_output,
        vocab_size=32_000,
        num_layers=80,
        num_heads=64,
        num_kv_heads=8,
        embed_dim=8192,
        intermediate_dim=28_672,
        max_seq_len=4096,
        attn_dropout=0.0,
        norm_eps=1e-5,
        lora_rank=lora_rank,
        lora_alpha=lora_alpha,
        lora_dropout=lora_dropout,
        use_dora=use_dora,
        quantize_base=quantize_base,
    )


qlora_llama2_70b = partial(lora_llama2_70b, quantize_base=True)

qlora_llama2_70b.__doc__ = """
Builder for creating a Llama2 70B model with QLoRA enabled. Base model weights in linear layers
that LoRA is applied to are quantized per the QLoRA paper: https://arxiv.org/abs/2305.14314.
Please see `lora_llama2_70b` for full API arguments.
"""


def llama2_reward_7b() -> TransformerDecoder:
    """
    Builder for creating a Llama2 model initialized w/ the default 7B parameter values
    from https://arxiv.org/abs/2307.09288, where the output layer is a classification layer
    projecting to a single class for reward modelling.

    Returns:
        TransformerDecoder: Instantiation of Llama2 7B reward model
    """
    return llama2_classifier(
        num_classes=1,
        vocab_size=32_000,
        num_layers=32,
        num_heads=32,
        num_kv_heads=32,
        embed_dim=4096,
        max_seq_len=4096,
        attn_dropout=0.0,
        norm_eps=1e-5,
    )


def lora_llama2_reward_7b(
    lora_attn_modules: List[LORA_ATTN_MODULES],
    apply_lora_to_mlp: bool = False,
    apply_lora_to_output: bool = False,
    lora_rank: int = 8,
    lora_alpha: float = 16,
    lora_dropout: float = 0.0,
    use_dora: bool = False,
    quantize_base: bool = False,
) -> TransformerDecoder:
    """
    Builder for creating a Llama2 7B reward model with LoRA enabled.

    The Llama2 classifier defaults are the same as in :func:`~torchtune.models.llama2.llama2_reward_7b`,
    while LoRA default params are based on
    https://github.com/tloen/alpaca-lora/blob/8bb8579e403dc78e37fe81ffbb253c413007323f/finetune.py#L41-L43.

    Args:
        lora_attn_modules (List[LORA_ATTN_MODULES]): list of which linear layers
            LoRA should be applied to in each self-attention block. Options are
            ``{"q_proj", "k_proj", "v_proj", "output_proj"}``.
        apply_lora_to_mlp (bool): whether to apply LoRA to the MLP in each transformer layer.
            Default: False
        apply_lora_to_output (bool): whether to apply LoRA to the model's final output projection.
            Default: False
        lora_rank (int): rank of each low-rank approximation
        lora_alpha (float): scaling factor for the low-rank approximation
        lora_dropout (float): LoRA dropout probability. Default: 0.0
        use_dora (bool): Decompose the LoRA weight into magnitude and direction, as
            introduced in "DoRA: Weight-Decomposed Low-Rank Adaptation" (https://arxiv.org/abs/2402.09353).
        quantize_base (bool): Whether to quantize base model weights

    Returns:
        TransformerDecoder: Instantiation of Llama2 7B reward model with LoRA applied
    """
    return lora_llama2_classifier(
        lora_attn_modules=lora_attn_modules,
        apply_lora_to_mlp=apply_lora_to_mlp,
        apply_lora_to_output=apply_lora_to_output,
        num_classes=1,
        vocab_size=32_000,
        num_layers=32,
        num_heads=32,
        num_kv_heads=32,
        embed_dim=4096,
        max_seq_len=4096,
        attn_dropout=0.0,
        norm_eps=1e-5,
        lora_rank=lora_rank,
        lora_alpha=lora_alpha,
        lora_dropout=lora_dropout,
        use_dora=use_dora,
        quantize_base=quantize_base,
    )


qlora_llama2_reward_7b = partial(lora_llama2_reward_7b, quantize_base=True)

qlora_llama2_reward_7b.__doc__ = """
Builder for creating a Llama2 7B reward model with QLoRA enabled. Base model weights in linear layers
that LoRA is applied to are quantized per the QLoRA paper: https://arxiv.org/abs/2305.14314.
Please see `lora_llama2_reward_7b` for full API arguments.
"""
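

# Example usage (illustrative): the reward builders return a classifier variant of
# Llama2 7B whose output projection maps each position to a single logit
# (num_classes=1), typically read out as a scalar reward.
#
#   reward_model = llama2_reward_7b()
#   lora_reward_model = lora_llama2_reward_7b(
#       lora_attn_modules=["q_proj", "k_proj", "v_proj", "output_proj"],
#       apply_lora_to_mlp=True,
#   )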