o
    }oi                     @   s   d dl mZ d dlmZ d dlZd dlZd dlm	Z	m
Z
 d dlmZ d dlmZ d dlmZ dZejjedd	ejej fd
dZ		ddeded	ejfddZejje
ed				ddee dededed	ejf
ddZdS )    )OptionalN)finetunepretrain)MockDataModule)SquadDataModule)
llama3_70bllama3_70b_16k)namereturnc                  C   s   t  } d| j_| S )a  
    Factory function to create a Llama3 70B model configuration with 16k sequence length.

    Returns:
        run.Config[pl.LightningModule]: Configuration for the Llama3 70B model with 16k sequence length.

    Examples:
        CLI usage:
            $ nemo llm pretrain model=llama3_70b_16k ...

        Python API usage:
            >>> model_config = model()
            >>> print(model_config)
     @  )r   modelconfig
seq_length)model_config r   _/home/ubuntu/.local/lib/python3.10/site-packages/nemo/collections/llm/recipes/llama3_70b_16k.pyr      s   r         	num_nodesnum_gpus_per_nodec              
   C   s   t jddtjddd| |dS )aD  
    Configure the NeMo Lightning Trainer for Llama3 70B model with 16k sequence length.

    This function sets up the distributed training strategy optimized for the large 70B model with longer sequences.

    Args:
        num_nodes (int, optional): Number of compute nodes to use. Defaults to 4.
        num_gpus_per_node (int, optional): Number of GPUs per node. Defaults to 8.

    Returns:
        run.Config: Configuration for the NeMo Lightning Trainer.

    Examples:
        CLI usage:
            $ nemo llm pretrain trainer=llama3_70b_16k ...

        Python API usage:
            >>> trainer_config = trainer(num_nodes=4, num_gpus_per_node=8)
            >>> print(trainer_config)

    Note:
        This configuration uses extensive parallelism to handle the large model size and longer sequence length efficiently.
    r      NT)tensor_parallelismpipeline_parallelismpipeline_parallelism_typevirtual_pipeline_parallelismcontext_parallelismsequence_parallelismr   r   )r   trainertorchbfloat16r   r   r   r   r   r   3   s   r   )targetr	   defaultdirr	   c                 C   s@   t j|| ||d}t |_t||d|_tjtdddd|_|S )aK  
    Create a pre-training recipe for Llama3 70B model with 16k sequence length.

    This function sets up a complete configuration for pre-training, including
    model, trainer, and data settings optimized for 16k sequence length.

    Args:
        dir (Optional[str]): Directory for saving logs and checkpoints.
        name (str): Name of the pre-training run.
        num_nodes (int, optional): Number of compute nodes to use. Defaults to 4.
        num_gpus_per_node (int, optional): Number of GPUs per node. Defaults to 8.

    Returns:
        run.Partial: Partial configuration for pre-training.

    Examples:
        CLI usage:
            $ nemo llm pretrain --factory llama3_70b_16k
            $ nemo llm pretrain --factory "llama3_70b_16k(num_nodes=4, name='my_70b_16k_pretrain')"

        Python API usage:
            >>> recipe = pretrain_recipe(name="llama3_70b_16k_pretrain", num_nodes=4)
            >>> print(recipe)

    Note:
        This recipe is optimized for the large 70B model with longer sequences (16k).
        It requires significant computational resources.
    )r	   r#   r   r   r    r   i      )r   global_batch_sizemicro_batch_size)r   pretrain_reciper   r   runConfigr   data)r#   r	   r   r   reciper   r   r   r'   Z   s
   #r'   )r   r   )Nr"   r   r   )typingr   lightning.pytorchpytorchplnemo_runr(   r   nemo.collections.llm.apir   r   "nemo.collections.llm.gpt.data.mockr   #nemo.collections.llm.gpt.data.squadr   nemo.collections.llm.recipesr   NAMEclifactoryr)   LightningModuler   intr   strPartialr'   r   r   r   r   <module>   sH   
'