from typing import Callable, Optional

import lightning.pytorch as pl
import nemo_run as run
import torch
from lightning.pytorch.callbacks.callback import Callback
from megatron.core.distributed import DistributedDataParallelConfig

from nemo import lightning as nl
from nemo.collections.llm.api import pretrain
from nemo.collections.llm.gpt.data.mock import MockDataModule
from nemo.collections.llm.recipes import mixtral_8x22b
from nemo.collections.llm.recipes.log.default import default_log, default_resume, tensorboard_logger
from nemo.collections.llm.recipes.optim.adam import distributed_fused_adam_with_cosine_annealing
from nemo.lightning.pytorch.callbacks.megatron_comm_overlap import MegatronCommOverlapCallback
from nemo.lightning.pytorch.callbacks.moe_token_drop import MegatronTokenDropCallback
from nemo.utils.exp_manager import TimingCallback

NAME = "mixtral_8x22b_64k"


@run.cli.factory(name=NAME)
def model() -> run.Config[pl.LightningModule]:
    """
    Factory function to create a Mixtral 8x22B model configuration with a 64k sequence length.

    Returns:
        run.Config[pl.LightningModule]: Configuration for the Mixtral 8x22B model.

    Examples:
        CLI usage:
            $ nemo llm pretrain model=mixtral_8x22b_64k ...

        Python API usage:
            >>> model_config = model()
            >>> print(model_config)
    """
    model_config = mixtral_8x22b.model()
    model_config.config.seq_length = 65536  # 64k context window
    return model_config


# The integer parallelism defaults below are assumed values for a 64k-sequence
# Mixtral 8x22B run and should be tuned to the target cluster; sequence
# parallelism and the max_steps budget follow the standard NeMo recipe.
def trainer(
    tensor_parallelism: int = 8,
    pipeline_parallelism: int = 4,
    pipeline_parallelism_type: Optional[torch.dtype] = torch.bfloat16,
    virtual_pipeline_parallelism: Optional[int] = 14,
    context_parallelism: int = 8,
    sequence_parallelism: bool = True,
    expert_parallelism: int = 1,
    num_nodes: int = 32,
    num_gpus_per_node: int = 8,
    max_steps: int = 1168251,
    callbacks: Optional[list[run.Config[Callback]]] = None,
) -> run.Config[nl.Trainer]:
    """
    Configure the NeMo Lightning Trainer for Mixtral 8x22B model.

    This function sets up the distributed training strategy optimized for the large Mixtral 8x22B model.

    Args:
        tensor_parallelism (int): Degree of tensor model parallelism.
        pipeline_parallelism (int): Degree of pipeline model parallelism.
        pipeline_parallelism_type (Optional[torch.dtype]): Data type for pipeline parallelism.
        virtual_pipeline_parallelism (Optional[int]): Size of virtual pipeline parallelism.
        context_parallelism (int): Degree of context parallelism.
        sequence_parallelism (bool): Whether to use sequence parallelism.
        expert_parallelism (int): Degree of expert parallelism.
        num_nodes (int): Number of compute nodes to use.
        num_gpus_per_node (int): Number of GPUs per node.
        max_steps (int): Maximum number of training steps.
        callbacks (Optional[list[run.Config[Callback]]]): List of callback configurations.

    Returns:
        run.Config[nl.Trainer]: Configuration for the NeMo Lightning Trainer.

    Examples:
        CLI usage:
            $ nemo llm pretrain trainer=mixtral_8x22b_64k ...

        Python API usage:
            >>> trainer_config = trainer(num_nodes=16, num_gpus_per_node=8)
            >>> print(trainer_config)

    Note:
        This configuration uses extensive parallelism to handle the large model size efficiently.
    """
    strategy = run.Config(
        nl.MegatronStrategy,
        tensor_model_parallel_size=tensor_parallelism,
        pipeline_model_parallel_size=pipeline_parallelism,
        pipeline_dtype=pipeline_parallelism_type,
        virtual_pipeline_model_parallel_size=virtual_pipeline_parallelism,
        context_parallel_size=context_parallelism,
        sequence_parallel=sequence_parallelism,
        expert_model_parallel_size=expert_parallelism,
        gradient_as_bucket_view=True,
        ckpt_async_save=True,
        ckpt_parallel_load=True,
        ddp=run.Config(
            DistributedDataParallelConfig,
            check_for_nan_in_grad=True,
            grad_reduce_in_fp32=True,
            overlap_grad_reduce=True,
            overlap_param_gather=True,
            average_in_collective=True,
        ),
    )

    # Validation/logging cadence mirrors the common NeMo pretraining recipes.
    trainer = run.Config(
        nl.Trainer,
        accelerator="gpu",
        accumulate_grad_batches=1,
        callbacks=callbacks,
        devices=num_gpus_per_node,
        limit_test_batches=50,
        limit_val_batches=32,
        log_every_n_steps=10,
        max_steps=max_steps,
        num_nodes=num_nodes,
        plugins=run.Config(nl.MegatronMixedPrecision, precision="bf16-mixed"),
        strategy=strategy,
        use_distributed_sampler=False,
        val_check_interval=2000,
    )

    return trainer
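

# Illustrative sanity check, not part of the NeMo recipe itself: the Megatron
# world size must factor as TP x PP x CP x DP. With the assumed defaults above
# (tensor=8, pipeline=4, context=8) on 32 nodes x 8 GPUs, the data-parallel
# size works out to 256 / (8 * 4 * 8) = 1.
def _model_parallel_sanity_check(world_size: int, tp: int, pp: int, cp: int) -> int:
    """Return the implied data-parallel size, raising if the layout cannot fit."""
    model_parallel = tp * pp * cp
    if world_size % model_parallel != 0:
        raise ValueError(f"{world_size} GPUs cannot be split into TP*PP*CP={model_parallel}")
    # Expert parallelism is typically drawn from the data-parallel dimension,
    # so the returned value should also be divisible by expert_parallelism.
    return world_size // model_parallel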


@run.cli.factory(target=pretrain, name=NAME)
def pretrain_recipe(
    dir: Optional[str] = None,
    name: str = "default",
    num_nodes: int = 32,  # assumed node count; match it to the trainer() parallelism
    num_gpus_per_node: int = 8,
    performance_mode: bool = False,
    fn: Callable = pretrain,
) -> run.Partial:
    """
    Create a pre-training recipe for Mixtral 8x22B model.

    This function sets up a complete configuration for pre-training, including
    model, trainer, data, logging, optimization, and resumption settings.

    Args:
        dir (Optional[str]): Directory for saving logs and checkpoints.
        name (str): Name of the pre-training run.
        num_nodes (int): Number of compute nodes to use.
        num_gpus_per_node (int): Number of GPUs per node.
        performance_mode (bool): If true, enables optimizations for maximum performance.
        fn (Callable): The pre-training function to use.

    Returns:
        run.Partial: Partial configuration for pre-training.

    Examples:
        CLI usage:
            $ nemo llm pretrain --factory mixtral_8x22b_64k
            $ nemo llm pretrain --factory "mixtral_8x22b_64k(num_nodes=16, name='my_mixtral_pretrain')"

        Python API usage:
            >>> recipe = pretrain_recipe(name="mixtral_pretrain", num_nodes=16)
            >>> print(recipe)
    """
    recipe = run.Partial(
        fn,
        model=model(),
        trainer=trainer(
            num_nodes=num_nodes,
            num_gpus_per_node=num_gpus_per_node,
            callbacks=[run.Config(TimingCallback)],
        ),
        data=run.Config(
            MockDataModule,
            seq_length=65536,
            global_batch_size=512,  # assumed batch sizes
            micro_batch_size=1,
        ),
        log=default_log(dir=dir, name=name, tensorboard_logger=tensorboard_logger(name=name)),
        optim=distributed_fused_adam_with_cosine_annealing(max_lr=3e-4),
        resume=default_resume(),
    )

    if performance_mode:
        recipe = pretrain_performance_optimizations(recipe)

    return recipe


def pretrain_performance_optimizations(recipe: run.Partial) -> run.Partial:
    """
    Create a performance-optimized pre-training recipe for Mixtral 8x22B model.

    This method enables performance optimizations that may not be suitable for all use cases.
    It builds upon the standard pre-training recipe and adds additional performance enhancements.

    Args:
        recipe (run.Partial): Base pre-train recipe to which performance optimizations will be added

    Returns:
        run.Partial: Partial configuration for performance-optimized pre-training.

    Note:
        Use this method with caution and only when you need maximum performance.
        It may not be suitable for all hardware configurations or use cases.
    """
    recipe.trainer.callbacks.extend(
        [
            run.Config(MegatronTokenDropCallback),
            run.Config(
                MegatronCommOverlapCallback,
                overlap_param_gather_with_optimizer_step=False,
                align_param_gather=True,
            ),
        ]
    )

    # NOTE: the original recipe also overrides a few recipe.trainer.strategy fields
    # at this point; those exact settings are not recoverable here and are omitted.
    # The optimizer-side switch below is assumed to be enabled in performance mode.
    recipe.optim.config.use_precision_aware_optimizer = True

    return recipe