from typing import Optional

import lightning.pytorch as pl
import nemo_run as run
import torch

from nemo.collections.llm.api import finetune, pretrain
from nemo.collections.llm.gpt.data.mock import MockDataModule
from nemo.collections.llm.peft import PEFT_STR2CLS
from nemo.collections.llm.recipes.finetune_default import default_finetune_recipe
from nemo.collections.llm.recipes.log.default import default_log, default_resume, tensorboard_logger
from nemo.collections.llm.recipes.nemotron import nemotron_model, nemotron_trainer
from nemo.collections.llm.recipes.optim.adam import distributed_fused_adam_with_cosine_annealing
from nemo.lightning.pytorch.callbacks.garbage_collection import GarbageCollectionCallback
from nemo.lightning.pytorch.callbacks.megatron_comm_overlap import MegatronCommOverlapCallback
from nemo.utils.exp_manager import TimingCallback

NAME = "nemotron4_340b"


@run.cli.factory(name=NAME)
def model() -> run.Config[pl.LightningModule]:
    """
    Factory function to create a Nemotron4 340B model configuration.

    Returns:
        run.Config[pl.LightningModule]: Configuration for the Nemotron4 340B model.

    Examples:
        CLI usage:
            $ nemo llm pretrain model=nemotron4_340b ...

        Python API usage:
            >>> model_config = model()
            >>> print(model_config)
    """
    return nemotron_model(version=NAME)


@run.cli.factory(target=pretrain, name=NAME)
def pretrain_recipe(
    dir: Optional[str] = None,
    name: str = "default",
    # Trainer
    tensor_parallelism: int = 8,
    pipeline_parallelism: int = 12,
    pipeline_parallelism_type: Optional[torch.dtype] = torch.bfloat16,
    virtual_pipeline_parallelism: Optional[int] = 8,
    context_parallelism: int = 1,
    sequence_parallelism: bool = True,
    num_nodes: int = 768,
    num_gpus_per_node: int = 8,
    max_steps: int = 100000,
    precision: str = "bf16-mixed",
    accumulate_grad_batches: int = 1,
    gradient_clip_val: float = 1.0,
    limit_test_batches: int = 32,
    limit_val_batches: int = 32,
    log_every_n_steps: int = 10,
    val_check_interval: int = 2000,
    # Data
    global_batch_size=2304,
    micro_batch_size=1,
    seq_length=4096,
    # Optimizer
    warmup_steps=500,
    constant_steps=0,
    min_lr=1e-5,
    max_lr=1e-4,
    # Training behavior
    performance_mode: bool = False,
    fn=pretrain,
) -> run.Partial:
    """
    Create a pre-training recipe for Nemotron4 340B model.

    This function sets up a complete configuration for pre-training, including
    model, trainer, data, logging, optimization, and resumption settings.

    Args:
        dir (Optional[str]): Directory for saving logs and checkpoints.
        name (str): Name of the pre-training run.
        tensor_parallelism (int): Degree of tensor model parallelism.
        pipeline_parallelism (int): Degree of pipeline model parallelism.
        pipeline_parallelism_type (Optional[torch.dtype]): Data type for pipeline parallelism.
        virtual_pipeline_parallelism (Optional[int]): Size of virtual pipeline parallelism.
        context_parallelism (int): Degree of context parallelism.
        sequence_parallelism (bool): Whether to use sequence parallelism.
        num_nodes (int): Number of compute nodes to use.
        num_gpus_per_node (int): Number of GPUs per node.
        max_steps (int): Maximum number of training steps.
        precision (str): Precision configuration, one of fp32, 16-mixed or bf16-mixed.
        accumulate_grad_batches (int): Number of steps per gradient accumulation.
        gradient_clip_val (float): Value for gradient clipping.
        limit_test_batches (int): Limit the number of test batches.
        limit_val_batches (int): Limit the number of validation batches.
        log_every_n_steps (int): Log every n steps.
        val_check_interval (int): Run validation every N steps.
        global_batch_size (int): Global batch size.
        micro_batch_size (int): Micro batch size.
        seq_length (int): Sequence length.
        warmup_steps (int): Number of warmup steps.
        constant_steps (int): Number of constant steps.
        min_lr (float): Minimum learning rate.
        max_lr (float): Maximum learning rate.
        performance_mode (bool): If true, enables optimizations for maximum performance.
        fn (Callable): The pre-training function to use.

    Returns:
        run.Partial: Partial configuration for pre-training.

    Examples:
        CLI usage:
            $ nemo llm pretrain --factory nemotron4_340b
            $ nemo llm pretrain --factory "nemotron4_340b(num_nodes=1, name='my_nemotron_pretrain')"

        Python API usage:
            >>> recipe = pretrain_recipe(name="nemotron_pretrain", num_nodes=1)
            >>> print(recipe)
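
        Overriding fields on the returned recipe (an illustrative sketch; the attribute paths
        follow the run.Partial assembled by this function):
            >>> recipe = pretrain_recipe(name="nemotron_pretrain", num_nodes=1)
            >>> recipe.trainer.max_steps = 100  # shorten the schedule for a smoke test
            >>> recipe.data.global_batch_size = 8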

    Note:
        This recipe uses a mock dataset; see the finetune examples for how to change the dataset.
    """
    recipe = run.Partial(
        fn,
        model=model(),
        trainer=nemotron_trainer(
            tensor_parallelism=tensor_parallelism,
            pipeline_parallelism=pipeline_parallelism,
            pipeline_parallelism_type=pipeline_parallelism_type,
            virtual_pipeline_parallelism=virtual_pipeline_parallelism,
            context_parallelism=context_parallelism,
            sequence_parallelism=sequence_parallelism,
            num_nodes=num_nodes,
            num_gpus_per_node=num_gpus_per_node,
            max_steps=max_steps,
            precision=precision,
            accumulate_grad_batches=accumulate_grad_batches,
            limit_test_batches=limit_test_batches,
            limit_val_batches=limit_val_batches,
            log_every_n_steps=log_every_n_steps,
            val_check_interval=val_check_interval,
            callbacks=[run.Config(TimingCallback)],
        ),
        data=run.Config(
            MockDataModule,
            seq_length=seq_length,
            global_batch_size=global_batch_size,
            micro_batch_size=micro_batch_size,
        ),
        log=default_log(dir=dir, name=name, tensorboard_logger=tensorboard_logger(name=name)),
        optim=distributed_fused_adam_with_cosine_annealing(
            precision=precision,
            warmup_steps=warmup_steps,
            constant_steps=constant_steps,
            min_lr=min_lr,
            max_lr=max_lr,
            clip_grad=gradient_clip_val,
        ),
        resume=default_resume(),
    )

    if performance_mode:
        recipe = pretrain_performance_optimizations(recipe)

    return recipe


def pretrain_performance_optimizations(recipe: run.Partial) -> run.Partial:
    """
    Create a performance-optimized pre-training recipe for Nemotron4 340B model.

    This method enables performance optimizations that may not be suitable for all use cases.
    It builds upon the standard pre-training recipe and adds additional performance enhancements.

    Args:
        recipe (run.Partial): Base pre-train recipe to which performance optimizations will be added

    Returns:
        run.Partial: Partial configuration for performance-optimized pre-training.
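
    Examples:
        Python API usage (illustrative; this function is normally reached via
        pretrain_recipe(performance_mode=True)):
            >>> recipe = pretrain_recipe(name="nemotron_pretrain", performance_mode=True)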

    Note:
        Use this method with caution and only when you need maximum performance.
        It may not be suitable for all hardware configurations or use cases.
    """
    if not recipe.trainer.callbacks:
        recipe.trainer.callbacks = []

    garbage_collection_callback = run.Config(
        GarbageCollectionCallback,
        gc_interval_train=100,
        gc_interval_val=100,
    )
    mcomm_overlap_callback = run.Config(
        MegatronCommOverlapCallback,
        tp_comm_overlap=True,
        defer_embedding_wgrad_compute=True,
        wgrad_deferral_limit=22,
        overlap_param_gather_with_optimizer_step=False,
    )
    recipe.trainer.callbacks.extend([garbage_collection_callback, mcomm_overlap_callback])

    recipe.trainer.plugins.grad_reduce_in_fp32 = False
    recipe.optim.config.use_precision_aware_optimizer = True

    return recipe


@run.cli.factory(target=finetune, name=NAME)
def finetune_recipe(
    dir: Optional[str] = None,
    name: str = "default",
    num_nodes: int = 4,
    num_gpus_per_node: int = 8,
    peft_scheme: Optional[str] = 'lora',
    packed_sequence: bool = False,
) -> run.Partial:
    """
    Create a fine-tuning recipe for Nemotron4 340B model.

    This function sets up a complete configuration for fine-tuning, including
    model, trainer, data, logging, optimization, and resumption settings.
    The recipe uses LoRA (Low-Rank Adaptation) for efficient fine-tuning, unless peft_scheme is set to None.

    Args:
        dir (Optional[str]): Directory for saving logs and checkpoints.
        name (str): Name of the fine-tuning run.
        num_nodes (int): Number of compute nodes to use.
        num_gpus_per_node (int): Number of GPUs per node.
        peft_scheme (Optional[str]): Name of the peft scheme to use for fine-tuning.
            Allowed values: 'lora'/'dora'/'none'/None.
        packed_sequence (Optional[bool]): Packing multiple training sequences into one long sequence for training
            efficiency. Default sequence length is 2048.

    Returns:
        run.Partial: Partial configuration for fine-tuning.

    Examples:
        CLI usage:
            $ nemo llm finetune --factory nemotron4_340b

        Python API usage:
            >>> recipe = finetune_recipe(name="nemotron4_340b_finetune", num_nodes=2)
            >>> print(recipe)
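
        Overriding fields on the returned recipe (an illustrative sketch; attribute paths follow
        the recipe assembled by this function):
            >>> recipe = finetune_recipe(name="nemotron4_340b_finetune", num_nodes=4, peft_scheme="lora")
            >>> recipe.optim.config.lr = 5e-5  # assumed override; tune for the target dataset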

    Note:
        This recipe uses the SQuAD dataset for fine-tuning.
    """
    recipe = default_finetune_recipe(
        model(), "mgoin/Nemotron-4-340B-Base-hf", dir, name, num_nodes, num_gpus_per_node, packed_sequence
    )
    if peft_scheme is None or peft_scheme.lower() == 'none':
        assert num_nodes >= 12
        recipe.trainer.strategy.tensor_model_parallel_size = 8
        recipe.trainer.strategy.pipeline_model_parallel_size = 12
        recipe.optim.config.lr = 5e-6
    elif peft_scheme.lower() in ['lora', 'dora']:
        recipe.peft = run.Config(PEFT_STR2CLS[peft_scheme.lower()])
        recipe.trainer.strategy.tensor_model_parallel_size = 8
        recipe.trainer.strategy.pipeline_model_parallel_size = 4
        recipe.optim.config.lr = 1e-4
    else:
        raise ValueError(f"Unrecognized peft scheme: {peft_scheme}")

    recipe.model.config.cross_entropy_loss_fusion = False

    return recipe