"""Core SpeechBrain code for running experiments.

Authors
 * Peter Plantinga 2020, 2023
 * Abdel Heba 2020
 * Mirco Ravanelli 2020
 * Aku Rouhe 2021
 * Andreas Nautsch 2022
 * Sylvain de Langen 2023
 * Adel Moumen 2023, 2024
    N)contextmanager)	dataclass)date)Enumauto)SimpleNamespace)resolve_references)DataParallel)SyncBatchNorm)DistributedDataParallel)
DataLoaderDistributedSamplerIterableDataset)tqdm)LoopedLoaderSaveableDataLoader)DistributedSamplerWrapperReproducibleRandomSampler)is_distributed_initialized)
get_logger)rm_vector_weight_decay)prepare_profilerzlog-config.yamlbrain_intra_epoch_ckpt      	test_onlyFdebugdebug_batches   debug_epochsdebug_persistentlydevicecpudata_parallel_backenddistributed_backendncclfind_unused_parametersjitjit_module_keyscompilecompile_module_keyscompile_modezreduce-overheadcompile_using_fullgraph#compile_using_dynamic_shape_tracing	precisionfp32eval_precisionauto_mix_precbfloat16_mix_precmax_grad_normg      @skip_nonfinite_gradsnonfinite_patiencenoprogressbarckpt_interval_minutesckpt_interval_stepsgrad_accumulation_factor   optimizer_step_limittqdm_colored_bartqdm_barcolorGREENMAGENTACYAN)trainvalidtestremove_vector_weight_decayprofile_trainingprofile_warmup   profile_stepsc                   @   s(   e Zd ZU dZejed< edd ZdS )	AMPConfigzConfiguration for automatic mixed precision (AMP).

    Arguments
    ---------
    dtype : torch.dtype
        The dtype to use for AMP.
    """

    dtype: torch.dtype

    @classmethod
    def from_name(self, name):
        """Create an AMPConfig from a string name.

        Arguments
        ---------
        name : str
            The name of the AMPConfig to create.  Must be one of `fp32`,
            `fp16`, or `bf16`.

        Returns
        -------
        AMPConfig
            The AMPConfig corresponding to the name.
        """
        if name is None or name == "fp32":
            return AMPConfig(torch.float32)
        elif name == "fp16":
            return AMPConfig(torch.float16)
        elif name == "bf16":
            return AMPConfig(torch.bfloat16)
        else:
            raise ValueError(
                f"Specified autocast mode ({name}) incorrect, "
                "expected one of `fp32`, `fp16`, `bf16`."
            )
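
# Usage sketch: resolve an autocast dtype from a precision flag. ``model`` and
# ``inputs`` below are illustrative placeholders, not names from this module.
#
#     amp = AMPConfig.from_name("bf16")
#     with torch.autocast(device_type="cuda", dtype=amp.dtype):
#         predictions = model(inputs)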


def create_experiment_directory(
    experiment_directory,
    hyperparams_to_save=None,
    overrides={},
    log_config=DEFAULT_LOG_CONFIG,
    save_env_desc=True,
):
    """Create the output folder and relevant experimental files.

    Arguments
    ---------
    experiment_directory : str
        The place where the experiment directory should be created.
    hyperparams_to_save : str
        A filename of a yaml file representing the parameters for this
        experiment. If passed, references are resolved, and the result is
        written to a file in the experiment directory called "hyperparams.yaml".
    overrides : dict
        A mapping of replacements made in the yaml file, to save in yaml.
    log_config : str
        A yaml filename containing configuration options for the logger.
    save_env_desc : bool
        If True, an environment state description is saved to the experiment
        directory, in a file called env.log in the experiment directory.
    Nzhyperparams.yamlwz# Generated %s from:)filez# %sz# yamllint disablezlog.txthandlersfile_handlerfilenamezBeginning experiment!zExperiment folder: zenv.log)#sbutilsdistributedif_main_processospathisdirmakedirsjoinopenr   printr   todayabspathshutilcopyfileobjinspect	getmodulecurrentframef_backrealpath__file__copyloggersetup_logging_logging_excepthooksys
excepthookquirkslog_applied_quirksinfoget_environment_descriptionwriteddp_barrier)experiment_directoryhyperparams_to_save	overrides
log_configsave_env_deschyperparams_filenamefresolved_yamlr]   modulecallingfilelog_filelogger_overridesdescription_strforT   rT   rU   create_experiment_directory   sX   


1r   c                 C   s   t jd| ||fd dS )z-Interrupt exception raising to log the error.z
Exception:)exc_infoN)rx   error)exc_type	exc_valueexc_tracebackrT   rT   rU   rz      s   rz   c                 C   s*  | du rt jdd } tjdd}|jdtdd |jdd	d
dd |jdd	d
dd |jdtddd |jdtddd |jdd	d
dd |jdtdd |jdtddd |jdd	d
dd |jdtdd d |jd!d	d
d"d |jd#d	d
d$d |jd%td&d'd( |jd)d	d
d*d |jd+td&d,d( |jd-td&d.d( |jd/td&d0d( |jd1td&d2d( |jd3td4d |jd5td6d |jd7dd
d8d |jd9dd
d:d |jd;td<d |jd=d	d
d>d |jd?td@d |jdAdd
dBd |jdCtdDd |jdEtdFd |jdGtdHd |jdItdJd |jdKd	d
dLd |jdMd	d
dNd |jdOd	d
dPd |jdQdRtdSdT |jdUdRtdVdT |	| \}}dWdX t
| D }|d }|d= t|}|dY rptj dZkrptd[tjd\}|durd]|d^ v r|d^ dd_ t| |d^< |||fS )`a  Parse command-line arguments to the experiment.

    Arguments
    ---------
    arg_list : list, None
        A list of arguments to parse.  If not given, this is read from
        `sys.argv[1:]`

    Returns
    -------
    param_file : str
        The location of the parameters file.
    run_opts : dict
        Run options, such as distributed, device, etc.
    overrides : dict
        The overrides to pass to ``load_hyperpyyaml``.

    Example
    -------
    >>> argv = ['hyperparams.yaml', '--device', 'cuda:1', '--seed', '10']
    >>> filename, run_opts, overrides = parse_arguments(argv)
    >>> filename
    'hyperparams.yaml'
    >>> run_opts["device"]
    'cuda:1'
    >>> overrides
    'seed: 10'
    """
    if arg_list is None:
        arg_list = sys.argv[1:]
    parser = argparse.ArgumentParser(description="Run a SpeechBrain experiment")
    parser.add_argument(
        "param_file",
        type=str,
        help="A yaml-formatted file using the extended YAML syntax. "
        "defined by SpeechBrain.",
    )
    # (Abridged) one ``--flag`` is registered here for every key in
    # ``run_opt_defaults`` (``--debug``, ``--device``, ``--precision``,
    # ``--profile_training``, ...); the options and help strings mirror the
    # run options documented on the ``Brain`` class.
    ...

    # Accept extra args to override the yaml and turn them into an overrides
    # string for ``load_hyperpyyaml``.
    run_opts, overrides = parser.parse_known_args(arg_list)
    run_opts = {k: v for k, v in vars(run_opts).items() if v is not None}
    param_file = run_opts["param_file"]
    del run_opts["param_file"]
    overrides = _convert_to_yaml(overrides)

    if run_opts.get("data_parallel_backend") and torch.cuda.device_count() == 0:
        raise ValueError("You must have at least 1 GPU.")

    # Under torchrun, point a generic "cuda" device at this process's GPU.
    local_rank = os.environ.get("LOCAL_RANK")
    if local_rank is not None and "cuda" in run_opts["device"]:
        run_opts["device"] = run_opts["device"][:-1] + str(local_rank)

    return param_file, run_opts, overrides

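# Typical recipe usage of the two helpers above (a sketch; ``load_hyperpyyaml``
# comes from the ``hyperpyyaml`` package and ``output_folder`` is a
# recipe-specific hparams key, not something this module defines):
#
#     hparams_file, run_opts, overrides = parse_arguments(sys.argv[1:])
#     with open(hparams_file) as fin:
#         hparams = load_hyperpyyaml(fin, overrides)
#     create_experiment_directory(
#         experiment_directory=hparams["output_folder"],
#         hyperparams_to_save=hparams_file,
#         overrides=overrides,
#     )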

def _convert_to_yaml(overrides):
    """Convert args to yaml for overrides"""
    yaml_string = ""

    # Handle '--arg=val' type args
    joined_args = "=".join(overrides)
    split_args = joined_args.split("=")

    for arg in split_args:
        if arg.startswith("--"):
            yaml_string += "\n" + arg[len("--") :] + ":"
        else:
            yaml_string += " " + arg

    return yaml_string.strip()
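
# Example of the conversion above: ``["--seed", "10", "--device=cuda:1"]``
# becomes ``"seed: 10\ndevice: cuda:1"``, which ``load_hyperpyyaml`` can apply
# as overrides on top of the hparams file.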


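# The Stage enum below is passed to every overridable hook (``compute_forward``,
# ``on_stage_end``, ...), so subclasses typically branch on it; a minimal sketch:
#
#     def on_stage_end(self, stage, stage_loss, epoch=None):
#         if stage == Stage.VALID:
#             print(f"Epoch {epoch}: valid loss {stage_loss:.3f}")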
class Stage(Enum):
    """Simple enum to track stage of experiments."""

    TRAIN = auto()
    VALID = auto()
    TEST = auto()


@sb.utils.checkpoints.register_checkpoint_hooks
class Brain:
    """Brain class abstracts away the details of data loops.

    The primary purpose of the `Brain` class is the implementation of
    the ``fit()`` method, which iterates epochs and datasets for the
    purpose of "fitting" a set of modules to a set of data.

    In order to use the ``fit()`` method, one should sub-class the ``Brain``
    class and override any methods for which the default behavior does not
    match the use case. For a simple use case (e.g., training a single model
    with a single dataset) the only methods that need to be overridden are:

    * ``compute_forward()``
    * ``compute_objectives()``

    The example below illustrates how overriding these two methods is done.

    For more complicated use cases, such as multiple modules that need to
    be updated, the following methods can be overridden:

    * ``fit_batch()``
    * ``evaluate_batch()``

    Arguments
    ---------
    modules : dict of str:torch.nn.Module pairs
        These modules are passed to the optimizer by default if they have
        trainable parameters, and will have ``train()``/``eval()`` called on them.
    opt_class : torch.optim class
        A torch optimizer constructor that takes only the list of
        parameters (e.g. a lambda or partial function definition). By default,
        this will be passed all modules in ``modules`` at the
        beginning of the ``fit()`` method. This behavior can be changed
        by overriding the ``configure_optimizers()`` method.
    hparams : dict
        Each key:value pair should consist of a string key and a hyperparameter
        that is used within the overridden methods. These will
        be accessible via an ``hparams`` attribute, using "dot" notation:
        e.g., self.hparams.model(x).
    run_opts : dict
        A set of options to change the runtime environment, including

        debug (bool)
            If ``True``, this will only iterate a few batches for all
            datasets, to ensure code runs without crashing.
        debug_batches (int)
            Number of batches to run in debug mode, Default ``2``.
        debug_epochs (int)
            Number of epochs to run in debug mode, Default ``2``.
            If a non-positive number is passed, all epochs are run.
        debug_persistently (bool)
            Keep data stored during debug mode (not using /tmp), Default ``False``.
        jit (bool)
            Enable to compile all modules using jit, Default ``False``.
        jit_module_keys (list of str)
            List of keys in ``modules`` that should be jit compiled.
        compile (bool)
            Enable to compile all modules using torch.compile, Default ``False``.
        compile_module_keys (list of str)
            List of keys in ``modules`` that should be compiled using
            ``torch.compile``. If ``torch.compile`` is unavailable,
            an error is raised.
        compile_mode (str)
            One of ``default``, ``reduce-overhead``, ``max-autotune``, Default ``reduce-overhead``.
        compile_using_fullgraph (bool)
            Whether it is ok to break model into several subgraphs, Default ``False``.
        compile_using_dynamic_shape_tracing (bool)
            Use dynamic shape tracing for compilation, Default ``False``.
        distributed_backend (str)
            One of ``nccl``, ``gloo``, ``mpi``.
        device (str)
            The location for performing computations.
        precision (str)
            One of ``fp32``, ``fp16``, ``bf16``.
        eval_precision (str)
            One of ``fp32``, ``fp16``, ``bf16``.
        auto_mix_prec (bool)
            If ``True``, automatic mixed-precision (fp16) is used.
            Activate it only with cuda. Note: this is a
            deprecated feature, and will be removed in the future.
        bfloat16_mix_prec (bool)
            If ``True``, automatic mixed-precision (bf16) is used.
            Activate it only with cuda. Note: this is a
            deprecated feature, and will be removed in the future.
        max_grad_norm (float)
            Default implementation of ``fit_batch()`` uses
            ``clip_grad_norm_`` with this value. Default: ``5``.
        skip_nonfinite_grads (bool)
            If ``True``, sets gradients to zero if they are non-finite
            (e.g., NaN, Inf). Default: ``False``.
        nonfinite_patience (int)
            Number of times to ignore non-finite losses before stopping.
            Default: ``3``.
        noprogressbar (bool)
            Whether to turn off progressbar when training. Default: ``False``.
        ckpt_interval_minutes (float)
            Amount of time between saving intra-epoch checkpoints,
            in minutes, default: ``15.0``. If non-positive, these are not saved.
        ckpt_interval_steps (int)
            Number of steps between saving intra-epoch checkpoints.
            If non-positive, these are not saved. Default: ``0``.


        Typically in a script this comes from ``speechbrain.parse_args``, which
        has different defaults than Brain. If an option is not defined here
        (keep in mind that parse_args will inject some options by default),
        then the option is also searched for in hparams (by key).
    checkpointer : speechbrain.Checkpointer
        By default, this will be used to load checkpoints, and will have the
        optimizer added to continue training if interrupted.

    Example
    -------
    >>> from torch.optim import SGD
    >>> class SimpleBrain(Brain):
    ...     def compute_forward(self, batch, stage):
    ...         return self.modules.model(batch[0])
    ...     def compute_objectives(self, predictions, batch, stage):
    ...         return torch.nn.functional.l1_loss(predictions, batch[0])
    >>> model = torch.nn.Linear(in_features=10, out_features=10)
    >>> brain = SimpleBrain({"model": model}, opt_class=lambda x: SGD(x, 0.1))
    >>> brain.fit(range(1), ([torch.rand(10, 10), torch.rand(10, 10)],))
    """

    def __init__(
        self,
        modules=None,
        opt_class=None,
        hparams=None,
        run_opts=None,
        checkpointer=None,
    ):
        # Implementation summary (abridged): apply ``run_opt_defaults``,
        # preferring command-line ``run_opts`` over values from the hparams
        # file ("Info: ... arg overridden by command line input" /
        # "... arg from hparam file is used"); warn on old Python versions;
        # resolve device, data_parallel and DDP settings; wrap ``modules`` in
        # a ``torch.nn.ModuleDict`` moved to the device and ``hparams`` in a
        # ``SimpleNamespace``; point the checkpointer at a temporary directory
        # when ``--debug`` is active; translate the deprecated
        # ``--auto_mix_prec`` / ``--bfloat16_mix_prec`` flags into
        # ``--precision``; build the AMP ``GradScaler`` and register it with
        # the checkpointer; initialize the step counters and average train
        # loss; and create the torch profiler when ``--profile_training`` is
        # set.
        ...

    def print_trainable_parameters(self):
        """Prints the number of trainable parameters in the model."""
        # Counts total and trainable parameters over ``self.modules`` and logs
        # a "Model Statistics" summary with the share of trainable parameters.
        ...

    def compute_forward(self, batch, stage):
        """Forward pass, to be overridden by sub-classes.

        Arguments
        ---------
        batch : torch.Tensor or tensors
            An element from the dataloader, including inputs for processing.
        stage : Stage
            The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST

        Returns
        -------
        torch.Tensor or torch.Tensors
            The outputs after all processing is complete.
            Directly passed to ``compute_objectives()``.
        NotImplementedError)rR   batchstagerT   rT   rU   compute_forward  s   zBrain.compute_forwardc                 C   r  )aA  Compute loss, to be overridden by sub-classes.

        Arguments
        ---------
        predictions : torch.Tensor or torch.Tensors
            The output tensor or tensors to evaluate.
            Comes directly from ``compute_forward()``.
        batch : torch.Tensor or tensors
            An element from the dataloader, including targets for comparison.
        stage : Stage
            The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST

        Returns
        -------
        loss : torch.Tensor
            A tensor with the computed loss.
        """
        raise NotImplementedError

    def on_stage_start(self, stage, epoch=None):
        """Gets called when a stage starts.

        Useful for defining class variables used during the stage.

        Arguments
        ---------
        stage : Stage
            The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST
        epoch : int
            The current epoch count.
        """
        pass

    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Gets called at the end of a stage.

        Useful for computing stage statistics, saving checkpoints, etc.

        Arguments
        ---------
        stage : Stage
            The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST
        stage_loss : float
            The average loss over the completed stage.
        epoch : int
            The current epoch count.
        """
        pass

    def make_dataloader(
        self, dataset, stage, ckpt_prefix="dataloader-", **loader_kwargs
    ):
        """Creates DataLoaders for Datasets.

        This is used by ``fit()`` and ``evaluate()`` if they just receive
        Datasets.

        Alternatively, this can be called from outside the Brain subclass.
        In that case, the DataLoader should be passed to ``fit()`` in place
        of the dataset.

        The Stage.TRAIN DataLoader is handled specially. It has extra args for
        shuffle and drop_last. In DDP a DistributedSampler is created (unless
        the dataset is an IterableDataset).

        NOTE
        ----
        Some important DataLoader arguments are passed via **loader_kwargs,
        e.g., batch_size, num_workers, pin_memory.

        NOTE
        ----
        By default, ``evaluate()`` specifies ckpt_prefix=None to stop the test
        DataLoader being added to the checkpointer. If you need to add a
        recoverable after saving checkpoints (e.g., at test time, after
        checkpointing the training), and still be able to recover reasonably,
        you should probably specify ``allow_partial_load=True``.

        Arguments
        ---------
        dataset : Dataset
            A set of data to use to create data loader. If the Dataset is a
            DynamicItemDataset, PaddedBatch is used as the default collate_fn,
            unless specified in loader_kwargs.
        stage : Stage
            The stage of the experiment: Stage.TRAIN, Stage.VALID, Stage.TEST
        ckpt_prefix : str, None
            Prefix to use for SaveableDataLoader Checkpoint name. The Stage
            name is added to this to create the full key. Set to None to not
            save the DataLoader.
        **loader_kwargs : dict
            Additional keyword arguments to the DataLoader.
            E.g., batch_size, num_workers, pin_memory.

        Returns
        -------
        DataLoader for the input dataset
        N)rb   r   r   _train_loader_specificsdataio
dataloadermake_dataloaderr   
isinstancer   r   rS   r   )rR   datasetr  ckpt_prefixloader_kwargsr  ckpt_keyrT   rT   rU   r    s    2

zBrain.make_dataloaderc                 C   sL  | dd }| dd}|r1| js1|d urtdtj dd}t||d}|| _| j|d< |d= | jrt|tst	| j
drF| j
jd	k}nd
}| dd}|d urgt|| j||d| _d|d< | j|d< |S | dd u rt|| j||d| _d|d< | j|d< |S t| dd | j|d| _| j|d< |S | jrt|trtd |S )NsamplershuffleFz>Cannot specify both shuffle=Trueand a sampler in loader_kwargsSB_GLOBAL_SEEDi&l!)seedsortingrandomT	drop_last)r   r+  r&  batch_sampler)r   r&  r+  )r   r&  zDCannot automatically solve distributed sampling for IterableDataset.)r   r   rQ   rf   r   r   r   r   r   r   r   r)  r   r   r   rx   r   )rR   r!  r#  r%  r&  r(  shuffle_ddpr+  rT   rT   rU   r    sd   






zBrain._train_loader_specificsc                 C   s4   |    |   |   | jdur| j  dS dS )a  Gets called at the beginning of ``fit()``, on multiple processes
        if ``distributed_count > 0`` and backend is ddp.

        Default implementation compiles the jit modules, initializes
        optimizers, and loads the latest checkpoint to resume training.
        N)_compile_wrap_distributedinit_optimizersr   recover_if_possiblerR   rT   rT   rU   on_fit_start^  s   	
zBrain.on_fit_startc                 C   sb   | j  }| jdur-| jrt| j }| || _d| ji| _| jdur/| jd| j dS dS dS )a  Called during ``on_fit_start()``, initialize optimizers
        after parameters are fully configured (e.g. DDP, jit).

        The default implementation of this method depends on an optimizer
        class being passed at initialization that takes only a list
        of parameters (e.g., a lambda or a partial function definition).
        This creates a single optimizer that optimizes all trainable params.

        Override this class if there are multiple optimizers.
        Nr   	optimizer)	r   r  r   rD   r   r4  r   r   r   )rR   
all_paramsrT   rT   rU   r0  s  s   



zBrain.init_optimizersFc                 C   sP   | j dur| | j  D ]}|j|d qdS | jdur&| jj|d dS dS )a
  Sets the gradients of all optimized ``torch.Tensor``s to zero
        if ``set_to_none=False`` (default) or to None otherwise.

        Setting gradients to None should save the memory, e.g.
        during ``evaluate()`` and thus larger batch might be used.
        Nset_to_none)r   freeze_optimizersvalues	zero_gradr   r4  )rR   r7  optrT   rT   rU   r:    s   

zBrain.zero_gradc                 C   s"   | j dur| j j||d dS dS )a)  Gets called at the beginning of ``evaluate()``

        Default implementation loads the best-performing checkpoint for
        evaluation, based on stored metrics.

        Arguments
        ---------
        max_key : str
            Key to use for finding best checkpoint (higher is better).
            By default, passed to ``self.checkpointer.recover_if_possible()``.
        min_key : str
            Key to use for finding best checkpoint (lower is better).
            By default, passed to ``self.checkpointer.recover_if_possible()``.
        Nmax_keymin_key)r   r1  )rR   r=  r>  rT   rT   rU   on_evaluate_start  s
   

zBrain.on_evaluate_startc              	   C   s&  t | j}| j| j dk}| || | | ] | jrMtj	|j
t| jjd | |tjj}| ||tjj}W d   n1 sGw   Y  n| |tjj}| ||tjj}| j|| j }| | |  W d   n1 szw   Y  |r|   | |||| |  S )a]  Fit one batch, override to do multiple updates.

        The default implementation depends on a few methods being defined
        with a particular behavior:

        * ``compute_forward()``
        * ``compute_objectives()``
        * ``optimizers_step()``

        Also depends on having optimizers passed at initialization.

        Arguments
        ---------
        batch : list of torch.Tensors
            Batch of data to use for training. Default implementation assumes
            this batch has two elements: inputs and targets.

        Returns
        -------
        detached loss
        r   rJ   device_typeN)rI   rV   r.   r   r9   on_fit_batch_startno_syncr   rM   autocastrJ   r!   r   r  rb   r   r   r  r   scalecheck_loss_isfinitebackwardoptimizers_stepon_fit_batch_enddetachr"   )rR   r  r   should_stepoutputslossscaled_lossrT   rT   rU   	fit_batch  s4   


zBrain.fit_batchc                 C   s>   t |s|  jd7  _| j| jkrtdtd dS dS )a  Check if the loss is finite.

        If the loss is not finite, log a helpful message and increment the `nonfinite_count`.
        If the `nonfinite_count` exceeds the `--nonfinite_patience` threshold, stop the training
        and raise an error.

        This check is particularly useful when the loss becomes NaN or inf, while the
        parameters and gradients remain finite. It helps prevent getting stuck in an
        infinite loop during training.

        Arguments
        ---------
        loss : tensor
            The loss tensor after ``backward()`` has been called but
            before the optimizers ``step()``.
        r:   zLoss is not finite and patience is exhausted. To debug, wrap `fit()` with autograd's `detect_anomaly()`, e.g.

with torch.autograd.detect_anomaly():
	brain.fit(...)zPatience not yet exhausted.N)rM   isfinitenonfinite_countr5   rQ   rx   r   )rR   rM  rT   rT   rU   rF    s   
zBrain.check_loss_isfinitec                 C   sN   | j  D ]}|jr$|jdur$t|j s$d|_td|j	 d qdS )zXChecks if the gradients are finite. If not, it will emit a warning and set them to zero.Nz
Gradients z% contain NaN or Inf. Setting to None.)
r   r  r  gradrM   rP  allrx   r   rS   )rR   paramrT   rT   rU   check_gradients  s   zBrain.check_gradientsc                 C   s   |S )zBy default, this method returns the passed optimizers.
        Override this method if you want to freeze some optimizers
        during training. To do so, return a of active optimizers.
        rT   )rR   
optimizersrT   rT   rU   r8    s   zBrain.freeze_optimizersc                 C   s   | j dur| | j }n| jdurd| ji}ndS | D ]}| j| q| D ]}tjj	
|jd d | j q*| j sG| jrG|   | D ]}| j| qK| j  | D ]}|jdd q]|  jd7  _dS )z~Performs a step of gradient descent on the optimizers. This method is called every
        ``grad_accumulation_factor`` steps.Nr4  r   paramsTr6  r:   )r   r8  r   r4  r9  r   unscale_rM   r   rc   clip_grad_norm_param_groupsr3   
is_enabledr4   rU  r   updater:  r   )rR   valid_optimizersr;  rT   rT   rU   rH    s&   


zBrain.optimizers_stepc                 C   r  )a  Called at the beginning of ``fit_batch()``.

        This method is not called under the AMP context manager. Do not assume
        automatic casting of the input batch to a lower precision (e.g. fp16).

        Arguments
        ---------
        batch : list of torch.Tensors
            Batch of data to use for training. Default implementation assumes
            this batch has two elements: inputs and targets.
        should_step : boolean
            Whether optimizer.step() was called or not.
        NrT   )rR   r  rK  rT   rT   rU   rB  A  r  zBrain.on_fit_batch_startc                 C   r  )a	  Called after ``fit_batch()``.

        Arguments
        ---------
        batch : list of torch.Tensors
            Batch of data to use for training. Default implementation assumes
            this batch has two elements: inputs and targets.
        outputs : list or dictionary of torch.Tensors
            Returned value of compute_forward().
        loss : torch.Tensor
            Returned value of compute_objectives().
        should_step : boolean
            Whether optimizer.step() was called or not.
        NrT   )rR   r  rL  rM  rK  rT   rT   rU   rI  Q  s   zBrain.on_fit_batch_endc                 C   s   t | j}| jr6tj|jt| jjd | j	||d}| j
|||d}W d   n1 s0w   Y  n| j	||d}| j
|||d}|  S )a`  Evaluate one batch, override for different procedure than train.

        The default implementation depends on two methods being defined
        with a particular behavior:

        * ``compute_forward()``
        * ``compute_objectives()``

        Arguments
        ---------
        batch : list of torch.Tensors
            Batch of data to use for evaluation. Default implementation assumes
            this batch has two elements: inputs and targets.
        stage : Stage
            The stage of the experiment: Stage.VALID, Stage.TEST

        Returns
        -------
        detached loss
        r@  r  N)rI   rV   r0   r   rM   rD  rJ   r!   r   r  r  rJ  r"   )rR   r  r  r   outrM  rT   rT   rU   evaluate_batchb  s   zBrain.evaluate_batchc           	      C   s  |  tj| | j  |   d| _| jd ur$t| jdr$| j	| t

 }d}t|| jd| | jd dy}| jd urC| j  |D ]d}| jrQtd  nY|  jd7  _|d7 }| |}| || j| _|j| jd | jd ur| j  | jj| jkrtd	 | j  t  | jr| j| jkr n| ||r|   t

 }d}qEW d    n1 sw   Y  | jdd
 | tj| j| d| _d| _d S )Nr   	set_epochTrA   )initialdynamic_ncolsdisablecolourzTrain iteration limit exceededr:   )
train_lossz+The profiler finished, training is stopped.r6  r   ) r  r   r   r   rA   r:  rQ  r   r   ra  timer   r   r=   r   start_optimizer_step_limit_exceededrx   r   rO  update_averager   set_postfixstep_numr   stopquitr   r   _should_save_intra_epoch_ckpt_save_intra_epoch_ckptr  )	rR   	train_setr  enablelast_ckpt_timesteps_since_ckpttr  rM  rT   rT   rU   
_fit_train  sj   







+
zBrain._fit_trainc                 C   s   | j du rdS | jdkr| jdkrdS t | d }d| j  k o%|k n  }|p6d| j  k o4|kn  }t s<|S |g}tjj|dd |d S )zDetermines if an intra-epoch checkpoint should be saved.

        Returns True if there's a checkpointer and time or steps has exceeded limit.
        NFr   g      N@)src)r   r7   r8   rg  r   rM   rd   broadcast_object_list)rR   rs  rt  elapsed_minutesdecisionbroadcast_listrT   rT   rU   ro    s   
z#Brain._should_save_intra_epoch_ckptc                 C   s   |d urc|  tj| | j  d}t C t|d| | jd dD ]"}|  j	d7  _	| j
|tjd}| ||}| jrE| j	| jkrE nq#d| _	| tj|| W d    d S 1 s\w   Y  d S d S )Nr   TrB   rc  rd  re  r:   r^  r   )r  r   r   r   evalrM   no_gradr   r=   r   r`  rj  r   r   r  )rR   	valid_setr  rr  avg_valid_lossr  rM  rT   rT   rU   
_fit_valid  s*   


"zBrain._fit_validc           	      C   s   | j r
td dS t|ts!t|ts!| j|fdtjj	i|}|dur=t|ts=t|ts=| j|ftjj
dd|}|   |du rI| j }|oPtjj }|D ] }| j|||d | j|||d | jrm|| jksp| jrs dS qSdS )aE  Iterate epochs and datasets to improve objective.

        Relies on the existence of multiple functions that can (or should) be
        overridden. The following methods are used and expected to have a
        certain behavior:

        * ``fit_batch()``
        * ``evaluate_batch()``
        * ``update_average()``

        If the initialization was done with distributed_count > 0 and the
        distributed_backend is ddp, this will generally handle multiprocess
        logic, like splitting the training data into subsets for each device and
        only saving a checkpoint on the main process.

        Arguments
        ---------
        epoch_counter : iterable
            Each call should return an integer indicating the epoch count.
        train_set : Dataset, DataLoader
            A set of data to use for training. If a Dataset is given, a
            DataLoader is automatically created. If a DataLoader is given, it is
            used directly.
        valid_set : Dataset, DataLoader
            A set of data to use for validation. If a Dataset is given, a
            DataLoader is automatically created. If a DataLoader is given, it is
            used directly.
        progressbar : bool
            Whether to display the progress of each epoch in a progressbar.
        train_loader_kwargs : dict
            Kwargs passed to `make_dataloader()` for making the train_loader
            (if train_set is a Dataset, not DataLoader).
            E.G. batch_size, num_workers.
            DataLoader kwargs are all valid.
        valid_loader_kwargs : dict
            Kwargs passed to `make_dataloader()` for making the valid_loader
            (if valid_set is a Dataset, not DataLoader).
            E.g., batch_size, num_workers.
            DataLoader kwargs are all valid.

        Returns
        -------
        None
        z8Test only mode, skipping training and validation stages.Nr  )r  r"  )rq  r  rr  )r  r  rr  )r   rx   r   r   r   r   r  rb   r   r   r   r3  r6   rc   rd   re   rv  r  r   r   ri  )	rR   epoch_counterrq  r  progressbartrain_loader_kwargsvalid_loader_kwargsrr  r  rT   rT   rU   fit  sX   5
z	Brain.fitc                 C   s   | j d uo
| j| j kS r   )r;   r   r2  rT   rT   rU   ri  ^  s   

z$Brain._optimizer_step_limit_exceededc                 C   s$   | j jdddd tditjd dS )z,Saves a CKPT with specific intra-epoch flag.Fr:   c                 S   s
   t | jv S r   )INTRA_EPOCH_CKPT_FLAGmeta)crT   rT   rU   <lambda>j  s   
 z.Brain._save_intra_epoch_ckpt.<locals>.<lambda>T)end_of_epochnum_to_keepckpt_predicater  	verbosityN)r   save_and_keep_onlyr  loggingDEBUGr2  rT   rT   rU   rp  e  s   
zBrain._save_intra_epoch_ckptc                 C   sj  t td}|s| jdurtdt }| jr+| jdu r!t| j}n
t| j}td t }| j	rF| j
du r<t| j}n
t| j
}td ||B D ]}|| jvrYtd| dqJ|D ]?}ztj| j| | j| j| jd}W n ty } ztd	| d
|  W Y d}~q\d}~ww || j| j|< || q\|D ]}tj	| j| }|| j| j|< qdS )z;Compile requested modules with either JIT or TorchInductor.r)   Nz_'compile_module_keys' specified, but this install of PyTorch seems to be too old to support it.zy--compile and --compile_module_keys are both specified. Only modules specified in --compile_module_keys will be compiled.zm--jit and --jit_module_keys are both specified. Only modules specified in --jit_module_keys will be compiled.zmodule z% is not defined in your hparams file.)mode	fullgraphdynamic'zh' in 'compile_module_keys' failed to compile and will be skipped (may fallback onto JIT, if specified): )r   rM   r*   rQ   setr)   r   rx   r   r'   r(   r+   r,   r-   	Exceptionr   r!   discardscript)rR   compile_availabler*   r(   rS   r   erT   rT   rU   r.  o  sd   







zBrain._compilec                 C   s   | j s| jsdS | j rD| j D ]1\}}tdd | D rAt|}| jdkr2t	|d| j
d}n
t	|| jg| j
d}|| j|< qdS | j D ]\}}tdd | D rat|}|| j|< qIdS )z5Wrap modules with distributed wrapper when requested.Nc                 s       | ]}|j V  qd S r   r  r   prT   rT   rU   	<genexpr>      z*Brain._wrap_distributed.<locals>.<genexpr>gloo)
device_idsr&   c                 s   r  r   r  r  rT   rT   rU   r    r  )r   r#   r   r   anyr  r
   convert_sync_batchnormr$   DDPr&   r!   DP)rR   rS   r   rT   rT   rU   r/    s4   



zBrain._wrap_distributedc           
      C   s  |du r| j  }|otjj }t|ts)t|ts)d|d< | j|t	j
fi |}| j||d | jt	j
dd | j  d}t ? t|d| | jd dD ]"}|  jd	7  _| j|t	j
d
}	| |	|}| jrr| j| jkrr nqP| t	j
|d W d   n1 sw   Y  d| _|S )a  Iterate test_set and evaluate brain performance. By default, loads
        the best-performing checkpoint (as recorded using the checkpointer).

        Arguments
        ---------
        test_set : Dataset, DataLoader
            If a DataLoader is given, it is iterated directly. Otherwise passed
            to ``self.make_dataloader()``.
        max_key : str
            Key to use for finding best checkpoint, passed to
            ``on_evaluate_start()``.
        min_key : str
            Key to use for finding best checkpoint, passed to
            ``on_evaluate_start()``.
        progressbar : bool
            Whether to display the progress in a progressbar.
        test_loader_kwargs : dict
            Kwargs passed to ``make_dataloader()`` if ``test_set`` is not a
            DataLoader. NOTE: ``loader_kwargs["ckpt_prefix"]`` gets
            automatically overwritten to ``None`` (so that the test DataLoader
            is not added to the checkpointer).

        Returns
        -------
        average test loss
        Nr"  r<  )r  r   TrC   r|  r:   r^  r   )r6   rb   rc   rd   re   r   r   r   r  r   r   r?  r  r   r}  rM   r~  r   r=   r   r`  rj  r   r   r  )
rR   test_setr=  r>  r  test_loader_kwargsrr  avg_test_lossr  rM  rT   rT   rU   evaluate  sD   "


zBrain.evaluatec                 C   s.   t |r||| j 8 }|t|| j 7 }|S )a4  Update running average of the loss.

        Arguments
        ---------
        loss : torch.tensor
            detached loss, a single float value.
        avg_loss : float
            current running average.

        Returns
        -------
        avg_loss : float
            The average loss.
        )rM   rP  r   r   )rR   rM  avg_lossrT   rT   rU   rj    s   
zBrain.update_averageTc                 c   s~    |r:g }| j  D ]}t|dsq
||j d|_q
dV  d}| j  D ]}t|ds.q&|| |_|d7 }q&dS dV  dS )ao  Copies pytorch's implementation for doing no_sync across all modules.

        Explanation: nn.module.no_sync() is a context manager for when one does
        not want to sync gradients, which happens when using both DDP and gradient accumulation.
        Speechbrain brain's class can contain multiple modules and calling no_sync on these
        individually would be very awkward, therefore this contextmanager exists.

        Arguments
        ---------
        use : bool
            If set to `False` will still sync gradients, useful to make behavior toggleable.

        Yields
        ------
        None
        require_backward_grad_syncFNr   r:   )r   r9  r   appendr  )rR   useold_values_listr   irT   rT   rU   rC  &  s"   




zBrain.no_syncc                 C   sR   | j | j| jd}t|d}|t| W d    d S 1 s"w   Y  d S )N)r   r   r   r]   )r   r   r   rk   r   yamldump)rR   rg   	save_dictr]   rT   rT   rU   _saveJ  s   "zBrain._savec                 C   s   ~t |}t|}W d    n1 sw   Y  |d | _|d | _d|vrA| jj}d| d}|d7 }t| | j| _	d S |d | _	d S )Nr   r   r   z'optimizer_step' not found in z checkpoint.z7 Using the saved 'step' value (BACKWARDS COMPATIBILITY))
rk   r  	safe_loadr   r   r  rW   warningswarnr   )rR   rg   r  r   r  clsnameMSGrT   rT   rU   _recoverT  s   



zBrain._recover)NNNNNr   )r  )F)NN)T).rW   rX   rY   rZ   r   r   r  r  r  r  r  r  r3  r0  r:  r?  rO  rF  rU  r8  rH  rB  rI  rM   r~  r`  rv  ro  r  r  propertyri  rp  r.  r/  r  rj  r   rC  rb   rc   checkpointsmark_as_saverr  mark_as_loaderr  rT   rT   rT   rU   r     sn    }
 Q/


JG

3
/
!A
b

@
G#
	r   r   )OrZ   r   rq   r  rf   r   ro   r{   r   rg  r  