o
    „o™i  ã                   @   s¬  d Z ddlZddlZddlZddlmZ ddlmZ ddlm	Z	m
Z
mZmZmZmZmZ ddlmZ ddlZddlmZ ddlmZ ddlmZ dd	lmZ dd
lmZ ddlmZ ddl m!Z!m"Z"m#Z#m$Z$ ddl%m&Z& ddl'm(Z( ddl)m*Z* ddl+m,Z, ddl-m.Z. ddl/m0Z0m1Z1 ddl2m3Z3 ddl4m5Z5 ddl6m7Z7m8Z8 ddl9m:Z:m;Z; ddl<m=Z= ddl>m?Z?m@Z@ ddlAmBZBmCZC ddlDmEZE ddlFmGZGmHZHmIZImJZJ ddlKmLZL ddlMmNZN ddlOmPZP ddlQmRZR dd lSmTZTmUZUmVZV dd!lWmXZX dd"lYmZZZm[Z[m\Z\m]Z] dd#l^m_Z_m`Z` dd$lambZb dd%lcmdZdmeZe dd&lfmgZg dd'lhmiZi dd(ljmkZkmlZl dd)lmmnZn dd*lompZpmqZqmrZrmsZsmtZt dd+lumvZv e wex¡ZyG d,d-„ d-ƒZzdS ).z!Trainer to automate the training.é    N)Úcontextmanager)Ú	timedelta)ÚAnyÚDictÚ	GeneratorÚIterableÚListÚOptionalÚUnion)Úproxy)Ú	Optimizer)Úconvert_tensors_to_scalars)Ú_is_local_file_protocol)Ú_PATH)ÚAccelerator)ÚCallbackÚ
CheckpointÚEarlyStoppingÚProgressBar)ÚLightningDataModule)ÚLogger)Ú	CSVLogger)ÚTensorBoardLogger)Ú_log_hyperparams)Ú_PredictionLoopÚ_TrainingEpochLoop)Ú_EvaluationLoop)Ú_FitLoop)Ú_parse_loop_limitsÚ_reset_progress)Ú_PLUGIN_INPUTÚ	Precision)ÚProfiler)ÚParallelStrategyÚStrategy)ÚcallÚsetup)Ú_verify_loop_configurations)Ú_LITERAL_WARNÚ_PRECISION_INPUTÚ_PRECISION_INPUT_STRÚ_AcceleratorConnector)Ú_CallbackConnector)Ú_CheckpointConnector)Ú_DataConnector)Ú_LoggerConnector)Ú	_OUT_DICTÚ
_PBAR_DICTÚ_ResultCollection)Ú_SignalConnector)ÚRunningStageÚ	TrainerFnÚTrainerStateÚTrainerStatus)ÚGradClipAlgorithmTypeÚparsing)Ú_defaults_from_env_vars)Ú_maybe_unwrap_optimizedÚ!_verify_strategy_supports_compile)ÚMisconfigurationException)Úis_overridden)Úrank_zero_infoÚrank_zero_warn)Úisolate_rng)Ú_EVALUATE_OUTPUTÚ_PREDICT_OUTPUTÚEVAL_DATALOADERSÚTRAIN_DATALOADERSÚLRSchedulerConfig)ÚPossibleUserWarningc                R       s
  e Zd Zedddddddddddddddddddddddddddddddddddddddd	œ'd
eeef deeef deee	 ee	f de	de
e de
eeee ef  de
eee ef  dee	ef de
e	 de
e	 de	de
e	 de
eeeeee	f f  de
ee	ef  de
ee	ef  de
ee	ef  de
ee	ef  dee	ef de
ee	ef  de
e	 de
e	 de
e	 d e
e d!e
e d"e
e d#e	d$e
ee	ef  d%e
e d&e
eeef  d'e
e d(ed)ed*e
eeef  d+ed,ed-e
eeee f  d.ed/e	d0e
e d1dfP‡ fd2d3„ƒZ				dçd4d5d6e
eeef  d7e
e d8e
e d9e
e d1dfd:d;„Z				dçd4d5d6e
eeef  d7e
e d8e
e d9e
e d1dfd<d=„Z					dèd4e
d5 d>e
eeef  d9e
e d?ed8e
e d1efd@dA„Z					dèd4e
d5 d>e
eeef  d9e
e d?ed8e
e d1e
eeef  fdBdC„Z 					dèd4e
d5 d>e
eeef  d9e
e d?ed8e
e d1efdDdE„Z!					dèd4e
d5 d>e
eeef  d9e
e d?ed8e
e d1e
eeef  fdFdG„Z"					déd4e
d5 d>e
eeef  d8e
e dHe
e d9e
e d1e
e fdIdJ„Z#					déd4e
d5 d>e
eeef  d8e
e dHe
e d9e
e d1e
e fdKdL„Z$	dêd4d5d9e
e d1e
eeef  fdMdN„Z%dëdOdP„Z&d1e
eeef  fdQdR„Z'dëdSdT„Z(dëdUdV„Z)e*dêdWe
e d1e+fdXdY„ƒZ,dZe-d[e-d1dfd\d]„Z.	 e/d1efd^d_„ƒZ0e/d1efd`da„ƒZ1e/d1e2fdbdc„ƒZ3e/d1e	fddde„ƒZ4e/d1e	fdfdg„ƒZ5e/d1e	fdhdi„ƒZ6e/d1e	fdjdk„ƒZ7e/d1e	fdldm„ƒZ8e/d1ee	 fdndo„ƒZ9e/d1e	fdpdq„ƒZ:e/dìdrds„ƒZ;e/d1ee< fdtdu„ƒZ=e=j>dvee< d1dfdwdu„ƒZ=e/d1ee? fdxdy„ƒZ@e/d1eAfdzd{„ƒZBe/d1e
e- fd|d}„ƒZCe/d1e
eDjEjF fd~d„ƒZG	 e/d1e
e fd€d„ƒZHe/d1efd‚dƒ„ƒZIe/d1e
eee-f  fd„d…„ƒZJe/d1efd†d‡„ƒZKe/d1efdˆd‰„ƒZLe/d1e
eM fdŠd‹„ƒZNe/d1eeM fdŒd„ƒZOe/d1e
eP fdŽd„ƒZQe/d1eeP fdd‘„ƒZRe/d1e
eS fd’d“„ƒZTe/d1e
e fd”d•„ƒZUeUj>d9e
e d1dfd–d•„ƒZU	díd—ed˜ed™e
e- d1dfdšd›„ZV	 e/d1efdœd„ƒZWe/d1efdždŸ„ƒZXeXj>d ed1dfd¡dŸ„ƒZXe/d1efd¢d£„ƒZYeYj>d ed1dfd¤d£„ƒZYe/d1efd¥d¦„ƒZZeZj>d ed1dfd§d¦„ƒZZe/d1efd¨d©„ƒZ[e[j>d ed1dfdªd©„ƒZ[e/d1efd«d¬„ƒZ\e/d1efd­d®„ƒZ]e]j>d ed1dfd¯d®„ƒZ]e/d1efd°d±„ƒZ^	 e/d1e	fd²d³„ƒZ_e/d1e	fd´dµ„ƒZ`e/d1e
e	 fd¶d·„ƒZae/d1e
e	 fd¸d¹„ƒZbe/d1e	fdºd»„ƒZce/d1e
e	 fd¼d½„ƒZde/d1efd¾d¿„ƒZee/d1e
e fdÀdÁ„ƒZfe/d1e
e fdÂdÃ„ƒZge/d1e
e fdÄdÅ„ƒZhe/d1e
e fdÆdÇ„ƒZie/d1ee	ef fdÈdÉ„ƒZje/d1eee	ef  fdÊdË„ƒZke/d1eee	ef  fdÌdÍ„ƒZle/d1eee	ef  fdÎdÏ„ƒZme/d1eee	ef  fdÐdÑ„ƒZne/d1eofdÒdÓ„ƒZpe/d1e
eeqeoerf  fdÔdÕ„ƒZs	 e/d1e
e fdÖd×„ƒZtetj>de
e d1dfdØd×„ƒZte/d1ee fdÙdÚ„ƒZueuj>dÛe
ee  d1dfdÜdÚ„ƒZue/d1evfdÝdÞ„ƒZwe/d1evfdßdà„ƒZxe/d1eyfdádâ„ƒZze/d1e
e{ fdãdä„ƒZ|	 e/d1ee	ef fdådæ„ƒZ}‡  Z~S )îÚTrainerÚautoé   NFéÿÿÿÿg        Tr   )'ÚacceleratorÚstrategyÚdevicesÚ	num_nodesÚ	precisionÚloggerÚ	callbacksÚfast_dev_runÚ
max_epochsÚ
min_epochsÚ	max_stepsÚ	min_stepsÚmax_timeÚlimit_train_batchesÚlimit_val_batchesÚlimit_test_batchesÚlimit_predict_batchesÚoverfit_batchesÚval_check_intervalÚcheck_val_every_n_epochÚnum_sanity_val_stepsÚlog_every_n_stepsÚenable_checkpointingÚenable_progress_barÚenable_model_summaryÚaccumulate_grad_batchesÚgradient_clip_valÚgradient_clip_algorithmÚdeterministicÚ	benchmarkÚinference_modeÚuse_distributed_samplerÚprofilerÚdetect_anomalyÚ	barebonesÚpluginsÚsync_batchnormÚ!reload_dataloaders_every_n_epochsÚdefault_root_dirrL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rV   rW   rX   rY   rZ   r[   r\   r]   r^   r_   r`   ra   rb   rc   rd   re   rf   rg   rh   ri   rj   rk   rl   rm   rn   ro   rp   rq   rr   Úreturnc       '   )         sŠ  t ƒ  ¡  t | jj› dtƒ › ¡ |'durt |'¡}'|#| _	|#r±|r*t
d|›dƒ‚d}|dur<|dur<t
d|›dƒ‚d}|rHt
d|›d	ƒ‚d}|durZ|d
krZt
d|›dƒ‚d
}|rft
d|›dƒ‚d}|durx|d
krxt
d|›dƒ‚d
}|durŠ|d
krŠt
d|›dƒ‚|"r”t
d|"›dƒ‚|!dur t
d|!›dƒ‚d}(tdtj› tj |(¡› ƒ n$|du r·d}|du r½d}|du rÃd}|du rÉd}|du rÏd}|du rÕd}t| ƒ| _t|||||%|| |||$d
| _t| ƒ| _t| ƒ| _t| ƒ| _t| ƒ| _t| |
|	d| _t| ||d| j_t| tj t!j |d| _"t| tj#t!j#|d| _$t%| |d| _&|| _'| j (||||'||¡ |  | j (||&|¡ |durXt)|t*t+fƒsXt,d|› d ƒ‚|durrt- .| /¡ ¡srt0d!|› d"t- 1¡ › d ƒ‚|| _2|dur€t-| /¡ ƒnd| _3|"rŠtd#ƒ |"| _4t5 6| ¡ d| _7t8ƒ | _9t5 :| |!¡ |  | j (||¡ |  |  |  |  |  |  |  t5 ;| ||||||||¡	 dS )$a†'  Customize every aspect of training via flags.

        Args:
            accelerator: Supports passing different accelerator types ("cpu", "gpu", "tpu", "hpu", "mps", "auto")
                as well as custom accelerator instances.

            strategy: Supports different training strategies with aliases as well custom strategies.
                Default: ``"auto"``.

            devices: The devices to use. Can be set to a positive number (int or str), a sequence of device indices
                (list or str), the value ``-1`` to indicate all available devices should be used, or ``"auto"`` for
                automatic selection based on the chosen accelerator. Default: ``"auto"``.

            num_nodes: Number of GPU nodes for distributed training.
                Default: ``1``.

            precision: Double precision (64, '64' or '64-true'), full precision (32, '32' or '32-true'),
                16bit mixed precision (16, '16', '16-mixed') or bfloat16 mixed precision ('bf16', 'bf16-mixed').
                Can be used on CPU, GPU, TPUs, or HPUs.
                Default: ``'32-true'``.

            logger: Logger (or iterable collection of loggers) for experiment tracking. A ``True`` value uses
                the default ``TensorBoardLogger`` if it is installed, otherwise ``CSVLogger``.
                ``False`` will disable logging. If multiple loggers are provided, local files
                (checkpoints, profiler traces, etc.) are saved in the ``log_dir`` of the first logger.
                Default: ``True``.

            callbacks: Add a callback or list of callbacks.
                Default: ``None``.

            fast_dev_run: Runs n if set to ``n`` (int) else 1 if set to ``True`` batch(es)
                of train, val and test to find any bugs (ie: a sort of unit test).
                Default: ``False``.

            max_epochs: Stop training once this number of epochs is reached. Disabled by default (None).
                If both max_epochs and max_steps are not specified, defaults to ``max_epochs = 1000``.
                To enable infinite training, set ``max_epochs = -1``.

            min_epochs: Force training for at least these many epochs. Disabled by default (None).

            max_steps: Stop training after this number of steps. Disabled by default (-1). If ``max_steps = -1``
                and ``max_epochs = None``, will default to ``max_epochs = 1000``. To enable infinite training, set
                ``max_epochs`` to ``-1``.

            min_steps: Force training for at least these number of steps. Disabled by default (``None``).

            max_time: Stop training after this amount of time has passed. Disabled by default (``None``).
                The time duration can be specified in the format DD:HH:MM:SS (days, hours, minutes seconds), as a
                :class:`datetime.timedelta`, or a dictionary with keys that will be passed to
                :class:`datetime.timedelta`.

            limit_train_batches: How much of training dataset to check (float = fraction, int = num_batches).
                Default: ``1.0``.

            limit_val_batches: How much of validation dataset to check (float = fraction, int = num_batches).
                Default: ``1.0``.

            limit_test_batches: How much of test dataset to check (float = fraction, int = num_batches).
                Default: ``1.0``.

            limit_predict_batches: How much of prediction dataset to check (float = fraction, int = num_batches).
                Default: ``1.0``.

            overfit_batches: Overfit a fraction of training/validation data (float) or a set number of batches (int).
                Default: ``0.0``.

            val_check_interval: How often to check the validation set. Pass a ``float`` in the range [0.0, 1.0] to check
                after a fraction of the training epoch. Pass an ``int`` to check after a fixed number of training
                batches. An ``int`` value can only be higher than the number of training batches when
                ``check_val_every_n_epoch=None``, which validates after every ``N`` training batches
                across epochs or during iteration-based training.
                Default: ``1.0``.

            check_val_every_n_epoch: Perform a validation loop after every `N` training epochs. If ``None``,
                validation will be done solely based on the number of training batches, requiring ``val_check_interval``
                to be an integer value.
                Default: ``1``.

            num_sanity_val_steps: Sanity check runs n validation batches before starting the training routine.
                Set it to `-1` to run all batches in all validation dataloaders.
                Default: ``2``.

            log_every_n_steps: How often to log within steps.
                Default: ``50``.

            enable_checkpointing: If ``True``, enable checkpointing.
                It will configure a default ModelCheckpoint callback if there is no user-defined ModelCheckpoint in
                :paramref:`~lightning.pytorch.trainer.trainer.Trainer.callbacks`.
                Default: ``True``.

            enable_progress_bar: Whether to enable to progress bar by default.
                Default: ``True``.

            enable_model_summary: Whether to enable model summarization by default.
                Default: ``True``.

            accumulate_grad_batches: Accumulates gradients over k batches before stepping the optimizer.
                Default: 1.

            gradient_clip_val: The value at which to clip gradients. Passing ``gradient_clip_val=None`` disables
                gradient clipping. If using Automatic Mixed Precision (AMP), the gradients will be unscaled before.
                Default: ``None``.

            gradient_clip_algorithm: The gradient clipping algorithm to use. Pass ``gradient_clip_algorithm="value"``
                to clip by value, and ``gradient_clip_algorithm="norm"`` to clip by norm. By default it will
                be set to ``"norm"``.

            deterministic: If ``True``, sets whether PyTorch operations must use deterministic algorithms.
                Set to ``"warn"`` to use deterministic algorithms whenever possible, throwing warnings on operations
                that don't support deterministic mode. If not set, defaults to ``False``. Default: ``None``.

            benchmark: The value (``True`` or ``False``) to set ``torch.backends.cudnn.benchmark`` to.
                The value for ``torch.backends.cudnn.benchmark`` set in the current session will be used
                (``False`` if not manually set). If :paramref:`~lightning.pytorch.trainer.trainer.Trainer.deterministic`
                is set to ``True``, this will default to ``False``. Override to manually set a different value.
                Default: ``None``.

            inference_mode: Whether to use :func:`torch.inference_mode` or :func:`torch.no_grad` during
                evaluation (``validate``/``test``/``predict``).

            use_distributed_sampler: Whether to wrap the DataLoader's sampler with
                :class:`torch.utils.data.DistributedSampler`. If not specified this is toggled automatically for
                strategies that require it. By default, it will add ``shuffle=True`` for the train sampler and
                ``shuffle=False`` for validation/test/predict samplers. If you want to disable this logic, you can pass
                ``False`` and add your own distributed sampler in the dataloader hooks. If ``True`` and a distributed
                sampler was already added, Lightning will not replace the existing one. For iterable-style datasets,
                we don't do this automatically.

            profiler: To profile individual steps during training and assist in identifying bottlenecks.
                Default: ``None``.

            detect_anomaly: Enable anomaly detection for the autograd engine.
                Default: ``False``.

            barebones: Whether to run in "barebones mode", where all features that may impact raw speed are
                disabled. This is meant for analyzing the Trainer overhead and is discouraged during regular training
                runs. The following features are deactivated:
                :paramref:`~lightning.pytorch.trainer.trainer.Trainer.enable_checkpointing`,
                :paramref:`~lightning.pytorch.trainer.trainer.Trainer.logger`,
                :paramref:`~lightning.pytorch.trainer.trainer.Trainer.enable_progress_bar`,
                :paramref:`~lightning.pytorch.trainer.trainer.Trainer.log_every_n_steps`,
                :paramref:`~lightning.pytorch.trainer.trainer.Trainer.enable_model_summary`,
                :paramref:`~lightning.pytorch.trainer.trainer.Trainer.num_sanity_val_steps`,
                :paramref:`~lightning.pytorch.trainer.trainer.Trainer.fast_dev_run`,
                :paramref:`~lightning.pytorch.trainer.trainer.Trainer.detect_anomaly`,
                :paramref:`~lightning.pytorch.trainer.trainer.Trainer.profiler`,
                :meth:`~lightning.pytorch.core.LightningModule.log`,
                :meth:`~lightning.pytorch.core.LightningModule.log_dict`.
            plugins: Plugins allow modification of core behavior like ddp and amp, and enable custom lightning plugins.
                Default: ``None``.

            sync_batchnorm: Synchronize batch norm layers between process groups/whole world.
                Default: ``False``.

            reload_dataloaders_every_n_epochs: Set to a positive integer to reload dataloaders every n epochs.
                Default: ``0``.

            default_root_dir: Default path for logs and weights when no logger/ckpt_callback passed.
                Default: ``os.getcwd()``.
                Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'

        Raises:
            TypeError:
                If ``gradient_clip_val`` is not an int or float.

            MisconfigurationException:
                If ``gradient_clip_algorithm`` is invalid.

        z(: Initializing trainer with parameters: Nz.`Trainer(barebones=True, enable_checkpointing=zV)` was passed. Checkpointing can impact raw speed so it is disabled in barebones mode.Fz `Trainer(barebones=True, logger=zP)` was passed. Logging can impact raw speed so it is disabled in barebones mode.z-`Trainer(barebones=True, enable_progress_bar=zY)` was passed. The progress bar can impact raw speed so it is disabled in barebones mode.r   z+`Trainer(barebones=True, log_every_n_steps=z.`Trainer(barebones=True, enable_model_summary=zV)` was passed. Model summary can impact raw speed so it is disabled in barebones mode.z.`Trainer(barebones=True, num_sanity_val_steps=zX)` was passed. Sanity checking can impact raw speed so it is disabled in barebones mode.z&`Trainer(barebones=True, fast_dev_run=zi)` was passed. Development run is not meant for raw speed evaluation so it is disabled in barebones mode.z(`Trainer(barebones=True, detect_anomaly=zZ)` was passed. Anomaly detection can impact raw speed so it is disabled in barebones mode.z"`Trainer(barebones=True, profiler=zR)` was passed. Profiling can impact raw speed so it is disabled in barebones mode.)z6 - Checkpointing: `Trainer(enable_checkpointing=True)`z4 - Progress bar: `Trainer(enable_progress_bar=True)`z6 - Model summary: `Trainer(enable_model_summary=True)`z - Logging: `Trainer(logger=True)`, `Trainer(log_every_n_steps>0)`, `LightningModule.log(...)`, `LightningModule.log_dict(...)`z5 - Sanity checking: `Trainer(num_sanity_val_steps>0)`z0 - Development run: `Trainer(fast_dev_run=True)`z4 - Anomaly detection: `Trainer(detect_anomaly=True)`z% - Profiling: `Trainer(profiler=...)`zÐYou are running in `Trainer(barebones=True)` mode. All features that may impact raw speed have been disabled to facilitate analyzing the Trainer overhead. Specifically, the following features are deactivated:Té2   é   )
rN   rL   rM   rO   rp   ri   rk   rh   rP   ro   )rU   rT   )rW   rV   )rj   z5`gradient_clip_val` should be an int or a float. Got Ú.z`gradient_clip_algorithm` z! is invalid. Allowed algorithms: zYou have turned on `Trainer(detect_anomaly=True)`. This will significantly slow down compute speed and is recommended only for model debugging.)<ÚsuperÚ__init__ÚlogÚdebugÚ	__class__Ú__name__ÚlocalsÚosÚfspathrn   Ú
ValueErrorr?   ÚlinesepÚjoinr.   Ú_data_connectorr+   Ú_accelerator_connectorr/   Ú_logger_connectorr,   Ú_callback_connectorr-   Ú_checkpoint_connectorr3   Ú_signal_connectorr   Úfit_loopr   Ú
epoch_loopr   r5   Ú
VALIDATINGr4   Úvalidate_loopÚTESTINGÚ	test_loopr   Úpredict_loopre   Úon_trainer_initÚ
isinstanceÚintÚfloatÚ	TypeErrorr8   Úsupported_typeÚlowerr=   Úsupported_typesrf   rg   Ú_detect_anomalyr&   Ú_log_device_infoÚshould_stopr6   ÚstateÚ_init_profilerÚ_init_debugging_flags))ÚselfrL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rV   rW   rX   rY   rZ   r[   r\   r]   r^   r_   r`   ra   rb   rc   rd   re   rf   rg   rh   ri   rj   rk   rl   rm   rn   ro   rp   rq   rr   Údeactivated©r{   © úU/home/ubuntu/.local/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.pyrx   W   s,   
V

ÿ
ÿ
ÿ
ÿ
ÿ
ÿ
ÿ
ÿ
ÿþ
þÿ
ö



ÿú
ýÿÿÿÿÿ
÷zTrainer.__init__Úmodelúpl.LightningModuleÚtrain_dataloadersÚval_dataloadersÚ
datamoduleÚ	ckpt_pathc              	   C   sR   t |ƒ}|| j_t|| jƒ tj| j_tj	| j_
d| _t | | j|||||¡ dS )an  Runs the full optimization routine.

        Args:
            model: Model to fit.

            train_dataloaders: An iterable or collection of iterables specifying training samples.
                Alternatively, a :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
                the :class:`~lightning.pytorch.core.hooks.DataHooks.train_dataloader` hook.

            val_dataloaders: An iterable or collection of iterables specifying validation samples.

            datamodule: A :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
                the :class:`~lightning.pytorch.core.hooks.DataHooks.train_dataloader` hook.

            ckpt_path: Path/URL of the checkpoint from which training is resumed. Could also be one of two special
                keywords ``"last"`` and ``"hpc"``. If there is no checkpoint file at the path, an exception is raised.

        Raises:
            TypeError:
                If ``model`` is not :class:`~lightning.pytorch.core.LightningModule` for torch version less than
                2.0.0 and if ``model`` is not :class:`~lightning.pytorch.core.LightningModule` or
                :class:`torch._dynamo.OptimizedModule` for torch versions greater than or equal to 2.0.0 .

        For more information about multiple dataloaders, see this :ref:`section <multiple-dataloaders>`.

        TN)r;   rM   Ú_lightning_moduler<   r5   ÚFITTINGr›   Úfnr7   ÚRUNNINGÚstatusÚtrainingr%   Ú_call_and_handle_interruptÚ	_fit_impl©rž   r£   r¥   r¦   r§   r¨   r¡   r¡   r¢   Úfitò  s   "

ÿzTrainer.fitc                 C   s¬   t  | jj› d¡ t|tƒr|}d }|d us|d ur#|d ur#tdƒ‚| jj||||d | j	j
d us5J ‚| jj| j	j
|d| jd ud}| j||d | j	jsQJ ‚d| _d S )Nz: trainer fit stagezXYou cannot pass `train_dataloader` or `val_dataloaders` to `trainer.fit(datamodule=...)`)r¥   r¦   r§   T©Úmodel_providedÚmodel_connected©r¨   F)ry   rz   r{   r|   r‘   r   r=   rƒ   Úattach_datar›   r«   r‡   Ú_select_ckpt_pathÚlightning_moduleÚ_runÚstoppedr®   r±   r¡   r¡   r¢   r°     s,   
ÿÿüzTrainer._fit_implÚdataloadersÚverbosec              	   C   ól   |du r| j du rtdƒ‚nt|ƒ}|| j_t| j | jƒ tj| j_	t
j| j_d| _t | | j|||||¡S )a•  Perform one evaluation epoch over the validation set.

        Args:
            model: The model to validate.

            dataloaders: An iterable or collection of iterables specifying validation samples.
                Alternatively, a :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
                the :class:`~lightning.pytorch.core.hooks.DataHooks.val_dataloader` hook.

            ckpt_path: Either ``"best"``, ``"last"``, ``"hpc"`` or path to the checkpoint you wish to validate.
                If ``None`` and the model instance was passed, use the current weights.
                Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded
                if a checkpoint callback is configured.

            verbose: If True, prints the validation results.

            datamodule: A :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
                the :class:`~lightning.pytorch.core.hooks.DataHooks.val_dataloader` hook.

        For more information about multiple dataloaders, see this :ref:`section <multiple-dataloaders>`.

        Returns:
            List of dictionaries with metrics logged during the validation phase, e.g., in model- or callback hooks
            like :meth:`~lightning.pytorch.LightningModule.validation_step` etc.
            The length of the list corresponds to the number of validation dataloaders used.

        Raises:
            TypeError:
                If no ``model`` is passed and there was no ``LightningModule`` passed in the previous run.
                If ``model`` passed is not `LightningModule` or `torch._dynamo.OptimizedModule`.

            MisconfigurationException:
                If both ``dataloaders`` and ``datamodule`` are passed. Pass only one of these.

            RuntimeError:
                If a compiled ``model`` is passed and the strategy is not supported.

        Nz^`Trainer.validate()` requires a `LightningModule` when it hasn't been passed in a previous runT)r¹   r”   r;   rM   r©   r<   r5   r‹   r›   r«   r7   r¬   r­   Ú
validatingr%   r¯   Ú_validate_impl©rž   r£   r¼   r¨   r½   r§   r¡   r¡   r¢   ÚvalidateD  s   .
ÿÿ

ÿzTrainer.validatec                 C   óÆ   t  | jj› d¡ t|tƒr|}d }|d ur|rtdƒ‚|d u r'| j}d}nd}|| j_	| j
j|||d | jjd us>J ‚| jj| jj||| jd ud}| j||d}t|ƒ}| jjs^J ‚d| _|S )Nz: trainer validate stagezHYou cannot pass both `trainer.validate(dataloaders=..., datamodule=...)`FT)r¦   r§   r³   r¶   )ry   rz   r{   r|   r‘   r   r=   r¹   rŒ   r½   rƒ   r·   r›   r«   r‡   r¸   rº   r   r»   r¿   ©rž   r£   r¼   r¨   r½   r§   r´   Úresultsr¡   r¡   r¢   rÀ   ƒ  ó*   
ÿzTrainer._validate_implc              	   C   r¾   )aÊ  Perform one evaluation epoch over the test set. It's separated from fit to make sure you never run on your
        test set until you want to.

        Args:
            model: The model to test.

            dataloaders: An iterable or collection of iterables specifying test samples.
                Alternatively, a :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
                the :class:`~lightning.pytorch.core.hooks.DataHooks.test_dataloader` hook.

            ckpt_path: Either ``"best"``, ``"last"``, ``"hpc"`` or path to the checkpoint you wish to test.
                If ``None`` and the model instance was passed, use the current weights.
                Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded
                if a checkpoint callback is configured.

            verbose: If True, prints the test results.

            datamodule: A :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
                the :class:`~lightning.pytorch.core.hooks.DataHooks.test_dataloader` hook.

        For more information about multiple dataloaders, see this :ref:`section <multiple-dataloaders>`.

        Returns:
            List of dictionaries with metrics logged during the test phase, e.g., in model- or callback hooks
            like :meth:`~lightning.pytorch.LightningModule.test_step` etc.
            The length of the list corresponds to the number of test dataloaders used.

        Raises:
            TypeError:
                If no ``model`` is passed and there was no ``LightningModule`` passed in the previous run.
                If ``model`` passed is not `LightningModule` or `torch._dynamo.OptimizedModule`.

            MisconfigurationException:
                If both ``dataloaders`` and ``datamodule`` are passed. Pass only one of these.

            RuntimeError:
                If a compiled ``model`` is passed and the strategy is not supported.

        NzZ`Trainer.test()` requires a `LightningModule` when it hasn't been passed in a previous runT)r¹   r”   r;   rM   r©   r<   r5   r   r›   r«   r7   r¬   r­   Útestingr%   r¯   Ú
_test_implrÁ   r¡   r¡   r¢   Útest°  s   /
ÿÿ

ÿzTrainer.testc                 C   rÃ   )Nz: trainer test stagezDYou cannot pass both `trainer.test(dataloaders=..., datamodule=...)`FT)Útest_dataloadersr§   r³   r¶   )ry   rz   r{   r|   r‘   r   r=   r¹   rŽ   r½   rƒ   r·   r›   r«   r‡   r¸   rº   r   r»   rÇ   rÄ   r¡   r¡   r¢   rÈ   ð  rÆ   zTrainer._test_implÚreturn_predictionsc              	   C   r¾   )al  Run inference on your data. This will call the model forward function to compute predictions. Useful to
        perform distributed and batched predictions. Logging is disabled in the predict hooks.

        Args:
            model: The model to predict with.

            dataloaders: An iterable or collection of iterables specifying predict samples.
                Alternatively, a :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
                the :class:`~lightning.pytorch.core.hooks.DataHooks.predict_dataloader` hook.

            datamodule: A :class:`~lightning.pytorch.core.datamodule.LightningDataModule` that defines
                the :class:`~lightning.pytorch.core.hooks.DataHooks.predict_dataloader` hook.

            return_predictions: Whether to return predictions.
                ``True`` by default except when an accelerator that spawns processes is used (not supported).

            ckpt_path: Either ``"best"``, ``"last"``, ``"hpc"`` or path to the checkpoint you wish to predict.
                If ``None`` and the model instance was passed, use the current weights.
                Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded
                if a checkpoint callback is configured.

        For more information about multiple dataloaders, see this :ref:`section <multiple-dataloaders>`.

        Returns:
            Returns a list of dictionaries, one for each provided dataloader containing their respective predictions.

        Raises:
            TypeError:
                If no ``model`` is passed and there was no ``LightningModule`` passed in the previous run.
                If ``model`` passed is not `LightningModule` or `torch._dynamo.OptimizedModule`.

            MisconfigurationException:
                If both ``dataloaders`` and ``datamodule`` are passed. Pass only one of these.

            RuntimeError:
                If a compiled ``model`` is passed and the strategy is not supported.

        See :ref:`Lightning inference section<deploy/production_basic:Predict step with your LightningModule>` for more.

        Nz]`Trainer.predict()` requires a `LightningModule` when it hasn't been passed in a previous runT)r¹   r”   r;   rM   r©   r<   r5   Ú
PREDICTINGr›   r«   r7   r¬   r­   Ú
predictingr%   r¯   Ú_predict_impl)rž   r£   r¼   r§   rË   r¨   r¡   r¡   r¢   Úpredict  s   0
ÿÿ

ÿzTrainer.predictc                 C   s¾   t  | jj› d¡ || j_t|tƒr|}d }|d ur!|r!tdƒ‚|d u r+| j	}d}nd}| j
j|||d | jjd us>J ‚| jj| jj||| j	d ud}| j||d}| jjsZJ ‚d| _|S )Nz: trainer predict stagezGYou cannot pass both `trainer.predict(dataloaders=..., datamodule=...)`FT)Úpredict_dataloadersr§   r³   r¶   )ry   rz   r{   r|   r   rË   r‘   r   r=   r¹   rƒ   r·   r›   r«   r‡   r¸   rº   r»   rÍ   )rž   r£   r¼   r§   rË   r¨   r´   rÅ   r¡   r¡   r¢   rÎ   ^  s(   
ÿzTrainer._predict_implc                 C   sJ  | j jtjkrt| j| j| j| j| ƒ\}}|| j	_|| j	_| j
r$tdƒ t|dƒr/t |j¡ | j |¡ | j ¡  | j ¡  t| ƒ t | jj› d¡ | j ¡  |  ¡  t | jj› d¡ | j ¡  t | ¡ t | jj› d¡ t  | ¡ | jj!st | jj› d|› ¡ | j" #|¡ | j$ %¡  | j$ &¡  | j '| ¡ | j jtjkr²t (| d¡ t )| d¡ t*| ƒ | jj!rÌt | jj› d|› ¡ | j" #|¡ t | jj› d¡ | j" +¡  | j" ,¡  | j- .¡  |  /¡ }t | jj› d	¡ |  0¡  | j jtjkrt (| d
¡ t )| d
¡ t | jj› d¡ t 1| ¡ t2j3| j _4d | j _5|S )Nz‡`Trainer(barebones=True)` started running. The progress bar is disabled so you might want to manually print the progress in your model.Úhparamsz!: setting up strategy environmentz: preparing dataz: configuring modelz7: restoring module and callbacks from checkpoint path: Úon_fit_startz: restoring training statez: trainer tearing downÚ
on_fit_endz: calling teardown hooks)6r›   r«   r5   rª   r   rW   rV   rU   rT   r‰   rn   r?   Úhasattrr9   Úclean_namespacerÑ   rM   Úconnectr†   Ú_attach_model_callbacksÚ_attach_model_logging_functionsr'   ry   rz   r{   r|   Úsetup_environmentÚ_Trainer__setup_profilerrƒ   Úprepare_datar%   Ú_call_setup_hookÚ_call_configure_modelÚrestore_checkpoint_after_setupr‡   Ú_restore_modules_and_callbacksr…   Úreset_resultsÚreset_metricsr&   Ú_call_callback_hooksÚ_call_lightning_module_hookr   Úrestore_training_stateÚ
resume_endrˆ   Úregister_signal_handlersÚ
_run_stageÚ	_teardownÚ_call_teardown_hookr7   ÚFINISHEDr­   Ústage)rž   r£   r¨   rU   rT   rÅ   r¡   r¡   r¢   rº   ˆ  sh   ÿÿ













zTrainer._runc                 C   s8   | j  ¡  | j}|dur| ¡  | j ¡  | j ¡  dS )z¥This is the Trainer's internal teardown, unrelated to the `teardown` hooks in LightningModule and Callback;
        those are handled by :meth:`_call_teardown_hook`.N)rM   ÚteardownÚ_active_loopr…   rˆ   )rž   Úloopr¡   r¡   r¢   rè   é  s   

zTrainer._teardownc                 C   s¸   | j  d¡ | j ¡  | jr| j ¡ S | jr| j ¡ S | j	rTt
ƒ  |  ¡  W d   ƒ n1 s0w   Y  tj | j¡ | j ¡  W d   ƒ d S 1 sMw   Y  d S td| j› ƒ‚)Nz	run-stagezUnexpected state )rM   Úbarrierr¹   Ú	zero_gradÚ
evaluatingÚ_evaluation_loopÚrunrÍ   r   r®   rA   Ú_run_sanity_checkÚtorchÚautogradÚset_detect_anomalyr˜   r‰   ÚRuntimeErrorr›   ©rž   r¡   r¡   r¢   rç   ô  s"   



ÿ
ÿþzTrainer._run_stagec                 C   s”   | j jj}| jo| jdko|j }|rH| jj}d| _| j	 
¡  | j	 ¡  t | d¡ | ¡  t | d¡ | j	 
¡  | j	 ¡  t|ƒ || j_d S d S )Nr   TÚon_sanity_check_startÚon_sanity_check_end)r‰   rŠ   Úval_loopÚenable_validationr`   Ú
restartingr›   rë   Úsanity_checkingr…   rà   rá   r%   râ   ró   r   )rž   rü   Úshould_sanity_checkrë   r¡   r¡   r¢   rô     s&   
ÿü	



èzTrainer._run_sanity_checkc                 C   sN   | j jd usJ ‚| jdkr| jnd }t| jƒ| j_| jj| j j|| j	d d S )NrJ   )rë   Ú
local_rankÚlog_dir)
r›   r«   Ú
world_sizer  r   r¹   rl   r©   r&   r  )rž   r  r¡   r¡   r¢   Ú__setup_profiler+  s   zTrainer.__setup_profilerÚ
empty_initc                 c   sh    t d| jtdrtdt| jƒj› dtd | jj|d dV  W d  ƒ dS 1 s-w   Y  dS )a•  Tensors that you instantiate under this context manager will be created on the device right away and have
        the right data type depending on the precision setting in the Trainer.

        The parameters and tensors get created on the device and with the right data type right away without wasting
        memory being allocated unnecessarily.

        Args:
            empty_init: Whether to initialize the model with empty weights (uninitialized memory).
                If ``None``, the strategy will decide. Some strategies may not support all options.
                Set this to ``True`` if you are loading a checkpoint into a large model.

        Úmodel_sharded_context)ÚparentzX`trainer.init_module` cannot fully support proper instantiation of your model with the `zb` strategy. Please instantiate your model inside the`LightningModule.configure_model` hook instead)Úcategory)r  N)r>   rM   r$   r@   Útyper|   rG   Útensor_init_context)rž   r  r¡   r¡   r¢   Úinit_module1  s   €
ÿú"ÿzTrainer.init_moduleÚargsÚkwargsc                 O   s    | j dkrt|i |¤Ž dS dS )zùPrint something only on the first process. If running on multiple machines, it will print from the first
        process in each machine.

        Arguments passed to this method are forwarded to the Python built-in :func:`print` function.

        r   N)r  Úprint)rž   r  r  r¡   r¡   r¢   r  M  s   
ÿzTrainer.printc                 C   s   | j jsJ ‚| j jS ©N)rM   rL   rù   r¡   r¡   r¢   rL   [  s   zTrainer.acceleratorc                 C   ó   | j jS r  )r„   rM   rù   r¡   r¡   r¢   rM   `  ó   zTrainer.strategyc                 C   r  r  )rM   Úprecision_pluginrù   r¡   r¡   r¢   r  d  r  zTrainer.precision_pluginc                 C   r  r  )rM   Úglobal_rankrù   r¡   r¡   r¢   r  h  r  zTrainer.global_rankc                 C   ó   t | jddƒS )Nr  r   ©ÚgetattrrM   rù   r¡   r¡   r¢   r  l  ó   zTrainer.local_rankc                 C   r  )NÚ	node_rankr   r  rù   r¡   r¡   r¢   r  q  r  zTrainer.node_rankc                 C   r  )Nr  rJ   r  rù   r¡   r¡   r¢   r  v  r  zTrainer.world_sizec                 C   r  )NrO   rJ   r  rù   r¡   r¡   r¢   rO   {  ó   zTrainer.num_nodesc                 C   sv   t | jtƒr
| jjn| jjg}|dusJ ‚g }t|ƒD ]\}}t |tjƒr.| |j	p+|¡ qt |t
ƒr8| |¡ q|S )z List of device indexes per node.N)r‘   rM   r#   Úparallel_devicesÚroot_deviceÚ	enumeraterõ   ÚdeviceÚappendÚindexr’   )rž   rN   Ú
device_idsÚidxr  r¡   r¡   r¢   r     s   

ÿý

€zTrainer.device_idsc                 C   s
   t | jƒS )z,Number of devices the trainer uses per node.)Úlenr   rù   r¡   r¡   r¢   Únum_devices  s   
zTrainer.num_devicesc                 C   r  r  )rM   r¹   rù   r¡   r¡   r¢   r¹   •  ó   zTrainer.lightning_modulec                 C   r  r  ©rM   Ú
optimizersrù   r¡   r¡   r¢   r&  š  r  zTrainer.optimizersÚ
new_optimsc                 C   s   || j _d S r  r%  )rž   r'  r¡   r¡   r¢   r&  ž  s   c                 C   r  r  )rM   Úlr_scheduler_configsrù   r¡   r¡   r¢   r(  ¢  r  zTrainer.lr_scheduler_configsc                 C   ó
   | j jjS r  )rM   r  rP   rù   r¡   r¡   r¢   rP   ¦  s   
zTrainer.precisionc                 C   s   t | jdd ƒS )NÚscaler)r  r  rù   r¡   r¡   r¢   r*  ª  r  zTrainer.scalerc                 C   r  )zçThe LightningModule, but possibly wrapped into DataParallel or DistributedDataParallel.

        To access the pure LightningModule, use
        :meth:`~lightning.pytorch.trainer.trainer.Trainer.lightning_module` instead.

        )rM   r£   rù   r¡   r¡   r¢   r£   ®  ó   zTrainer.modelc                 C   sT   t | jƒdkrt| jd ttfƒs| jd j}n
| jd j}n| j}| j 	|¡}|S )as  The directory for the current experiment. Use this to save images to, etc...

        .. note:: You must call this on all processes. Failing to do so will cause your program to stall forever.

         .. code-block:: python

             def training_step(self, batch, batch_idx):
                 img = ...
                 save_img(img, self.trainer.log_dir)

        r   )
r"  Úloggersr‘   r   r   Úsave_dirr  rr   rM   Ú	broadcast)rž   Údirpathr¡   r¡   r¢   r  ¼  s   zTrainer.log_dirc                 C   r  )a  Whether this process is the global zero in multi-node training.

        .. code-block:: python

            def training_step(self, batch, batch_idx):
                if self.trainer.is_global_zero:
                    print("in node 0, accelerator 0")

        )rM   Úis_global_zerorù   r¡   r¡   r¢   r0  Ô  s   zTrainer.is_global_zeroc                 C   s   t | jtƒr
| jjS d S r  )r‘   rM   r#   Údistributed_sampler_kwargsrù   r¡   r¡   r¢   r1  á  s   z"Trainer.distributed_sampler_kwargsc                 C   s&   | j jjj ¡ otd| jƒo| jdkS )z2Check if we should run validation during training.Úvalidation_stepr   )r‰   rŠ   rü   Ú_data_sourceÚ
is_definedr>   r¹   rZ   rù   r¡   r¡   r¢   rý   ç  s
   
ÿýzTrainer.enable_validationc                 C   s&   t | jƒrtj tj | j¡¡S | jS )z³The default location to save artifacts of loggers, checkpoints etc.

        It is used as a fallback if logger or checkpoint callback do not define specific save paths.

        )r   Ú_default_root_dirr~   ÚpathÚnormpathÚ
expanduserrù   r¡   r¡   r¢   rr   ð  s   
zTrainer.default_root_dirc                 C   ó   | j }t|ƒdkr|d S dS )zThe first :class:`~lightning.pytorch.callbacks.early_stopping.EarlyStopping` callback in the
        Trainer.callbacks list, or ``None`` if it doesn't exist.r   N)Úearly_stopping_callbacksr"  ©rž   rR   r¡   r¡   r¢   Úearly_stopping_callbackû  ó   zTrainer.early_stopping_callbackc                 C   ó   dd„ | j D ƒS )zŠA list of all instances of :class:`~lightning.pytorch.callbacks.early_stopping.EarlyStopping` found in the
        Trainer.callbacks list.c                 S   ó   g | ]	}t |tƒr|‘qS r¡   )r‘   r   ©Ú.0Úcr¡   r¡   r¢   Ú
<listcomp>  ó    z4Trainer.early_stopping_callbacks.<locals>.<listcomp>©rR   rù   r¡   r¡   r¢   r:    ó   z Trainer.early_stopping_callbacksc                 C   r9  )z¡The first :class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` callback in the
        Trainer.callbacks list, or ``None`` if it doesn't exist.r   N)Úcheckpoint_callbacksr"  r;  r¡   r¡   r¢   Úcheckpoint_callback  r=  zTrainer.checkpoint_callbackc                 C   r>  )zŽA list of all instances of :class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` found in
        the Trainer.callbacks list.c                 S   r?  r¡   )r‘   r   r@  r¡   r¡   r¢   rC    rD  z0Trainer.checkpoint_callbacks.<locals>.<listcomp>rE  rù   r¡   r¡   r¢   rG    rF  zTrainer.checkpoint_callbacksc                 C   s"   | j D ]}t|tƒr|  S qdS )z¥An instance of :class:`~lightning.pytorch.callbacks.progress.progress_bar.ProgressBar` found in the
        Trainer.callbacks list, or ``None`` if one doesn't exist.N)rR   r‘   r   )rž   rB  r¡   r¡   r¢   Úprogress_bar_callback  s
   

ÿzTrainer.progress_bar_callbackc                 C   r  )aZ  Set to the path/URL of a checkpoint loaded via :meth:`~lightning.pytorch.trainer.trainer.Trainer.fit`,
        :meth:`~lightning.pytorch.trainer.trainer.Trainer.validate`,
        :meth:`~lightning.pytorch.trainer.trainer.Trainer.test`, or
        :meth:`~lightning.pytorch.trainer.trainer.Trainer.predict`.

        ``None`` otherwise.

        )r‡   Ú
_ckpt_pathrù   r¡   r¡   r¢   r¨     s   
zTrainer.ckpt_pathc                 C   s   || j _t|ƒ| j _dS )ak  Allows you to manage which checkpoint is loaded statefully.

        .. code-block:: python

            trainer = Trainer()
            trainer.ckpt_path = "my/checkpoint/file.ckpt"
            trainer.fit(model)
            ...

            # you will be in charge of resetting this
            trainer.ckpt_path = None
            trainer.test(model)

        N)r‡   rJ  ÚboolÚ_user_managed)rž   r¨   r¡   r¡   r¢   r¨   *  s   ÚfilepathÚweights_onlyÚstorage_optionsc                 C   s@   | j du r	tdƒ‚| j |¡}| jj|||d | j d¡ dS )a,  Runs routine to create a checkpoint.

        This method needs to be called on all processes in case the selected strategy is handling distributed
        checkpointing.

        Args:
            filepath: Path where checkpoint is saved.
            weights_only: If ``True``, will only save the model weights.
            storage_options: parameter for how to save to storage, passed to ``CheckpointIO`` plugin

        Raises:
            AttributeError:
                If the model is not attached to the Trainer before calling this method.

        NzªSaving a checkpoint is only possible if a model is attached to the Trainer. Did you call `Trainer.save_checkpoint()` before calling `Trainer.{fit,validate,test,predict}`?)rO  úTrainer.save_checkpoint)r£   ÚAttributeErrorr‡   Údump_checkpointrM   Úsave_checkpointrï   )rž   rM  rN  rO  Ú
checkpointr¡   r¡   r¢   rS  =  s   
ÿrP  c                 C   ó   | j jtjkS r  )r›   r­   r7   ÚINTERRUPTEDrù   r¡   r¡   r¢   Úinterrupted\  r  zTrainer.interruptedc                 C   rU  r  )r›   rë   r4   ÚTRAININGrù   r¡   r¡   r¢   r®   `  r  zTrainer.trainingÚvalc                 C   ó(   |r	t j| j_d S | jrd | j_d S d S r  )r4   rX  r›   rë   r®   ©rž   rY  r¡   r¡   r¢   r®   d  ó
   ÿc                 C   rU  r  )r›   rë   r4   r   rù   r¡   r¡   r¢   rÇ   k  r  zTrainer.testingc                 C   rZ  r  )r4   r   r›   rë   rÇ   r[  r¡   r¡   r¢   rÇ   o  r\  c                 C   rU  r  )r›   rë   r4   rÌ   rù   r¡   r¡   r¢   rÍ   v  r  zTrainer.predictingc                 C   rZ  r  )r4   rÌ   r›   rë   rÍ   r[  r¡   r¡   r¢   rÍ   z  r\  c                 C   rU  r  )r›   rë   r4   r‹   rù   r¡   r¡   r¢   r¿     r  zTrainer.validatingc                 C   rZ  r  )r4   r‹   r›   rë   r¿   r[  r¡   r¡   r¢   r¿   …  r\  c                 C   s   | j jd uo
| j jjS r  )r›   rë   rñ   rù   r¡   r¡   r¢   rñ   Œ  s   zTrainer.evaluatingc                 C   rU  )z…Whether sanity checking is running.

        Useful to disable some hooks, logging or callbacks during the sanity checking.

        )r›   rë   r4   ÚSANITY_CHECKINGrù   r¡   r¡   r¢   rÿ     s   zTrainer.sanity_checkingc                 C   rZ  r  )r4   r]  r›   rë   rÿ   r[  r¡   r¡   r¢   rÿ   ™  r\  c                 C   r  )zyWhether a ``signal.SIGTERM`` signal was received.

        For example, this can be checked to exit gracefully.

        )rˆ   Úreceived_sigtermrù   r¡   r¡   r¢   r^     s   zTrainer.received_sigtermc                 C   r)  )zƒThe number of optimizer steps taken (does not reset each epoch).

        This includes multiple optimizers (if enabled).

        )r‰   rŠ   Úglobal_steprù   r¡   r¡   r¢   r_  ­  s   
zTrainer.global_stepc                 C   ó   | j jjjS )z=The current epoch, updated after the epoch end hooks are run.)r‰   Úepoch_progressÚcurrentÚ	completedrù   r¡   r¡   r¢   Úcurrent_epoch¶  ó   zTrainer.current_epochc                 C   r  r  )r‰   rT   rù   r¡   r¡   r¢   rT   »  r  zTrainer.max_epochsc                 C   r  r  )r‰   rU   rù   r¡   r¡   r¢   rU   ¿  r  zTrainer.min_epochsc                 C   r  r  )r‰   rV   rù   r¡   r¡   r¢   rV   Ã  r  zTrainer.max_stepsc                 C   r  r  )r‰   rW   rù   r¡   r¡   r¢   rW   Ç  r  zTrainer.min_stepsc                 C   r`  )z,Whether trainer is executing the last batch.)r‰   rŠ   Úbatch_progressÚis_last_batchrù   r¡   r¡   r¢   rg  Ë  re  zTrainer.is_last_batchc                 C   ó   | j j }dur|jS dS )z9The training dataloader(s) used during ``trainer.fit()``.N)r‰   Ú_combined_loaderÚ	iterables©rž   Úcombined_loaderr¡   r¡   r¢   Útrain_dataloaderÐ  ó   zTrainer.train_dataloaderc                 C   s.   | j jjj }dus| jj }dur|jS dS )zUThe validation dataloader(s) used during ``trainer.fit()`` or ``trainer.validate()``.N)r‰   rŠ   rü   ri  rŒ   rj  rk  r¡   r¡   r¢   r¦   ×  s   
þzTrainer.val_dataloadersc                 C   rh  )z6The test dataloader(s) used during ``trainer.test()``.N)rŽ   ri  rj  rk  r¡   r¡   r¢   rÊ   à  rn  zTrainer.test_dataloadersc                 C   rh  )z?The prediction dataloader(s) used during ``trainer.predict()``.N)r   ri  rj  rk  r¡   r¡   r¢   rÐ   ç  rn  zTrainer.predict_dataloadersc                 C   r  )zJThe number of training batches that will be used during ``trainer.fit()``.)r‰   Úmax_batchesrù   r¡   r¡   r¢   Únum_training_batchesî  r$  zTrainer.num_training_batchesc                    s   ˆ j jjj}‡ fdd„|D ƒS )zhThe number of validation batches that will be used during the sanity-checking part of ``trainer.fit()``.c                    s   g | ]}t ˆ j|ƒ‘qS r¡   )Úminr`   )rA  Úbatchesrù   r¡   r¢   rC  ø  s    z2Trainer.num_sanity_val_batches.<locals>.<listcomp>)r‰   rŠ   rü   ro  )rž   ro  r¡   rù   r¢   Únum_sanity_val_batchesó  s   zTrainer.num_sanity_val_batchesc                 C   s"   | j jtjkr| jjS | jjjj	S )zfThe number of validation batches that will be used during ``trainer.fit()`` or ``trainer.validate()``.)
r›   r«   r5   r‹   rŒ   ro  r‰   rŠ   rü   Ú_max_batchesrù   r¡   r¡   r¢   Únum_val_batchesú  s   zTrainer.num_val_batchesc                 C   r  )zGThe number of test batches that will be used during ``trainer.test()``.)rŽ   ro  rù   r¡   r¡   r¢   Únum_test_batches  r$  zTrainer.num_test_batchesc                 C   r  )zPThe number of prediction batches that will be used during ``trainer.predict()``.)r   ro  rù   r¡   r¡   r¢   Únum_predict_batches  r$  zTrainer.num_predict_batchesc                 C   sH   | j jtjkr| jjjS | j jtjkr| jS | j jtj	kr | j
S tdƒ‚)NzPThe `Trainer._evaluation_loop` property isn't defined. Accessed outside of scope)r›   r«   r5   rª   r‰   rŠ   rü   r‹   rŒ   r   rŽ   rø   rù   r¡   r¡   r¢   rò     s   
zTrainer._evaluation_loopc                 C   s.   | j r| jS | js| jr| jS | jr| jS d S r  )r®   r‰   rÿ   rñ   rò   rÍ   r   rù   r¡   r¡   r¢   rí     s   zTrainer._active_loopc                 C   s   t | jƒdkr| jd S dS )zGThe first :class:`~lightning.pytorch.loggers.logger.Logger` being used.r   N)r"  r,  rù   r¡   r¡   r¢   rQ   %  s   zTrainer.loggerc                 C   s   |sg | _ d S |g| _ d S r  )r,  )rž   rQ   r¡   r¡   r¢   rQ   *  s   
c                 C   s   | j S )zÊThe list of :class:`~lightning.pytorch.loggers.logger.Logger` used.

        .. code-block:: python

            for logger in trainer.loggers:
                logger.log_metrics({"foo": 1.0})

        ©Ú_loggersrù   r¡   r¡   r¢   r,  1  s   
zTrainer.loggersr,  c                 C   s   |r|| _ d S g | _ d S r  rx  )rž   r,  r¡   r¡   r¢   r,  =  s   c                 C   r  )a  The metrics available to callbacks.

        .. code-block:: python

            def training_step(self, batch, batch_idx):
                self.log("a_val", 2.0)


            callback_metrics = trainer.callback_metrics
            assert callback_metrics["a_val"] == 2.0

        )r…   Úcallback_metricsrù   r¡   r¡   r¢   rz  A  s   zTrainer.callback_metricsc                 C   r  )zæThe metrics sent to the loggers.

        This includes metrics logged via :meth:`~lightning.pytorch.core.LightningModule.log` with the
        :paramref:`~lightning.pytorch.core.LightningModule.log.logger` argument set.

        )r…   Úlogged_metricsrù   r¡   r¡   r¢   r{  Q  r+  zTrainer.logged_metricsc                 C   r  )zíThe metrics sent to the progress bar.

        This includes metrics logged via :meth:`~lightning.pytorch.core.LightningModule.log` with the
        :paramref:`~lightning.pytorch.core.LightningModule.log.prog_bar` argument set.

        )r…   Úprogress_bar_metricsrù   r¡   r¡   r¢   r|  [  r+  zTrainer.progress_bar_metricsc                 C   s   | j }|d ur
|jS d S r  )rí   Ú_results)rž   Úactive_loopr¡   r¡   r¢   r}  e  s   zTrainer._resultsc                 C   s¢   | j dkr| jdkrtdƒS | jS | jdu rtdƒ | j ¡  | j}|tdƒkr+| jS | j dus2J ‚t 	|| j
 ¡t| j dƒ }| jdkrMt|| jƒ}|S |}|S )a?  The estimated number of batches that will ``optimizer.step()`` during training.

        This accounts for gradient accumulation and the current trainer configuration. This might be used when setting
        up your training dataloader, if it hasn't been set up already.

        .. code-block:: python

            def configure_optimizers(self):
                optimizer = ...
                stepping_batches = self.trainer.estimated_stepping_batches
                scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-3, total_steps=stepping_batches)
                return [optimizer], [scheduler]

        Raises:
            MisconfigurationException:
                If estimated stepping batches cannot be computed due to different `accumulate_grad_batches`
                at different epochs.

        rK   ÚinfNzBLoading `train_dataloader` to estimate number of stepping batches.rJ   )rT   rV   r“   rm  r?   r‰   Ú
setup_datarp  ÚmathÚceilre   Úmaxrq  )rž   Útotal_batchesÚmax_estimated_stepsr¡   r¡   r¢   Úestimated_stepping_batchesp  s   


ÿz"Trainer.estimated_stepping_batches)NNNN)NNNTN)NNNNNr  )rs   N)rs   r¤   )FN)r|   Ú
__module__Ú__qualname__r:   r
   Ústrr   r$   r   r’   r	   r)   r   r   rK  r   r   r   r“   r(   r"   r    r   rx   rE   r   rD   r²   r°   rB   rÂ   rC   rÀ   rÉ   rÈ   rÏ   rÎ   rº   rè   rç   rô   rÚ   r   r   r  r   r  ÚpropertyrL   rM   r!   r  r  r  r  r  rO   r   r#  r¹   r   r&  ÚsetterrF   r(  r*   rP   r*  rõ   ÚnnÚModuler£   r  r0  r1  rý   rr   r   r<  r:  r   rH  rG  r   rI  r¨   rS  rW  r®   rÇ   rÍ   r¿   rñ   rÿ   r^  r_  rd  rT   rU   rV   rW   rg  rm  r¦   rÊ   rÐ   rp  rs  ru  rv  rw  r   rò   r   r   rí   rQ   r,  r0   rz  r{  r1   r|  r2   r}  r†  Ú__classcell__r¡   r¡   r    r¢   rH   V   sh   ×
ý
üûúùø	÷

öõôóòñðïîí
ìëêéèçæåäãâá à!ß"Þ#Ý$Ü%Û&Ú'Ù(Ø)×*Ö    úþýüûú
ù/úþýüûú
ù(úþýüûú
ùAúþýüûú
ù/úþýüûú
ùBúþýüûú
ù/úþýüûú
ùCúþýüûú
ù+ÿÿÿ
þ
a

&
	
ÿÿÿÿ
þ				 rH   ){Ú__doc__Úloggingr  r~   Ú
contextlibr   Údatetimer   Útypingr   r   r   r   r   r	   r
   Úweakrefr   rõ   Útorch.optimr   Úlightning.pytorchÚpytorchÚplÚ%lightning.fabric.utilities.apply_funcr   Ú#lightning.fabric.utilities.cloud_ior   Ú lightning.fabric.utilities.typesr   Úlightning.pytorch.acceleratorsr   Úlightning.pytorch.callbacksr   r   r   r   Ú!lightning.pytorch.core.datamoduler   Úlightning.pytorch.loggersr   Ú"lightning.pytorch.loggers.csv_logsr   Ú%lightning.pytorch.loggers.tensorboardr   Ú#lightning.pytorch.loggers.utilitiesr   Úlightning.pytorch.loopsr   r   Ú'lightning.pytorch.loops.evaluation_loopr   Ú lightning.pytorch.loops.fit_loopr   Ú!lightning.pytorch.loops.utilitiesr   r   Úlightning.pytorch.pluginsr    r!   Úlightning.pytorch.profilersr"   Úlightning.pytorch.strategiesr#   r$   Úlightning.pytorch.trainerr%   r&   Ú1lightning.pytorch.trainer.configuration_validatorr'   Ú:lightning.pytorch.trainer.connectors.accelerator_connectorr(   r)   r*   r+   Ú7lightning.pytorch.trainer.connectors.callback_connectorr,   Ú9lightning.pytorch.trainer.connectors.checkpoint_connectorr-   Ú3lightning.pytorch.trainer.connectors.data_connectorr.   Ú5lightning.pytorch.trainer.connectors.logger_connectorr/   Ú<lightning.pytorch.trainer.connectors.logger_connector.resultr0   r1   r2   Ú5lightning.pytorch.trainer.connectors.signal_connectorr3   Ú lightning.pytorch.trainer.statesr4   r5   r6   r7   Úlightning.pytorch.utilitiesr8   r9   Ú$lightning.pytorch.utilities.argparser:   Ú#lightning.pytorch.utilities.compiler;   r<   Ú&lightning.pytorch.utilities.exceptionsr=   Ú)lightning.pytorch.utilities.model_helpersr>   Ú%lightning.pytorch.utilities.rank_zeror?   r@   Ú lightning.pytorch.utilities.seedrA   Ú!lightning.pytorch.utilities.typesrB   rC   rD   rE   rF   Ú$lightning.pytorch.utilities.warningsrG   Ú	getLoggerr|   ry   rH   r¡   r¡   r¡   r¢   Ú<module>   sb   $
