from __future__ import annotations

from typing import Any

import torch
from torch.nn import Module
from typing_extensions import override

from lightning_fabric.accelerators import Accelerator
from lightning_fabric.plugins.io.checkpoint_io import CheckpointIO
from lightning_fabric.plugins.precision import Precision
from lightning_fabric.strategies.strategy import Strategy, TBroadcast
from lightning_fabric.utilities.types import _DEVICE


class SingleDeviceStrategy(Strategy):
    """Strategy that handles communication on a single device."""

    def __init__(
        self,
        device: _DEVICE = "cpu",
        accelerator: Accelerator | None = None,
        checkpoint_io: CheckpointIO | None = None,
        precision: Precision | None = None,
    ) -> None:
        super().__init__(accelerator=accelerator, checkpoint_io=checkpoint_io, precision=precision)
        # Normalize the user-provided device spec (e.g. "cpu", "cuda:0") to a torch.device
        if not isinstance(device, torch.device):
            device = torch.device(device)
        self._root_device = device
        # A single-device strategy always runs as a single process
        self.global_rank = 0
        self.local_rank = 0
        self.world_size = 1

    @property
    @override
    def root_device(self) -> torch.device:
        return self._root_device

    @property
    @override
    def is_global_zero(self) -> bool:
        return True

    @override
    def module_to_device(self, module: Module) -> None:
        module.to(self.root_device)

    @override
    def all_reduce(self, tensor: Any | torch.Tensor, *args: Any, **kwargs: Any) -> Any | torch.Tensor:
        """Reduces a tensor from several distributed processes to one aggregated tensor. As this plugin only operates
        with a single device, the reduction is simply the identity.

        Args:
            tensor: the tensor to sync and reduce
            *args: ignored
            **kwargs: ignored

        Return:
            the unmodified input as reduction is not needed for single process operation

        """
        return tensor

    @override
    def all_gather(self, tensor: torch.Tensor, group: Any | None = None, sync_grads: bool = False) -> torch.Tensor:
        """Perform a ``all_gather`` on all processes."""
        return tensor

    @override
    def barrier(self, *args: Any, **kwargs: Any) -> None:
        pass

    @override
    def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:
        return obj
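

# Usage sketch (not part of the original module): a minimal illustration of the
# single-device semantics documented above, assuming the strategy can be
# instantiated standalone with its defaults. On one process, the collective
# operations (all_reduce, all_gather, broadcast) are identities and barrier is a
# no-op, so the calls below simply return their inputs unchanged.
if __name__ == "__main__":
    strategy = SingleDeviceStrategy(device="cpu")
    t = torch.ones(2)
    assert torch.equal(strategy.all_reduce(t), t)
    assert torch.equal(strategy.all_gather(t), t)
    assert strategy.broadcast(t) is t
    assert strategy.is_global_zero and strategy.world_size == 1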