from typing import cast, List, Optional, Tuple, Union

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _foreach_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _get_value,
    _maximize_doc,
    _params_doc,
    _use_grad_for_differentiable,
    _view_as_real,
    Optimizer,
    ParamsT,
)

__all__ = ["ASGD", "asgd"]


class ASGD(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-2,
        lambd: float = 1e-4,
        alpha: float = 0.75,
        t0: float = 1e6,
        weight_decay: float = 0,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        differentiable: bool = False,
        capturable: bool = False,
    ):
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr, lambd=lambd, alpha=alpha, t0=t0, weight_decay=weight_decay,
            foreach=foreach, maximize=maximize, differentiable=differentiable,
            capturable=capturable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0:
                    # Promote scalar state saved by older versions to tensors.
                    if not torch.is_tensor(p_state["step"]):
                        step_val = float(p_state["step"])
                        p_state["step"] = torch.tensor(
                            step_val, dtype=_get_scalar_dtype(), device=p.device
                        )
                    if not torch.is_tensor(p_state["eta"]):
                        p_state["eta"] = torch.tensor(
                            p_state["eta"], dtype=_get_scalar_dtype(), device=p.device
                        )
                    if not torch.is_tensor(p_state["mu"]):
                        p_state["mu"] = torch.tensor(
                            p_state["mu"], dtype=_get_scalar_dtype(), device=p.device
                        )

    def _init_group(self, group, params_with_grad, grads, mus, axs, etas, state_steps):
        has_complex = False
        for p in group["params"]:
            if p.grad is not None:
                has_complex |= torch.is_complex(p)
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError("ASGD does not support sparse gradients")
                grads.append(p.grad)

                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state["step"] = torch.zeros(
                        (), device=p.device, dtype=_get_scalar_dtype()
                    )
                    state["eta"] = torch.as_tensor(
                        group["lr"], device=p.device, dtype=_get_scalar_dtype()
                    ).clone().detach()
                    state["mu"] = torch.ones(
                        (), device=p.device, dtype=_get_scalar_dtype()
                    )
                    state["ax"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )

                mus.append(state["mu"])
                axs.append(state["ax"])
                etas.append(state["eta"])
                state_steps.append(state["step"])
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            mus: List[Tensor] = []
            axs: List[Tensor] = []
            etas: List[Tensor] = []
            state_steps: List[Tensor] = []

            has_complex = self._init_group(
                group, params_with_grad, grads, mus, axs, etas, state_steps
            )

            asgd(
                params_with_grad, grads, axs, mus, etas, state_steps,
                lambd=group["lambd"], lr=group["lr"], t0=group["t0"],
                alpha=group["alpha"], weight_decay=group["weight_decay"],
                foreach=group["foreach"], maximize=group["maximize"],
                differentiable=group["differentiable"],
                capturable=group["capturable"], has_complex=has_complex,
            )

        return loss


ASGD.__doc__ = rf"""Implements Averaged Stochastic Gradient Descent.

    It has been proposed in `Acceleration of stochastic approximation by
    averaging`_.

    Args:
        {_params_doc}
        lr (float, Tensor, optional): learning rate (default: 1e-2)
        lambd (float, optional): decay term (default: 1e-4)
        alpha (float, optional): power for eta update (default: 0.75)
        t0 (float, optional): point at which to start averaging (default: 1e6)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}
        {_capturable_doc}

    .. _Acceleration of stochastic approximation by averaging:
        https://dl.acm.org/citation.cfm?id=131098

    """


def _single_tensor_asgd(
    params: List[Tensor], grads: List[Tensor], axs: List[Tensor],
    mus: List[Tensor], etas: List[Tensor], state_steps: List[Tensor],
    *, lambd: float, lr: float, t0: float, alpha: float, weight_decay: float,
    maximize: bool, differentiable: bool, capturable: bool, has_complex: bool,
):
    for i, param in enumerate(params):
        grad = grads[i]
        grad = grad if not maximize else -grad
        mu = mus[i]
        ax = axs[i]
        eta = etas[i]
        step_t = state_steps[i]

        # If compiling, the compiler will handle cudagraph checks.
        if not torch._utils.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == mu.device.type == eta.device.type
                == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), (
                f"If capturable=True, params, mus, etas, and state_steps must be "
                f"on supported devices: {capturable_supported_devices}."
            )

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            param = torch.view_as_real(param)
            ax = torch.view_as_real(ax)

        # update step
        step_t += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if capturable:
            param.mul_(1 - lambd * eta)
            param.addcmul_(grad, eta, value=-1)  # update parameter
        else:
            eta_value = _get_value(eta)
            param.mul_(1 - lambd * eta_value)  # decay term
            param.add_(grad, alpha=-eta_value)  # update parameter

        # averaging
        if capturable or mu.item() != 1:
            ax.add_(param.sub(ax).mul_(mu))
        else:
            ax.copy_(param)

        if capturable:
            eta.copy_(lr / ((1 + lambd * lr * step_t) ** alpha))
            mu.copy_(1 / torch.maximum(step_t - t0, torch.ones_like(step_t)))
        else:
            step = _get_value(step_t)
            new_eta = torch.as_tensor(lr / ((1 + lambd * lr * step) ** alpha))
            eta.copy_(new_eta)
            new_mu = torch.as_tensor(1 / max(1, step - t0))
            mu.copy_(new_mu)


def _multi_tensor_asgd(
    params: List[Tensor], grads: List[Tensor], axs: List[Tensor],
    mus: List[Tensor], etas: List[Tensor], state_steps: List[Tensor],
    *, lambd: float, lr: float, t0: float, alpha: float, weight_decay: float,
    maximize: bool, differentiable: bool, capturable: bool, has_complex: bool,
):
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    # If compiling, the compiler will handle cudagraph checks.
    if not torch._utils.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == mu.device.type == eta.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, mu, eta, step in zip(params, mus, etas, state_steps)
        ), (
            f"If capturable=True, params, mus, etas, and state_steps must be "
            f"on supported devices: {capturable_supported_devices}."
        )

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, axs, mus, etas, state_steps]
    )
    for (device, _), (
        (grouped_params_, grouped_grads_, grouped_axs_, grouped_mus_,
         grouped_etas_, grouped_state_steps_),
        _,
    ) in grouped_tensors.items():
        grouped_params = cast(List[Tensor], grouped_params_)
        grouped_grads = cast(List[Tensor], grouped_grads_)
        grouped_axs = cast(List[Tensor], grouped_axs_)
        grouped_mus = cast(List[Tensor], grouped_mus_)
        grouped_etas = cast(List[Tensor], grouped_etas_)
        grouped_state_steps = cast(List[Tensor], grouped_state_steps_)

        if has_complex:
            _view_as_real(grouped_params, grouped_grads, grouped_axs)

        if maximize:
            grouped_grads = torch._foreach_neg(grouped_grads)  # type: ignore[assignment]

        # Update steps. If steps are on CPU, foreach falls back to a slow
        # per-tensor loop, so wrap the 1 into a Tensor once up front; the
        # alpha kwarg selects the right overload.
        if not torch._utils.is_compiling() and grouped_state_steps[0].is_cpu:
            torch._foreach_add_(
                grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        # intermediate = grad + param * lambd
        intermediate: Union[Tuple[Tensor, ...], List[Tensor]]
        if weight_decay != 0:
            if maximize:
                # Re-use the intermediate memory (grouped_grads) already
                # allocated for maximize.
                torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay)
                intermediate = grouped_grads
            else:
                intermediate = torch._foreach_add(
                    grouped_grads, grouped_params, alpha=weight_decay
                )
            torch._foreach_add_(intermediate, grouped_params, alpha=lambd)
        else:
            intermediate = torch._foreach_add(
                grouped_grads, grouped_params, alpha=lambd
            )

        # update param:
        #   param * (1 - lambd * eta) - eta * grad
        # = param - eta * (grad + param * lambd)
        # = param - eta * intermediate
        torch._foreach_addcmul_(grouped_params, intermediate, grouped_etas, value=-1)
        del intermediate

        # update grouped_axs
        # averaging: ax = ax + mu * (param - ax)
        intermediate = torch._foreach_sub(grouped_params, grouped_axs)
        torch._foreach_addcmul_(grouped_axs, intermediate, grouped_mus)
        del intermediate

        new_etas: Union[Tuple[Tensor, ...], List[Tensor]]
        new_mus: Union[Tuple[Tensor, ...], List[Tensor]]
        if capturable:
            # update grouped_mus: mu = 1 / max(1, step - t0)
            new_mus = torch._foreach_sub(grouped_state_steps, t0)
            torch._foreach_maximum_(new_mus, 1.0)
            torch._foreach_reciprocal_(new_mus)
            torch._foreach_copy_(grouped_mus, new_mus)
            del new_mus

            # update eta = lr / ((1 + lambd * lr * step) ** alpha)
            new_etas = torch._foreach_mul(grouped_state_steps, lambd)
            torch._foreach_mul_(new_etas, lr)
            torch._foreach_add_(new_etas, 1)
            torch._foreach_pow_(new_etas, alpha)
            torch._foreach_reciprocal_(new_etas)
            torch._foreach_mul_(new_etas, lr)
            torch._foreach_copy_(grouped_etas, new_etas)
        else:
            new_etas = [
                torch.full(
                    (),
                    lr / ((1 + lambd * lr * _get_value(step)) ** alpha),
                    device=device,
                )
                for step in grouped_state_steps
            ]
            new_mus = [
                torch.full((), 1.0 / max(1, _get_value(step) - t0), device=device)
                for step in grouped_state_steps
            ]
            torch._foreach_copy_(grouped_etas, new_etas)
            torch._foreach_copy_(grouped_mus, new_mus)


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_asgd)
def asgd(
    params: List[Tensor],
    grads: List[Tensor],
    axs: List[Tensor],
    mus: List[Tensor],
    etas: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with
    # torchscript; kept as positional kwargs for now since the functional API
    # is compiled by torch/distributed/optim.
    foreach: Optional[bool] = None,
    maximize: bool = False,
    differentiable: bool = False,
    capturable: bool = False,
    has_complex: bool = False,
    *,
    lambd: float,
    lr: float,
    t0: float,
    alpha: float,
    weight_decay: float,
):
    r"""Functional API that performs asgd algorithm computation.

    See :class:`~torch.optim.ASGD` for details.
    """
    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_asgd
    else:
        func = _single_tensor_asgd

    func(
        params, grads, axs, mus, etas, state_steps,
        lambd=lambd, lr=lr, t0=t0, alpha=alpha, weight_decay=weight_decay,
        maximize=maximize, differentiable=differentiable,
        capturable=capturable, has_complex=has_complex,
    )
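
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the torch module). Per the code
# above, each step applies
#
#     w   <- w * (1 - lambd * eta) - eta * grad
#     ax  <- ax + mu * (w - ax)
#     eta  = lr / (1 + lambd * lr * step) ** alpha
#     mu   = 1 / max(1, step - t0)
#
# so averaging into `ax` only begins once the step count passes t0. The
# snippet below is written as it would appear in a separate user script
# (this module uses relative imports and cannot run standalone); the toy
# least-squares problem, sizes, seed, t0, and iteration count are arbitrary
# choices for the demo.
#
#     import torch
#     from torch.optim import ASGD
#
#     torch.manual_seed(0)
#     model = torch.nn.Linear(4, 1)
#     opt = ASGD(model.parameters(), lr=1e-2, lambd=1e-4, alpha=0.75, t0=100)
#
#     x = torch.randn(64, 4)
#     y = x @ torch.tensor([[1.0], [-2.0], [0.5], [3.0]])
#
#     for _ in range(500):
#         opt.zero_grad()
#         loss = torch.nn.functional.mse_loss(model(x), y)
#         loss.backward()
#         opt.step()
#
#     # The averaged iterate is kept per parameter under the "ax" state key;
#     # with t0=100 it starts to diverge from the raw weights after step 100.
#     averaged = [opt.state[p]["ax"] for p in model.parameters()]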