import torch
from torch import Tensor

from .optimizer import (
    Optimizer,
    _use_grad_for_differentiable,
    _get_value,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _foreach_doc,
    _maximize_doc,
    _capturable_doc,
    _view_as_real,
)
from torch._utils import is_compiling
from typing import List, Optional

__all__ = ["ASGD", "asgd"]


def _to_tensor(x, device=None):
    if not isinstance(x, torch.Tensor):
        return torch.tensor(x, device=device)
    return x
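

# ASGD keeps, per parameter, a step count, a step size ``eta``, an averaging
# coefficient ``mu``, and an averaged iterate ``ax``. A sketch of the update
# implemented by this module (``lambd``, ``alpha``, and ``t0`` are the
# constructor arguments of the class below):
#
#     param <- param * (1 - lambd * eta) - eta * grad
#     ax    <- ax + mu * (param - ax)
#     eta   <- lr / (1 + lambd * lr * step) ** alpha
#     mu    <- 1 / max(1, step - t0)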
edef fddZ fddZdd ZedddZ	  Z
S )r   {Gz?-C6?      ?    .Ar   NFforeachmaximizedifferentiable
capturablec                    sl   d|kst d| d|kst d| |du r |
r t dt||||||||	|
d	}t || d S )Ng        zInvalid learning rate: zInvalid weight_decay value: F0Capturable not supported with single tensor ASGD)	lrlambdalphat0weight_decayr   r    r!   r"   )
ValueErrordictsuper__init__)selfparamsr$   r%   r&   r'   r(   r   r    r!   r"   defaults	__class__r   r   r,      s$   zASGD.__init__c                    s2  t  | | jD ]}|dd  |dd |dd |dd q	t| j }t|dko9t	|d d }|sO|D ]}tj
t|d tjd|d< q>t|dko]t	|d d	 }|sq|D ]}tj
|d	 tjd|d	< qbt|dkot	|d d
 }|s|D ]}tj
t|d
 tjd|d
< qd S d S )Nr   r    Fr!   r"   r   step)dtypeetamu)r+   __setstate__param_groups
setdefaultliststatevalueslenr   Z	is_tensorr   floatfloat32)r-   r:   groupZstate_valuesZstep_is_tensorsZeta_is_tensorZmu_is_tensorr0   r   r   r6   4   s6   



zASGD.__setstate__c                 C   s  d}|d D ]x}	|	j d ur~|t|	O }||	 |	j jr!td||	j  | j|	 }
t|
dkrbtjd|	j	tj
d|
d< tj|d |	j	tj
d|
d	< tjd|	j	tj
d|
d
< tj|	tjd|
d< ||
d
  ||
d  ||
d	  ||
d  q|S )NFr.   z&ASGD does not support sparse gradientsr   r   )r   r3   r2   r$   r4   r5   )Zmemory_formatax)gradr   
    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            mus = []
            axs = []
            etas = []
            state_steps = []

            has_complex = self._init_group(
                group, params_with_grad, grads, mus, axs, etas, state_steps
            )

            asgd(
                params_with_grad,
                grads,
                axs,
                mus,
                etas,
                state_steps,
                lambd=group["lambd"],
                lr=group["lr"],
                t0=group["t0"],
                alpha=group["alpha"],
                weight_decay=group["weight_decay"],
                foreach=group["foreach"],
                maximize=group["maximize"],
                differentiable=group["differentiable"],
                capturable=group["capturable"],
                has_complex=has_complex,
            )

        return loss


ASGD.__doc__ = rf"""Implements Averaged Stochastic Gradient Descent.

    It has been proposed in `Acceleration of stochastic approximation by
    averaging`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lambd (float, optional): decay term (default: 1e-4)
        alpha (float, optional): power for eta update (default: 0.75)
        t0 (float, optional): point at which to start averaging (default: 1e6)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}
        {_capturable_doc} For ASGD, capturable is only supported when foreach is True.

    .. _Acceleration of stochastic approximation by averaging:
        https://dl.acm.org/citation.cfm?id=131098

    """


def asgd(
    params: List[Tensor],
    grads: List[Tensor],
    axs: List[Tensor],
    mus: List[Tensor],
    etas: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    foreach: Optional[bool] = None,
    maximize: bool = False,
    differentiable: bool = False,
    capturable: bool = False,
    has_complex: bool = False,
    *,
    lambd: float,
    lr: float,
    t0: float,
    alpha: float,
    weight_decay: float,
):
    r"""Functional API that performs asgd algorithm computation.

    See :class:`~torch.optim.ASGD` for details.
    """
    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_asgd
    elif capturable and not is_compiling():
        raise RuntimeError("Capturable not supported with single tensor ASGD")
    else:
        func = _single_tensor_asgd

    func(
        params,
        grads,
        axs,
        mus,
        etas,
        state_steps,
        lambd=lambd,
        lr=lr,
        t0=t0,
        alpha=alpha,
        weight_decay=weight_decay,
        maximize=maximize,
        differentiable=differentiable,
        capturable=capturable,
        has_complex=has_complex,
    )


def _single_tensor_asgd(
    params: List[Tensor],
    grads: List[Tensor],
    axs: List[Tensor],
    mus: List[Tensor],
    etas: List[Tensor],
    state_steps: List[Tensor],
    *,
    lambd: float,
    lr: float,
    t0: float,
    alpha: float,
    weight_decay: float,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
):
    for i, param in enumerate(params):
        grad = grads[i]
        grad = grad if not maximize else -grad
        mu = mus[i]
        ax = axs[i]
        eta = etas[i]
        step_t = state_steps[i]

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            param = torch.view_as_real(param)
            ax = torch.view_as_real(ax)

        # update step
        step_t += 1
        step = _get_value(step_t)

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        eta_value = _get_value(eta)
        # decay term
        param.mul_(1 - lambd * eta_value)

        # update parameter
        param.add_(grad, alpha=-eta_value)

        # averaging
        if is_compiling() or mu.item() != 1:
            ax.add_(param.sub(ax).mul(mu))
        else:
            ax.copy_(param)

        new_eta = _to_tensor(lr / ((1 + lambd * lr * step) ** alpha))
        eta.copy_(new_eta)
        new_mu = _to_tensor(1 / max(1, step - t0))
        mu.copy_(new_mu)
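

# Note on the foreach path below: the decay and gradient updates are fused
# into a single addcmul, using
#
#     param * (1 - lambd * eta) - eta * grad == param - eta * (grad + lambd * param)
#
# so ``intermediate`` holds ``grad (+ weight_decay * param) + lambd * param``
# and one ``_foreach_addcmul_`` with ``value=-1`` applies the whole step.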
def _multi_tensor_asgd(
    params: List[Tensor],
    grads: List[Tensor],
    axs: List[Tensor],
    mus: List[Tensor],
    etas: List[Tensor],
    state_steps: List[Tensor],
    *,
    lambd: float,
    lr: float,
    t0: float,
    alpha: float,
    weight_decay: float,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
):
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, axs, mus, etas, state_steps]
    )
    for (device, _), (
        (
            grouped_params,
            grouped_grads,
            grouped_axs,
            grouped_mus,
            grouped_etas,
            grouped_state_steps,
        ),
        _,
    ) in grouped_tensors.items():
        if maximize:
            grouped_grads = torch._foreach_neg(grouped_grads)

        if has_complex:
            _view_as_real(grouped_params, grouped_grads, grouped_axs)

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if grouped_state_steps[0].is_cpu:
            torch._foreach_add_(
                grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        # intermediate = grad + param * lambd
        if weight_decay != 0:
            if maximize:
                torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay)
                intermediate = grouped_grads
            else:
                intermediate = torch._foreach_add(
                    grouped_grads, grouped_params, alpha=weight_decay
                )

            torch._foreach_add_(intermediate, grouped_params, alpha=lambd)
        else:
            intermediate = torch._foreach_add(grouped_grads, grouped_params, alpha=lambd)

        # update param
        # param * (1 - lambd * eta) - eta * grad
        # => param - param * lambd * eta - eta * grad
        # => param - eta * intermediate
        torch._foreach_addcmul_(grouped_params, intermediate, grouped_etas, value=-1)
        del intermediate

        # update grouped_axs
        # averaging: ax = ax + mu * (param - ax)
        intermediate = torch._foreach_sub(grouped_params, grouped_axs)
        torch._foreach_addcmul_(grouped_axs, intermediate, grouped_mus)
        del intermediate

        if capturable:
            # update grouped_mus: mu = 1 / max(1, step - t0)
            new_mus = torch._foreach_sub(grouped_state_steps, t0)
            torch._foreach_maximum_(new_mus, 1.0)
            torch._foreach_reciprocal_(new_mus)
            torch._foreach_copy_(grouped_mus, new_mus)
            del new_mus

            # update grouped_etas: eta = lr / (1 + lambd * lr * step ** alpha)
            new_etas = torch._foreach_pow(grouped_state_steps, alpha)
            torch._foreach_mul_(new_etas, lambd)
            torch._foreach_mul_(new_etas, lr)
            torch._foreach_add_(new_etas, 1)
            torch._foreach_reciprocal_(new_etas)
            torch._foreach_mul_(new_etas, lr)
            torch._foreach_copy_(grouped_etas, new_etas)
        else:
            step = grouped_state_steps[0].item()
            new_etas = []
            new_mus = []
            for i in range(len(grouped_mus)):
                new_eta = _to_tensor(
                    lr / (1 + lambd * lr * step**alpha), device=device
                )
                new_etas.append(new_eta)
                new_mu = _to_tensor(1 / max(1, step - t0), device=device)
                new_mus.append(new_mu)
            torch._foreach_copy_(grouped_etas, new_etas)
            torch._foreach_copy_(grouped_mus, new_mus)
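

# A minimal usage sketch (``model``, ``loss_fn``, and ``data`` are illustrative
# stand-ins, not part of this module):
#
#     optimizer = torch.optim.ASGD(model.parameters(), lr=1e-2, t0=1e6)
#     for input, target in data:
#         optimizer.zero_grad()
#         loss_fn(model(input), target).backward()
#         optimizer.step()
#
# The averaged iterate is kept in ``optimizer.state[p]["ax"]``; it is updated
# in place each step and is not automatically swapped into the parameters.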