import torch
from torch import Tensor
from .optimizer import (
    Optimizer, ParamsT, _capturable_doc, _default_to_fused_or_foreach,
    _differentiable_doc, _dispatch_sqrt, _foreach_doc, _fused_doc, _get_value,
    _maximize_doc, _stack_if_compiling, _use_grad_for_differentiable, _view_as_real)
from typing import List, Optional, Tuple, Union
from torch.utils._foreach_utils import _get_fused_kernels_supported_devices

__all__ = ["AdamW", "adamw"]


class AdamW(Optimizer):
    def __init__(self, params: ParamsT, lr: Union[float, Tensor] = 1e-3,
                 betas: Tuple[float, float] = (0.9, 0.999), eps: float = 1e-8,
                 weight_decay: float = 1e-2, amsgrad: bool = False, *,
                 maximize: bool = False, foreach: Optional[bool] = None,
                 capturable: bool = False, differentiable: bool = False,
                 fused: Optional[bool] = None):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if isinstance(lr, Tensor) and foreach and not capturable:
            raise ValueError("lr as a Tensor is not supported for capturable=False and foreach=True")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        amsgrad=amsgrad, foreach=foreach, maximize=maximize,
                        capturable=capturable, differentiable=differentiable, fused=fused)
        super().__init__(params, defaults)

        if fused:
            if differentiable:
                raise RuntimeError("`fused` does not support `differentiable`")
            self._step_supports_amp_scaling = True
            # The fused implementation only exists for floating-point params on supported devices.
            fused_supported_devices = _get_fused_kernels_supported_devices()
            if not all(p.device.type in fused_supported_devices and torch.is_floating_point(p)
                       for pg in self.param_groups for p in pg["params"]):
                raise RuntimeError("`fused=True` requires all the params to be floating point "
                                   f"Tensors of supported devices: {fused_supported_devices}.")
            if foreach:
                raise RuntimeError("`fused` and `foreach` cannot be `True` together.")

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("amsgrad", False)
            group.setdefault("maximize", False)
            group.setdefault("foreach", None)
            group.setdefault("capturable", False)
            group.setdefault("differentiable", False)
            group.setdefault("fused", None)
        # Older checkpoints stored `step` as a plain number; convert it to a tensor.
        state_values = list(self.state.values())
        step_is_tensor = len(state_values) != 0 and torch.is_tensor(state_values[0]["step"])
        if not step_is_tensor:
            for s in state_values:
                s["step"] = torch.tensor(float(s["step"]), dtype=torch.float32)

    def _init_group(self, group, params_with_grad, grads, amsgrad, exp_avgs,
                    exp_avg_sqs, max_exp_avg_sqs, state_steps):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)
            if p.grad.is_sparse:
                raise RuntimeError("AdamW does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]
            if len(state) == 0:
                # Lazy state initialization; capturable/fused keep `step` on the param's device.
                state["step"] = (torch.zeros((), dtype=torch.float32, device=p.device)
                                 if group["capturable"] or group["fused"]
                                 else torch.tensor(0.0, dtype=torch.float32))
                # Exponential moving averages of the gradient and of its square.
                state["exp_avg"] = torch.zeros_like(p, memory_format=torch.preserve_format)
                state["exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)
                if amsgrad:
                    state["max_exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)

            exp_avgs.append(state["exp_avg"])
            exp_avg_sqs.append(state["exp_avg_sq"])
            if amsgrad:
                max_exp_avg_sqs.append(state["max_exp_avg_sq"])
            if group["differentiable"] and state["step"].requires_grad:
                raise RuntimeError("`requires_grad` is not supported for `step` in differentiable mode")
            if group["foreach"] and isinstance(group["lr"], Tensor) and not group["capturable"]:
                raise RuntimeError("lr as a Tensor is not supported for capturable=False and foreach=True")
            state_steps.append(state["step"])
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad, grads, exp_avgs = [], [], []
            exp_avg_sqs, max_exp_avg_sqs, state_steps = [], [], []
            amsgrad = group["amsgrad"]
            beta1, beta2 = group["betas"]

            has_complex = self._init_group(group, params_with_grad, grads, amsgrad,
                                           exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps)

            adamw(params_with_grad, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps,
                  amsgrad=amsgrad, beta1=beta1, beta2=beta2, lr=group["lr"],
                  weight_decay=group["weight_decay"], eps=group["eps"],
                  maximize=group["maximize"], foreach=group["foreach"],
                  capturable=group["capturable"], differentiable=group["differentiable"],
                  fused=group["fused"], grad_scale=getattr(self, "grad_scale", None),
                  found_inf=getattr(self, "found_inf", None), has_complex=has_complex)

        return loss


AdamW.__doc__ = r"""Implements AdamW algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{(lr)}, \: \beta_1, \beta_2
                \text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},
                \: \epsilon \text{ (epsilon)}                                                    \\
            &\hspace{13mm}      \lambda \text{(weight decay)},  \: \textit{amsgrad},
                \: \textit{maximize}                                                             \\
            &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0
                \text{ (second moment)}, \: \widehat{v_0}^{max}\leftarrow 0              \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\

            &\hspace{5mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{10mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1}         \\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{5mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                   \\
            &\hspace{5mm}\textbf{if} \: amsgrad                                                  \\
            &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
                \widehat{v_t})                                                                   \\
            &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big)                                 \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_.
    """ + fr"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
            is not yet supported for all our implementations. Please use a float
            LR if you are not also specifying fused=True or capturable=True.
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
        {_maximize_doc}
        {_foreach_doc}
        {_capturable_doc}
        {_differentiable_doc}
        {_fused_doc}
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ

    """


def adamw(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor],
          exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor], state_steps: List[Tensor],
          foreach: Optional[bool] = None, capturable: bool = False,
          differentiable: bool = False, fused: Optional[bool] = None,
          grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None,
          has_complex: bool = False, *, amsgrad: bool, beta1: float, beta2: float,
          lr: Union[float, Tensor], weight_decay: float, eps: float, maximize: bool):
    r"""Functional API that performs AdamW algorithm computation.

    See :class:`~torch.optim.AdamW` for details.
    """
    if not torch._utils.is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")

    # Pick an implementation: respect an explicit user choice, otherwise fall back
    # to a device-based default.
    if fused is None and foreach is None:
        fused, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
        # A tensor lr is only supported by the foreach path in capturable mode.
        if foreach and isinstance(lr, Tensor) and not capturable:
            foreach = False
    if fused is None:
        fused = False
    if foreach is None:
        foreach = False

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    if fused and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with fused optimizers")

    if fused and not torch.jit.is_scripting():
        func = _fused_adamw
    elif foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adamw
    else:
        func = _single_tensor_adamw

    func(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps,
         amsgrad=amsgrad, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay,
         eps=eps, maximize=maximize, capturable=capturable, differentiable=differentiable,
         grad_scale=grad_scale, found_inf=found_inf, has_complex=has_complex)
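
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the upstream module):
# typical use of the class-based API defined above.  The tiny model, data, and
# loop length are placeholder assumptions chosen purely for the example.
# ---------------------------------------------------------------------------
def _example_adamw_usage() -> None:
    model = torch.nn.Linear(10, 1)
    opt = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)
    inputs, targets = torch.randn(8, 10), torch.randn(8, 1)
    for _ in range(3):
        opt.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        loss.backward()
        opt.step()  # applies the decoupled-weight-decay update documented above
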

def _single_tensor_adamw(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor],
                         exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor],
                         state_steps: List[Tensor], grad_scale: Optional[Tensor],
                         found_inf: Optional[Tensor], *, amsgrad: bool, beta1: float,
                         beta2: float, lr: Union[float, Tensor], weight_decay: float,
                         eps: float, maximize: bool, capturable: bool, differentiable: bool,
                         has_complex: bool):
    assert grad_scale is None and found_inf is None
    if torch.jit.is_scripting():
        assert isinstance(lr, float)  # the scripted overloads below only accept a float lr

    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg, exp_avg_sq, step_t = exp_avgs[i], exp_avg_sqs[i], state_steps[i]

        if capturable and not torch._utils.is_compiling():
            assert (param.is_cuda and step_t.is_cuda) or (param.is_xla and step_t.is_xla), (
                "If capturable=True, params and state_steps must be CUDA or XLA tensors.")

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)
            if amsgrad:
                max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i])
            param = torch.view_as_real(param)

        step_t += 1

        # Decoupled weight decay: shrink the parameter directly, not through the gradient.
        param.mul_(1 - lr * weight_decay)

        # Exponential moving averages of the gradient and of its square.
        exp_avg.lerp_(grad, 1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

        step = _get_value(step_t)
        bias_correction1 = 1 - beta1 ** step
        bias_correction2 = 1 - beta2 ** step
        step_size = lr / bias_correction1
        bias_correction2_sqrt = _dispatch_sqrt(bias_correction2)

        if amsgrad:
            # AMSGrad: keep the running maximum of the second moment for the denominator.
            torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
            denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps)
        else:
            denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)

        param.addcdiv_(exp_avg, denom, value=-step_size)

        # Undo the real view of the AMSGrad buffer for complex params.
        if amsgrad and torch.is_complex(params[i]):
            max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i])
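
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition): one hand-rolled update following the
# formula in the class docstring, assuming default hyper-parameters and t = 1.
# It is only meant to connect the math above to `_single_tensor_adamw`.
# ---------------------------------------------------------------------------
def _example_single_update() -> Tensor:
    lr, beta1, beta2, eps, wd = 1e-3, 0.9, 0.999, 1e-8, 1e-2
    theta, grad = torch.tensor([1.0]), torch.tensor([0.5])
    theta = theta * (1 - lr * wd)                    # decoupled weight decay
    m = (1 - beta1) * grad                           # first moment (m_0 = 0)
    v = (1 - beta2) * grad * grad                    # second moment (v_0 = 0)
    m_hat, v_hat = m / (1 - beta1), v / (1 - beta2)  # bias correction at t = 1
    return theta - lr * m_hat / (v_hat.sqrt() + eps)
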

def _multi_tensor_adamw(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor],
                        exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor],
                        state_steps: List[Tensor], grad_scale: Optional[Tensor],
                        found_inf: Optional[Tensor], *, amsgrad: bool, beta1: float,
                        beta2: float, lr: Union[float, Tensor], weight_decay: float,
                        eps: float, maximize: bool, capturable: bool, differentiable: bool,
                        has_complex: bool):
    if len(params) == 0:
        return
    if isinstance(lr, Tensor) and not capturable:
        raise RuntimeError("lr as a Tensor is not supported for capturable=False and foreach=True")
    if capturable and not torch._utils.is_compiling():
        assert all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)), (
            "If capturable=True, params and state_steps must be CUDA tensors.")
    assert not differentiable, "_foreach ops don't support autograd"
    assert grad_scale is None and found_inf is None

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
    for ((device_params, device_grads, device_exp_avgs, device_exp_avg_sqs,
          device_max_exp_avg_sqs, device_state_steps), _) in grouped_tensors.values():
        if maximize:
            device_grads = torch._foreach_neg(device_grads)
        if has_complex:
            if amsgrad:
                _view_as_real(device_params, device_grads, device_exp_avgs,
                              device_exp_avg_sqs, device_max_exp_avg_sqs)
            else:
                _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs)

        # Update steps.  Wrap the scalar once for CPU step tensors so the foreach add
        # stays on the fast path instead of re-wrapping 1 per tensor.
        if not torch._utils.is_compiling() and device_state_steps[0].is_cpu:
            torch._foreach_add_(device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0)
        else:
            torch._foreach_add_(device_state_steps, 1)

        # Decoupled weight decay.
        torch._foreach_mul_(device_params, 1 - lr * weight_decay)

        # Exponential moving averages of the gradient and of its square.
        torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1)
        torch._foreach_mul_(device_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads, 1 - beta2)
        del device_grads

        bias_correction1 = [1 - beta1 ** _get_value(step) for step in device_state_steps]
        bias_correction2 = [1 - beta2 ** _get_value(step) for step in device_state_steps]
        step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])
        bias_correction2_sqrt = [_dispatch_sqrt(bc) for bc in bias_correction2]

        if amsgrad:
            # AMSGrad: use the running maximum of the second moment in the denominator.
            torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
            exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
        else:
            exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

        torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
        torch._foreach_add_(exp_avg_sq_sqrt, eps)
        torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size)

def _fused_adamw(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor],
                 exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor],
                 state_steps: List[Tensor], grad_scale: Optional[Tensor],
                 found_inf: Optional[Tensor], *, amsgrad: bool, beta1: float, beta2: float,
                 lr: Union[float, Tensor], weight_decay: float, eps: float, maximize: bool,
                 capturable: bool, differentiable: bool, has_complex: bool) -> None:
    if not params:
        return
    if differentiable:
        raise RuntimeError("Adam with fused=True does not support differentiable=True")

    grad_scale_dict = {grad_scale.device: grad_scale} if grad_scale is not None else None
    found_inf_dict = {found_inf.device: found_inf} if found_inf is not None else None
    # Only shuffle the lr between devices when it is a non-CPU tensor; otherwise treat it as a scalar.
    lr_dict = {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
    for (device, _), ((device_params, device_grads, device_exp_avgs, device_exp_avg_sqs,
                       device_max_exp_avg_sqs, device_state_steps), _) in grouped_tensors.items():
        device_grad_scale, device_found_inf = None, None
        if grad_scale is not None:
            if device not in grad_scale_dict:
                grad_scale_dict[device] = grad_scale.to(device, non_blocking=True)
            device_grad_scale = grad_scale_dict[device]
        if found_inf is not None:
            if device not in found_inf_dict:
                found_inf_dict[device] = found_inf.to(device, non_blocking=True)
            device_found_inf = found_inf_dict[device]
        if lr_dict is not None and device not in lr_dict:
            lr_dict[device] = lr.to(device=device, non_blocking=True)
            lr = lr_dict[device]
        torch._foreach_add_(device_state_steps, 1)
        torch._fused_adamw_(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs,
                            device_max_exp_avg_sqs, device_state_steps,
                            amsgrad=amsgrad, lr=lr, beta1=beta1, beta2=beta2,
                            weight_decay=weight_decay, eps=eps, maximize=maximize,
                            grad_scale=device_grad_scale, found_inf=device_found_inf)
        # If the GradScaler found infs, roll the step counters back for this device.
        if device_found_inf is not None:
            torch._foreach_sub_(device_state_steps, [device_found_inf] * len(device_state_steps))