import torch
from torch import Tensor
from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _dispatch_sqrt,
                        _stack_if_compiling, _capturable_doc, _differentiable_doc, _foreach_doc,
                        _default_to_fused_or_foreach, _view_as_real)
from typing import List, Optional

__all__ = ['NAdam', 'nadam']


class NAdam(Optimizer):
    def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, momentum_decay=4e-3, decoupled_weight_decay: bool = False,
                 *, foreach: Optional[bool] = None, capturable: bool = False,
                 differentiable: bool = False):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        if not 0.0 <= momentum_decay:
            raise ValueError(f"Invalid momentum_decay value: {momentum_decay}")
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        momentum_decay=momentum_decay, foreach=foreach, capturable=capturable,
                        differentiable=differentiable, decoupled_weight_decay=decoupled_weight_decay)
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('foreach', None)
            group.setdefault('capturable', False)
            group.setdefault('differentiable', False)
            group.setdefault('decoupled_weight_decay', False)
        state_values = list(self.state.values())
        step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step'])
        if not step_is_tensor:
            for s in state_values:
                s['step'] = torch.tensor(float(s['step']), dtype=torch.float32)
        mu_product_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['mu_product'])
        if not mu_product_is_tensor:
            for s in state_values:
                s['mu_product'] = torch.tensor(s['mu_product'], dtype=torch.float32)

    def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_avg_sqs,
                    mu_products, state_steps):
        has_complex = False
        for p in group['params']:
            if p.grad is not None:
                has_complex |= torch.is_complex(p)
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError('NAdam does not support sparse gradients')
                grads.append(p.grad)

                state = self.state[p]
                # Lazy state initialization. `step` and `mu_product` are hosted on the
                # param's device only when capturable, so CUDA-graph capture works.
                if len(state) == 0:
                    state['step'] = (torch.zeros((), dtype=torch.float32, device=p.device)
                                     if group['capturable'] else torch.tensor(0.0, dtype=torch.float32))
                    state['mu_product'] = (torch.ones((), dtype=torch.float32, device=p.device)
                                           if group['capturable'] else torch.tensor(1.0, dtype=torch.float32))
                    # Exponential moving averages of gradient values and of squared gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                mu_products.append(state['mu_product'])
                state_steps.append(state['step'])
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            mu_products = []
            state_steps = []
            beta1, beta2 = group['betas']

            has_complex = self._init_group(group, params_with_grad, grads, exp_avgs,
                                           exp_avg_sqs, mu_products, state_steps)

            nadam(params_with_grad, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps,
                  beta1=beta1, beta2=beta2, lr=group['lr'],
                  weight_decay=group['weight_decay'], momentum_decay=group['momentum_decay'],
                  eps=group['eps'], decoupled_weight_decay=group['decoupled_weight_decay'],
                  foreach=group['foreach'], capturable=group['capturable'],
                  differentiable=group['differentiable'], has_complex=has_complex)

        return loss


NAdam.__doc__ = r"""Implements NAdam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma_t \text{ (lr)}, \: \beta_1,\beta_2 \text{ (betas)},
                \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)}                   \\
            &\hspace{13mm} \: \lambda \text{ (weight decay)}, \:\psi \text{ (momentum decay)}    \\
            &\hspace{13mm} \: \textit{decoupled\_weight\_decay}                                  \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                v_0 \leftarrow 0 \text{ ( second moment)}                                 \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \theta_t \leftarrow \theta_{t-1}                                       \\
            &\hspace{5mm} \textbf{if} \: \lambda \neq 0                                          \\
            &\hspace{10mm}\textbf{if} \: \textit{decoupled\_weight\_decay}                       \\
            &\hspace{15mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1}                    \\
            &\hspace{10mm}\textbf{else}                                                          \\
            &\hspace{15mm} g_t \leftarrow g_t + \lambda \theta_{t-1}                             \\
            &\hspace{5mm} \mu_t \leftarrow \beta_1 \big(1 - \frac{1}{2}  0.96^{t \psi} \big)     \\
            &\hspace{5mm} \mu_{t+1} \leftarrow \beta_1 \big(1 - \frac{1}{2} 0.96^{(t+1)\psi}\big)\\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow \mu_{t+1} m_t/(1-\prod_{i=1}^{t+1}\mu_i)\\[-1.ex]
            & \hspace{11mm} + (1-\mu_t) g_t /(1-\prod_{i=1}^{t} \mu_{i})                         \\
            &\hspace{5mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                   \\
            &\hspace{5mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Incorporating Nesterov Momentum into Adam`_.
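
    In the implementation, the running product :math:`\prod_{i=1}^{t} \mu_i` from the
    update rule above is carried between steps as the per-parameter ``mu_product``
    state tensor.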
    """ + fr"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        momentum_decay (float, optional): momentum decay (default: 4e-3)
        decoupled_weight_decay (bool, optional): whether to use decoupled weight
            decay as in AdamW to obtain NAdamW (default: False)
        {_foreach_doc}
        {_capturable_doc}
        {_differentiable_doc}
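
    Example (an illustrative sketch; assumes an existing ``model``, a ``loss_fn``, and
    an iterable ``data`` of ``(input, target)`` pairs)::

        optimizer = torch.optim.NAdam(model.parameters(), lr=2e-3)
        for input, target in data:
            optimizer.zero_grad()
            loss_fn(model(input), target).backward()
            optimizer.step()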

    .. _Incorporating Nesterov Momentum into Adam:
        https://openreview.net/forum?id=OM0jvwB8jIp57ZJjtNEZ
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101

    """


def nadam(params: List[Tensor],
          grads: List[Tensor],
          exp_avgs: List[Tensor],
          exp_avg_sqs: List[Tensor],
          mu_products: List[Tensor],
          state_steps: List[Tensor],
          # These flags are positional-with-default rather than keyword-only because
          # torchscript does not support keyword-only arguments with defaults.
          decoupled_weight_decay: bool = False,
          foreach: Optional[bool] = None,
          capturable: bool = False,
          differentiable: bool = False,
          has_complex: bool = False,
          *,
          beta1: float,
          beta2: float,
          lr: float,
          weight_decay: float,
          momentum_decay: float,
          eps: float):
    r"""Functional API that performs NAdam algorithm computation.

    See :class:`~torch.optim.NAdam` for details.
    """
    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")

    if not all(isinstance(t, torch.Tensor) for t in mu_products):
        raise RuntimeError("API has changed, `mu_products` argument must contain a list of singleton tensors")

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)

    if foreach and torch.jit.is_scripting():
        raise RuntimeError('torch.jit.script not supported with foreach optimizers')

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_nadam
    else:
        func = _single_tensor_nadam

    func(params, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps,
         beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay,
         momentum_decay=momentum_decay, eps=eps,
         decoupled_weight_decay=decoupled_weight_decay,
         capturable=capturable, differentiable=differentiable, has_complex=has_complex)


def _single_tensor_nadam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor],
                         exp_avg_sqs: List[Tensor], mu_products: List[Tensor], state_steps: List[Tensor],
                         *, beta1: float, beta2: float, lr: float, weight_decay: float,
                         momentum_decay: float, eps: float, decoupled_weight_decay: bool,
                         capturable: bool, differentiable: bool, has_complex: bool):

    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        mu_product = mu_products[i]
        step_t = state_steps[i]

        if torch.is_complex(param):
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)

        if not torch._utils.is_compiling() and capturable:
            assert (
                (param.is_cuda and mu_product.is_cuda and step_t.is_cuda)
                or (param.is_xla and mu_product.is_xla and step_t.is_xla)
            ), "If capturable=True, params, mu_products, and state_steps must be CUDA or XLA tensors."

        # update step
        step_t += 1

        if capturable:
            step = step_t
        else:
            step = _get_value(step_t)

        bias_correction2 = 1 - beta2 ** step

        if weight_decay != 0:
            if decoupled_weight_decay:
                # Perform stepweight decay
                param.mul_(1 - lr * weight_decay)
            else:
                grad = grad.add(param, alpha=weight_decay)

        # calculate the momentum cache \mu^{t} and \mu^{t+1}
        mu = beta1 * (1.0 - 0.5 * (0.96 ** (step * momentum_decay)))
        mu_next = beta1 * (1.0 - 0.5 * (0.96 ** ((step + 1) * momentum_decay)))

        # update mu_product
        mu_product *= mu

        # decay the first and second moment running average coefficient
        exp_avg.lerp_(grad, 1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
        denom = exp_avg_sq.div(bias_correction2).sqrt()

        if differentiable or capturable:
            denom = denom.add(eps)
            # Make autograd track the operations by updating through `grad` and
            # `exp_avg` directly, not through the scalar `value` kwarg of addcdiv.
            mu_product_next = mu_product * mu_next
            grad = grad * (-lr * (1.0 - mu) / (1.0 - mu_product))
            exp_avg = exp_avg * (-lr * mu_next / (1.0 - mu_product_next))
            param.addcdiv_(grad, denom)
            param.addcdiv_(exp_avg, denom)
        else:
            mu_product_next = _get_value(mu_product) * mu_next
            denom.add_(eps)
            param.addcdiv_(grad, denom, value=(-lr * (1.0 - mu) / (1.0 - _get_value(mu_product))))
            param.addcdiv_(exp_avg, denom, value=(-lr * mu_next) / (1.0 - mu_product_next))


def _multi_tensor_nadam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor],
                        exp_avg_sqs: List[Tensor], mu_products: List[Tensor], state_steps: List[Tensor],
                        *, beta1: float, beta2: float, lr: float, weight_decay: float,
                        momentum_decay: float, eps: float, decoupled_weight_decay: bool,
                        capturable: bool, differentiable: bool, has_complex: bool):

    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    if not torch._utils.is_compiling() and capturable:
        assert all(p.is_cuda and mp.is_cuda and step.is_cuda
                   for p, mp, step in zip(params, mu_products, state_steps)), \
            "If capturable=True, params, mu_products, and state_steps must be CUDA tensors."

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps])
    for ((grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs,
          grouped_mu_products, grouped_state_steps), _) in grouped_tensors.values():

        # handle complex parameters as real tensors with a trailing dimension of 2
        if has_complex:
            _view_as_real(grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs)

        # Update steps. If steps are on CPU, wrap the scalar in a tensor once here so
        # the foreach fallback loop does not re-wrap it on every iteration.
        if grouped_state_steps[0].is_cpu:
            torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        if weight_decay != 0:
            if decoupled_weight_decay:
                # Perform stepweight decay
                torch._foreach_mul_(grouped_params, 1 - lr * weight_decay)
            else:
                grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay)

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)

        torch._foreach_mul_(grouped_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2)

        exp_avg_sq_sqrt = torch._foreach_sqrt(grouped_exp_avg_sqs)

        if capturable:
            # mus = beta1 * (1 - 0.5 * 0.96 ** (step * momentum_decay))
            exponent = torch._foreach_mul(grouped_state_steps, momentum_decay)
            mus = torch._foreach_pow(0.96, exponent)
            torch._foreach_mul_(mus, -0.5)
            torch._foreach_add_(mus, 1.0)
            torch._foreach_mul_(mus, beta1)

            # mu_nexts = beta1 * (1 - 0.5 * 0.96 ** ((step + 1) * momentum_decay))
            torch._foreach_add_(exponent, momentum_decay)
            mu_nexts = torch._foreach_pow(0.96, exponent)
            torch._foreach_mul_(mu_nexts, -0.5)
            torch._foreach_add_(mu_nexts, 1.0)
            torch._foreach_mul_(mu_nexts, beta1)

            # save peak memory as we don't need exponent anymore
            del exponent

            bias_correction_sqrt = torch._foreach_pow(beta2, grouped_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_correction_sqrt, 1.0)
            torch._foreach_neg_(bias_correction_sqrt)
            torch._foreach_sqrt_(bias_correction_sqrt)
        else:
            bias_correction_sqrt = [_dispatch_sqrt(1 - beta2 ** _get_value(step))
                                    for step in grouped_state_steps]
            mus = [beta1 * (1.0 - 0.5 * (0.96 ** (_get_value(step) * momentum_decay)))
                   for step in grouped_state_steps]
            mu_nexts = [beta1 * (1.0 - 0.5 * (0.96 ** ((_get_value(step) + 1) * momentum_decay)))
                        for step in grouped_state_steps]

        # update mu_products
        torch._foreach_mul_(grouped_mu_products, mus)

        torch._foreach_div_(exp_avg_sq_sqrt, bias_correction_sqrt)
        torch._foreach_add_(exp_avg_sq_sqrt, eps)
        del bias_correction_sqrt

        if capturable:
            # Build the step-size multiplier for grad, reusing mus' memory:
            # -lr * (1 - mu) / (1 - mu_product)
            torch._foreach_sub_(mus, 1.0)
            torch._foreach_mul_(mus, lr)
            # foreach_sub doesn't allow a scalar as the first arg
            denom = torch._foreach_sub(grouped_mu_products, 1.0)
            torch._foreach_neg_(denom)
            torch._foreach_div_(mus, denom)
            step_size_grads = mus

            # Build the step-size multiplier for exp_avg, reusing mu_nexts' memory:
            # -lr * mu_next / (1 - mu_product * mu_next)
            denom = torch._foreach_mul(grouped_mu_products, mu_nexts)
            torch._foreach_mul_(mu_nexts, lr)
            torch._foreach_sub_(denom, 1.0)
            torch._foreach_div_(mu_nexts, denom)
            step_size_expavg = mu_nexts
            del denom

            # numerator = step_size_grads * grad + step_size_expavg * exp_avg
            numerator = torch._foreach_mul(step_size_grads, grouped_grads)
            torch._foreach_addcmul_(numerator, step_size_expavg, grouped_exp_avgs)

            torch._foreach_addcdiv_(grouped_params, numerator, exp_avg_sq_sqrt)
        else:
            step_size_grads = _stack_if_compiling(
                [(lr * (1.0 - mu) / (1.0 - _get_value(mu_product))) * -1
                 for mu_product, mu in zip(grouped_mu_products, mus)])
            step_size_expavg = _stack_if_compiling(
                [(lr * mu_next / (1.0 - _get_value(mu_product) * mu_next)) * -1
                 for mu_product, mu_next in zip(grouped_mu_products, mu_nexts)])

            torch._foreach_addcdiv_(grouped_params, grouped_grads, exp_avg_sq_sqrt, step_size_grads)
            torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, exp_avg_sq_sqrt, step_size_expavg)
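

# ---------------------------------------------------------------------------
# Illustrative smoke test (a sketch, not part of the module above): runs one
# step on a throwaway parameter and shows the state that `_init_group` lazily
# creates. Passing `decoupled_weight_decay=True` gives NAdamW-style decay, as
# documented in the class docstring.
#
#     w = torch.nn.Parameter(torch.randn(4))
#     opt = NAdam([w], lr=2e-3, weight_decay=0.01, decoupled_weight_decay=True)
#     w.sum().backward()
#     opt.step()
#     sorted(opt.state[w].keys())
#     # -> ['exp_avg', 'exp_avg_sq', 'mu_product', 'step']
# ---------------------------------------------------------------------------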