import torch
from torch import Tensor

from .optimizer import (
    Optimizer,
    _use_grad_for_differentiable,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _foreach_doc,
    _maximize_doc,
    _view_as_real,
)
from typing import List, Optional

__all__ = ["Adadelta", "adadelta"]


class Adadelta(Optimizer):
    def __init__(
        self,
        params,
        lr=1.0,
        rho=0.9,
        eps=1e-6,
        weight_decay=0,
        foreach: Optional[bool] = None,
        *,
        maximize: bool = False,
        differentiable: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= rho <= 1.0:
            raise ValueError(f"Invalid rho value: {rho}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr, rho=rho, eps=eps, weight_decay=weight_decay,
            maximize=maximize, foreach=foreach, differentiable=differentiable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)

    def _init_group(self, group, params_with_grad, grads, square_avgs, acc_deltas):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)
            if p.grad.is_sparse:
                raise RuntimeError("Adadelta does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]

            # Lazy state initialization
            if len(state) == 0:
                state["step"] = 0
                state["square_avg"] = torch.zeros_like(p, memory_format=torch.preserve_format)
                state["acc_delta"] = torch.zeros_like(p, memory_format=torch.preserve_format)

            square_avgs.append(state["square_avg"])
            acc_deltas.append(state["acc_delta"])

            state["step"] += 1
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            square_avgs = []
            acc_deltas = []
            lr, rho, eps, weight_decay, foreach, maximize, differentiable = (
                group["lr"], group["rho"], group["eps"], group["weight_decay"],
                group["foreach"], group["maximize"], group["differentiable"],
            )

            has_complex = self._init_group(group, params_with_grad, grads, square_avgs, acc_deltas)

            adadelta(
                params_with_grad,
                grads,
                square_avgs,
                acc_deltas,
                lr=lr,
                rho=rho,
                eps=eps,
                weight_decay=weight_decay,
                foreach=foreach,
                maximize=maximize,
                differentiable=differentiable,
                has_complex=has_complex,
            )

        return loss


Adadelta.__doc__ = (
    r"""Implements Adadelta algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)},
                \: f(\theta) \text{ (objective)}, \: \rho \text{ (decay)},
                \: \lambda \text{ (weight decay)}                                                \\
            &\textbf{initialize} :  v_0  \leftarrow 0 \: \text{ (square avg)},
                \: u_0 \leftarrow 0 \: \text{ (accumulate variables)}                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}if \: \lambda \neq 0                                                    \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm} v_t      \leftarrow v_{t-1} \rho + g^2_t (1 - \rho)                    \\
            &\hspace{5mm}\Delta x_t    \leftarrow   \frac{\sqrt{u_{t-1} +
                \epsilon }}{ \sqrt{v_t + \epsilon}  }g_t \hspace{21mm}                           \\
            &\hspace{5mm} u_t  \leftarrow   u_{t-1}  \rho +
                 \Delta x^2_t  (1 - \rho)                                                        \\
            &\hspace{5mm}\theta_t      \leftarrow   \theta_{t-1} - \gamma  \Delta x_t            \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}
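
    where :math:`v_t` and :math:`u_t` are maintained per parameter as the
    ``square_avg`` and ``acc_delta`` entries of the optimizer state.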

    For further details regarding the algorithm we refer to `ADADELTA: An Adaptive Learning Rate Method`_.
    """
    + rf"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        rho (float, optional): coefficient used for computing a running average
            of squared gradients (default: 0.9). A higher value of `rho` will
            result in a slower average, which can be helpful for preventing
            oscillations in the learning process.
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-6).
        lr (float, optional): coefficient that scales delta before it is applied
            to the parameters (default: 1.0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}
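
    Example::

        >>> # Illustrative usage sketch; ``model``, ``input`` and ``target`` are
        >>> # assumed to be defined elsewhere.
        >>> optimizer = torch.optim.Adadelta(model.parameters(), lr=1.0, rho=0.9)
        >>> optimizer.zero_grad()
        >>> torch.nn.functional.mse_loss(model(input), target).backward()
        >>> optimizer.step()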

    .. _ADADELTA\: An Adaptive Learning Rate Method:
        https://arxiv.org/abs/1212.5701

    """
)


def adadelta(
    params: List[Tensor],
    grads: List[Tensor],
    square_avgs: List[Tensor],
    acc_deltas: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript,
    # so these are kept as positional arguments with defaults for now
    foreach: Optional[bool] = None,
    differentiable: bool = False,
    has_complex: bool = False,
    *,
    lr: float,
    rho: float,
    eps: float,
    weight_decay: float,
    maximize: bool,
):
    r"""Functional API that performs Adadelta algorithm computation.

    See :class:`~torch.optim.Adadelta` for details.
    """
    # Choose between the single-tensor and the multi-tensor (foreach) implementations.
    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adadelta
    else:
        func = _single_tensor_adadelta

    func(
        params,
        grads,
        square_avgs,
        acc_deltas,
        lr=lr,
        rho=rho,
        eps=eps,
        weight_decay=weight_decay,
        maximize=maximize,
        differentiable=differentiable,
        has_complex=has_complex,
    )


def _single_tensor_adadelta(
    params: List[Tensor],
    grads: List[Tensor],
    square_avgs: List[Tensor],
    acc_deltas: List[Tensor],
    *,
    lr: float,
    rho: float,
    eps: float,
    weight_decay: float,
    maximize: bool,
    differentiable: bool,
    has_complex: bool,
):
    for param, grad, square_avg, acc_delta in zip(params, grads, square_avgs, acc_deltas):
        grad = grad if not maximize else -grad

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            square_avg = torch.view_as_real(square_avg)
            acc_delta = torch.view_as_real(acc_delta)
            grad = torch.view_as_real(grad)

        square_avg.mul_(rho).addcmul_(grad, grad, value=1 - rho)
        std = square_avg.add(eps).sqrt_()
        delta = acc_delta.add(eps).sqrt_()
        if differentiable:
            delta = delta.clone()
        delta.div_(std).mul_(grad)
        acc_delta.mul_(rho).addcmul_(delta, delta, value=1 - rho)

        if torch.is_complex(param):
            delta = torch.view_as_complex(delta)
        param.add_(delta, alpha=-lr)


def _multi_tensor_adadelta(
    params: List[Tensor],
    grads: List[Tensor],
    square_avgs: List[Tensor],
    acc_deltas: List[Tensor],
    *,
    lr: float,
    rho: float,
    eps: float,
    weight_decay: float,
    maximize: bool,
    differentiable: bool,
    has_complex: bool,
):
    assert not differentiable, "_foreach ops don't support autograd"

    if len(params) == 0:
        return

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, square_avgs, acc_deltas]
    )
    for (device_params, device_grads, device_square_avgs, device_acc_deltas), _ in grouped_tensors.values():
        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        if has_complex:
            _view_as_real(device_params, device_grads, device_square_avgs, device_acc_deltas)

        if weight_decay != 0:
            if maximize:
                # Re-use the intermediate memory already allocated for the negated grads
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay)

        torch._foreach_mul_(device_square_avgs, rho)
        torch._foreach_addcmul_(device_square_avgs, device_grads, device_grads, value=1 - rho)

        std = torch._foreach_add(device_square_avgs, eps)
        torch._foreach_sqrt_(std)

        deltas = torch._foreach_add(device_acc_deltas, eps)
        torch._foreach_sqrt_(deltas)
        torch._foreach_div_(deltas, std)
        torch._foreach_mul_(deltas, device_grads)

        torch._foreach_mul_(device_acc_deltas, rho)
        torch._foreach_addcmul_(device_acc_deltas, deltas, deltas, value=1 - rho)

        torch._foreach_add_(device_params, deltas, alpha=-lr)