# mypy: allow-untyped-defs
import torch
import torch.optim._functional as F
from torch import Tensor
from torch.distributed.optim._deprecation_warning import (
    _scripted_functional_optimizer_deprecation_warning,
)


__all__: list[str] = []


# Define a TorchScript-compatible functional Adadelta optimizer that is used
# in a functional way: instead of reading `param.grad` when updating
# parameters, the distributed optimizer passes gradients explicitly to the
# `step` function. This separates gradients from parameters and lets
# multithreaded trainers update parameters without races on accumulating
# into the same `.grad`.
# NOTE: This should only be used by distributed optimizer internals and is
# not meant to be exposed to the user.
@torch.jit.script
class _FunctionalAdadelta:
    def __init__(
        self,
        params: list[Tensor],
        lr: float = 1.0,
        rho: float = 0.9,
        eps: float = 1e-6,
        weight_decay: float = 0.0,
        foreach: bool = False,
        maximize: bool = False,
        _allow_empty_param_list: bool = False,
    ):
        _scripted_functional_optimizer_deprecation_warning(stacklevel=2)
        self.defaults = {
            "lr": lr,
            "rho": rho,
            "eps": eps,
            "weight_decay": weight_decay,
        }
        self.foreach = foreach
        self.maximize = maximize

        if len(params) == 0 and not _allow_empty_param_list:
            raise ValueError("optimizer got an empty parameter list")

        # NOTE: there is only one param_group; adding additional param groups
        # is not supported, as it is not a common use case here.
        self.param_group = {"params": params}

        self.state = torch.jit.annotate(dict[torch.Tensor, dict[str, torch.Tensor]], {})

    def step(self, gradients: list[Tensor | None]):
        params = self.param_group["params"]
        params_with_grad = []
        grads = []
        square_avgs = []
        acc_deltas = []
        state_steps = []
        lr = self.defaults["lr"]
        rho = self.defaults["rho"]
        eps = self.defaults["eps"]
        weight_decay = self.defaults["weight_decay"]

        if len(params) != len(gradients):
            raise ValueError(
                "the gradients passed in does not equal to the size of the parameters!"
                + f"Params length: {len(params)}. "
                + f"Gradients length: {len(gradients)}"
            )
        has_complex = False
        for param, gradient in zip(params, gradients):
            if gradient is not None:
                has_complex |= torch.is_complex(param)
                params_with_grad.append(param)
                grads.append(gradient)
                # Lazy state initialization
                if param not in self.state:
                    self.state[param] = {}
                    state = self.state[param]
                    state["step"] = torch.tensor(0.0)
                    state["square_avg"] = torch.zeros_like(
                        param, memory_format=torch.preserve_format
                    )
                    state["acc_delta"] = torch.zeros_like(
                        param, memory_format=torch.preserve_format
                    )
                state = self.state[param]
                square_avgs.append(state["square_avg"])
                acc_deltas.append(state["acc_delta"])
                state_steps.append(state["step"])

        with torch.no_grad():
            F.adadelta(
                params_with_grad,
                grads,
                square_avgs,
                acc_deltas,
                state_steps,
                lr=lr,
                rho=rho,
                eps=eps,
                weight_decay=weight_decay,
                foreach=self.foreach,
                maximize=self.maximize,
                has_complex=has_complex,
            )
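# The block below is a hypothetical usage sketch, not part of the upstream
# module: in practice this class is constructed and driven by the distributed
# optimizer internals, which pass gradients to `step` explicitly instead of
# reading them from `param.grad`. The tensor shape and hyperparameter values
# here are illustrative assumptions only.
if __name__ == "__main__":
    # One parameter tensor; the constructor takes a flat list of parameters
    # (a single implicit param group).
    w = torch.randn(4, requires_grad=True)
    opt = _FunctionalAdadelta([w], lr=1.0, rho=0.9, eps=1e-6)

    # Produce a gradient the usual way...
    loss = (w * w).sum()
    loss.backward()

    # ...then hand it to step() explicitly. Entries may be None for
    # parameters that received no gradient this iteration.
    opt.step([w.grad])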