
import torch
from torch import Tensor

from .optimizer import (
    Optimizer,
    _use_grad_for_differentiable,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _foreach_doc,
    _maximize_doc,
    _view_as_real,
)
from typing import List, Optional

__all__ = ["Rprop", "rprop"]


class Rprop(Optimizer):
    def __init__(
        self,
        params,
        lr=1e-2,
        etas=(0.5, 1.2),
        step_sizes=(1e-6, 50),
        *,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        differentiable: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 < etas[0] < 1.0 < etas[1]:
            raise ValueError(f"Invalid eta values: {etas[0]}, {etas[1]}")

        defaults = dict(
            lr=lr,
            etas=etas,
            step_sizes=step_sizes,
            foreach=foreach,
            maximize=maximize,
            differentiable=differentiable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)

    def _init_group(self, group, params, grads, prevs, step_sizes):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params.append(p)
            grad = p.grad
            if grad.is_sparse:
                raise RuntimeError("Rprop does not support sparse gradients")

            grads.append(grad)
            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state["step"] = 0
                state["prev"] = torch.zeros_like(p, memory_format=torch.preserve_format)
                if p.dtype.is_complex:
                    # Complex numbers are handled as two independent real numbers,
                    # so the initial step size must be non-zero in the imaginary part too.
                    state["step_size"] = grad.new().resize_as_(grad).fill_(
                        complex(group["lr"], group["lr"])
                    )
                else:
                    state["step_size"] = grad.new().resize_as_(grad).fill_(group["lr"])

            prevs.append(state["prev"])
            step_sizes.append(state["step_size"])

            state["step"] += 1
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params = []
            grads = []
            prevs = []
            step_sizes = []
            etaminus, etaplus = group["etas"]
            step_size_min, step_size_max = group["step_sizes"]
            foreach = group["foreach"]
            maximize = group["maximize"]

            has_complex = self._init_group(group, params, grads, prevs, step_sizes)

            rprop(
                params,
                grads,
                prevs,
                step_sizes,
                step_size_min=step_size_min,
                step_size_max=step_size_max,
                etaminus=etaminus,
                etaplus=etaplus,
                foreach=foreach,
                maximize=maximize,
                differentiable=group["differentiable"],
                has_complex=has_complex,
            )

        return loss


Rprop.__doc__ = r"""Implements the resilient backpropagation algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \theta_0 \in \mathbf{R}^d \text{ (params)},f(\theta)
                \text{ (objective)},                                                             \\
            &\hspace{13mm}      \eta_{+/-} \text{ (etaplus, etaminus)}, \Gamma_{max/min}
                \text{ (step sizes)}                                                             \\
            &\textbf{initialize} :   g^0_{prev} \leftarrow 0,
                \: \eta_0 \leftarrow \text{lr (learning rate)}                                   \\
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \textbf{for} \text{  } i = 0, 1, \ldots, d-1 \: \mathbf{do}            \\
            &\hspace{10mm}  \textbf{if} \:   g^i_{prev} g^i_t  > 0                               \\
            &\hspace{15mm}  \eta^i_t \leftarrow \mathrm{min}(\eta^i_{t-1} \eta_{+},
                \Gamma_{max})                                                                    \\
            &\hspace{10mm}  \textbf{else if}  \:  g^i_{prev} g^i_t < 0                           \\
            &\hspace{15mm}  \eta^i_t \leftarrow \mathrm{max}(\eta^i_{t-1} \eta_{-},
                \Gamma_{min})                                                                    \\
            &\hspace{15mm}  g^i_t \leftarrow 0                                                   \\
            &\hspace{10mm}  \textbf{else}  \:                                                    \\
            &\hspace{15mm}  \eta^i_t \leftarrow \eta^i_{t-1}                                     \\
            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1}- \eta_t \mathrm{sign}(g_t)             \\
            &\hspace{5mm}g_{prev} \leftarrow  g_t                                                \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

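    For example, a typical optimization step with this optimizer looks like the
    sketch below; ``model``, ``loss_fn``, ``input``, and ``target`` are assumed
    placeholders for illustration, not objects defined by this module.

    Example:
        >>> optimizer = torch.optim.Rprop(model.parameters(), lr=0.01, etas=(0.5, 1.2))
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()
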
    For further details regarding the algorithm we refer to the paper
    `A Direct Adaptive Method for Faster Backpropagation Learning: The RPROP Algorithm
    <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.21.1417>`_.
    """ + rf"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        etas (Tuple[float, float], optional): pair of (etaminus, etaplus), that
            are multiplicative increase and decrease factors
            (default: (0.5, 1.2))
        step_sizes (Tuple[float, float], optional): a pair of minimal and
            maximal allowed step sizes (default: (1e-6, 50))
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}

    """


def rprop(
    params: List[Tensor],
    grads: List[Tensor],
    prevs: List[Tensor],
    step_sizes: List[Tensor],
    # Keyword-only args with defaults are not supported by functions compiled with
    # torchscript, so these flags are kept as regular keyword arguments for now
    # (the functional API is compiled by torch/distributed/optim).
    foreach: Optional[bool] = None,
    maximize: bool = False,
    differentiable: bool = False,
    has_complex: bool = False,
    *,
    step_size_min: float,
    step_size_max: float,
    etaminus: float,
    etaplus: float,
):
    r"""Functional API that performs rprop algorithm computation.

    See :class:`~torch.optim.Rprop` for details.
    """
    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_rprop
    else:
        func = _single_tensor_rprop

    func(
        params,
        grads,
        prevs,
        step_sizes,
        step_size_min=step_size_min,
        step_size_max=step_size_max,
        etaminus=etaminus,
        etaplus=etaplus,
        maximize=maximize,
        differentiable=differentiable,
        has_complex=has_complex,
    )


def _single_tensor_rprop(
    params: List[Tensor],
    grads: List[Tensor],
    prevs: List[Tensor],
    step_sizes: List[Tensor],
    *,
    step_size_min: float,
    step_size_max: float,
    etaminus: float,
    etaplus: float,
    maximize: bool,
    differentiable: bool,
    has_complex: bool,
):
    for i, param in enumerate(params):
        grad = grads[i]
        grad = grad if not maximize else -grad
        prev = prevs[i]
        step_size = step_sizes[i]

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            prev = torch.view_as_real(prev)
            param = torch.view_as_real(param)
            step_size = torch.view_as_real(step_size)
        if differentiable:
            sign = grad.mul(prev.clone()).sign()
        else:
            sign = grad.mul(prev).sign()
        sign[sign.gt(0)] = etaplus
        sign[sign.lt(0)] = etaminus
        sign[sign.eq(0)] = 1

        # update step sizes with step size updates
        step_size.mul_(sign).clamp_(step_size_min, step_size_max)

        # for dir < 0, dfdx = 0
        # for dir >= 0, dfdx = dfdx
        grad = grad.clone(memory_format=torch.preserve_format)
        grad[sign.eq(etaminus)] = 0

        # update parameters
        param.addcmul_(grad.sign(), step_size, value=-1)
        prev.copy_(grad)


def _multi_tensor_rprop(
    params: List[Tensor],
    grads: List[Tensor],
    prevs: List[Tensor],
    step_sizes: List[Tensor],
    *,
    step_size_min: float,
    step_size_max: float,
    etaminus: float,
    etaplus: float,
    maximize: bool,
    differentiable: bool,
    has_complex: bool,
):
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, prevs, step_sizes]
    )
    for ((grouped_params, grouped_grads, grouped_prevs, grouped_step_sizes), _) in grouped_tensors.values():
        # Handle complex params by viewing them as pairs of real tensors
        if has_complex:
            _view_as_real(grouped_params, grouped_grads, grouped_prevs, grouped_step_sizes)

        signs = torch._foreach_mul(grouped_grads, grouped_prevs)
        if maximize:
            torch._foreach_neg_(signs)

        # At this point signs holds grad * prev_grad, so the previous grads are no
        # longer needed and grouped_prevs can be reused to store the (possibly
        # negated) current grads for the next iteration.
        torch._foreach_copy_(grouped_prevs, grouped_grads)
        if maximize:
            torch._foreach_neg_(grouped_prevs)
        grouped_grads = grouped_prevs

        torch._foreach_sign_(signs)
        for sign in signs:
            sign[sign.gt(0)] = etaplus
            sign[sign.lt(0)] = etaminus
            sign[sign.eq(0)] = 1

        # update step sizes with step size updates
        torch._foreach_mul_(grouped_step_sizes, signs)
        for step_size in grouped_step_sizes:
            step_size.clamp_(step_size_min, step_size_max)

        # for dir < 0, dfdx = 0
        # for dir >= 0, dfdx = dfdx
        grouped_grads = list(grouped_grads)
        for i in range(len(grouped_grads)):
            grouped_grads[i][signs[i].eq(etaminus)] = 0

        # signs are not used after this point; delete them explicitly to save memory
        del signs

        # update parameters
        grad_signs = [grad.sign() for grad in grouped_grads]
        torch._foreach_addcmul_(grouped_params, grad_signs, grouped_step_sizes, value=-1)
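

# A minimal usage sketch of the functional entry point defined above. The tensor
# values below are illustrative assumptions; ``rprop`` mutates ``params``,
# ``prevs``, and ``step_sizes`` in place, applying one iteration of the update
# rule documented in ``Rprop.__doc__``.
if __name__ == "__main__":
    _params = [torch.zeros(3)]
    _grads = [torch.tensor([0.5, -0.25, 0.0])]
    _prevs = [torch.zeros(3)]               # g_prev starts at 0
    _step_sizes = [torch.full((3,), 1e-2)]  # eta_0 equals the learning rate
    rprop(
        _params,
        _grads,
        _prevs,
        _step_sizes,
        foreach=False,
        maximize=False,
        differentiable=False,
        has_complex=False,
        step_size_min=1e-6,
        step_size_max=50.0,
        etaminus=0.5,
        etaplus=1.2,
    )
    # With g_prev = 0 every sign product is zero, so each step size stays at 1e-2
    # and the parameters move by -1e-2 * sign(grad), i.e. to [-0.01, 0.01, 0.0].
    print(_params[0])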