
"""RMSprop optimizer implementation."""

import tensorflow.compat.v2 as tf

from keras.optimizers import optimizer
from keras.saving.object_registration import register_keras_serializable
from tensorflow.python.util.tf_export import keras_export


@register_keras_serializable()
@keras_export(
    "keras.optimizers.experimental.RMSprop",
    "keras.optimizers.RMSprop",
    v1=[],
)
class RMSprop(optimizer.Optimizer):
    """Optimizer that implements the RMSprop algorithm.

    The gist of RMSprop is to:

    - Maintain a moving (discounted) average of the square of gradients
    - Divide the gradient by the root of this average

    This implementation of RMSprop uses plain momentum, not Nesterov momentum.

    The centered version additionally maintains a moving average of the
    gradients, and uses that average to estimate the variance.
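
    A sketch of the basic step (non-centered, zero momentum), where `ms` is
    the per-variable moving average of squared gradients maintained by this
    optimizer:

        ms = rho * ms + (1 - rho) * gradient ** 2
        variable = variable - learning_rate * gradient / sqrt(ms + epsilon)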

    Args:
      learning_rate: Initial value for the learning rate:
        either a floating point value,
        or a `tf.keras.optimizers.schedules.LearningRateSchedule` instance.
        Defaults to 0.001.
      rho: float, defaults to 0.9. Discounting factor for the old gradients.
      momentum: float, defaults to 0.0. If not 0.0, the optimizer tracks the
        momentum value, with a decay rate equal to `1 - momentum`.
      epsilon: A small constant for numerical stability. This epsilon is
        "epsilon hat" in the Kingma and Ba paper (in the formula just before
        Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to
        1e-7.
      centered: Boolean. If `True`, gradients are normalized by the estimated
        variance of the gradient; if `False`, by the uncentered second moment.
        Setting this to `True` may help with training, but is slightly more
        expensive in terms of computation and memory. Defaults to `False`.
      {{base_optimizer_keyword_args}}

    Usage:

    >>> opt = tf.keras.optimizers.experimental.RMSprop(learning_rate=0.1)
    >>> var1 = tf.Variable(10.0)
    >>> loss = lambda: (var1 ** 2) / 2.0  # d(loss) / d(var1) = var1
    >>> opt.minimize(loss, [var1])
    >>> var1.numpy()
    9.683772
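
    In the example above, the first step accumulates
    `ms = 0.9 * 0 + 0.1 * 10.0 ** 2 = 10` and then applies
    `var1 = 10.0 - 0.1 * 10.0 / sqrt(10 + 1e-07)`, which is approximately
    `9.683772`.

    Momentum and centering are opt-in. A construction-only sketch (the
    training step is unchanged):

    >>> opt = tf.keras.optimizers.experimental.RMSprop(
    ...     learning_rate=0.1, rho=0.9, momentum=0.9, centered=True)

    A `tf.keras.optimizers.schedules.LearningRateSchedule` can also be passed
    as `learning_rate` in place of a float, for example:

    >>> lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    ...     0.1, decay_steps=1000, decay_rate=0.96)
    >>> opt = tf.keras.optimizers.experimental.RMSprop(
    ...     learning_rate=lr_schedule)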

    Reference:
      - [Hinton, 2012](
        http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
    """

    def __init__(
        self,
        learning_rate=0.001,
        rho=0.9,
        momentum=0.0,
        epsilon=1e-7,
        centered=False,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=100,
        jit_compile=True,
        name="RMSprop",
        **kwargs,
    ):
        super().__init__(
            name=name,
            weight_decay=weight_decay,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            jit_compile=jit_compile,
            **kwargs,
        )
        self._learning_rate = self._build_learning_rate(learning_rate)
        self.rho = rho
        self.momentum = momentum
        self.epsilon = epsilon
        self.centered = centered

    def build(self, var_list):
        super().build(var_list)
        if hasattr(self, "_built") and self._built:
            return
        self._built = True
        # One "velocity" slot (moving average of squared gradients) per
        # model variable.
        self._velocities = []
        for var in var_list:
            self._velocities.append(
                self.add_variable_from_reference(var, "velocity")
            )

        # Momentum slots are created only when momentum is enabled.
        self._momentums = []
        if self.momentum > 0:
            for var in var_list:
                self._momentums.append(
                    self.add_variable_from_reference(var, "momentum")
                )

        # The centered variant also tracks a moving average of the gradients.
        self._average_gradients = []
        if self.centered:
            for var in var_list:
                self._average_gradients.append(
                    self.add_variable_from_reference(var, "average_gradient")
                )

    def update_step(self, gradient, variable):
        """Update step given gradient and the associated model variable."""
        lr = tf.cast(self.learning_rate, variable.dtype)

        var_key = self._var_key(variable)
        velocity = self._velocities[self._index_dict[var_key]]
        momentum = None
        if self.momentum > 0:
            momentum = self._momentums[self._index_dict[var_key]]
        average_grad = None
        if self.centered:
            average_grad = self._average_gradients[self._index_dict[var_key]]

        rho = self.rho
        if isinstance(gradient, tf.IndexedSlices):
            # Sparse gradients: update only the rows named by
            # `gradient.indices`.
            velocity.assign(rho * velocity)
            velocity.scatter_add(
                tf.IndexedSlices(
                    tf.square(gradient.values) * (1 - rho), gradient.indices
                )
            )
            if self.centered:
                average_grad.assign(rho * average_grad)
                average_grad.scatter_add(
                    tf.IndexedSlices(
                        gradient.values * (1 - rho), gradient.indices
                    )
                )
                denominator = (
                    velocity - tf.square(average_grad) + self.epsilon
                )
            else:
                denominator = velocity + self.epsilon
            denominator_slices = tf.gather(denominator, gradient.indices)
            increment = tf.IndexedSlices(
                lr * gradient.values * tf.math.rsqrt(denominator_slices),
                gradient.indices,
            )

            if self.momentum > 0:
                momentum.assign(self.momentum * momentum)
                momentum.scatter_add(increment)
                variable.assign_add(-momentum)
            else:
                variable.scatter_add(-increment)
        else:
            # Dense gradients.
            velocity.assign(rho * velocity + (1 - rho) * tf.square(gradient))
            if self.centered:
                average_grad.assign(rho * average_grad + (1 - rho) * gradient)
                denominator = (
                    velocity - tf.square(average_grad) + self.epsilon
                )
            else:
                denominator = velocity + self.epsilon
            increment = lr * gradient * tf.math.rsqrt(denominator)
            if self.momentum > 0:
                momentum.assign(self.momentum * momentum + increment)
                variable.assign_add(-momentum)
            else:
                variable.assign_add(-increment)

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "learning_rate": self._serialize_hyperparameter(
                    self._learning_rate
                ),
                "rho": self.rho,
                "momentum": self.momentum,
                "epsilon": self.epsilon,
                "centered": self.centered,
            }
        )
        return config


# Splice the shared base-optimizer keyword-argument docs into the class
# docstring template above.
RMSprop.__doc__ = RMSprop.__doc__.replace(
    "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)