
"""Adagrad optimizer implementation."""

import tensorflow.compat.v2 as tf

from keras import initializers
from keras.optimizers import optimizer
from keras.saving.object_registration import register_keras_serializable
from tensorflow.python.util.tf_export import keras_export


@register_keras_serializable()
@keras_export(
    "keras.optimizers.experimental.Adagrad",
    "keras.optimizers.Adagrad",
    v1=[],
)
class Adagrad(optimizer.Optimizer):
    """Optimizer that implements the Adagrad algorithm.

    Adagrad is an optimizer with parameter-specific learning rates,
    which are adapted relative to how frequently a parameter gets
    updated during training. The more updates a parameter receives,
    the smaller the updates.
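
    For each parameter, Adagrad accumulates the sum of squared gradients
    and divides the step by its square root. As a rough sketch of the rule
    applied in `update_step` below, for a variable `var` with gradient
    `grad` and accumulator `accumulator`:

        accumulator += grad ** 2
        var -= learning_rate * grad / sqrt(accumulator + epsilon)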

    Args:
      learning_rate: Initial value for the learning rate:
        either a floating point value,
        or a `tf.keras.optimizers.schedules.LearningRateSchedule` instance.
        Defaults to 0.001.
        Note that `Adagrad` tends to benefit from higher initial learning rate
        values compared to other optimizers.
        To match the exact form in the original paper, use 1.0.
      initial_accumulator_value: Floating point value.
        Starting value for the accumulators (per-parameter momentum values).
        Must be non-negative.
      epsilon: Small floating point value used to maintain numerical stability.
      {{base_optimizer_keyword_args}}

    Reference:
      - [Duchi et al., 2011](
        http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
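
    A minimal usage sketch (the variable and loss below are illustrative
    only; any standard `tf.keras` training loop works the same way):

        opt = tf.keras.optimizers.Adagrad(learning_rate=0.1)
        var = tf.Variable(1.0)
        loss = lambda: (var ** 2) / 2.0  # d(loss)/d(var) = var
        opt.minimize(loss, [var])  # step shrinks as the accumulator grows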
    """

    def __init__(
        self,
        learning_rate=0.001,
        initial_accumulator_value=0.1,
        epsilon=1e-7,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        jit_compile=True,
        name="Adagrad",
        **kwargs,
    ):
        super().__init__(
            weight_decay=weight_decay,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            jit_compile=jit_compile,
            name=name,
            **kwargs,
        )
        self._learning_rate = self._build_learning_rate(learning_rate)
        self.initial_accumulator_value = initial_accumulator_value
        self.epsilon = epsilon

    def build(self, var_list):
        super().build(var_list)
        if hasattr(self, "_built") and self._built:
            return
        self._built = True
        # One accumulator (running sum of squared gradients) per variable.
        self._accumulators = []
        initializer = initializers.Constant(self.initial_accumulator_value)
        for var in var_list:
            self._accumulators.append(
                self.add_variable_from_reference(
                    var,
                    "accumulator",
                    initial_value=initializer(
                        shape=var.shape, dtype=var.dtype
                    ),
                )
            )

    def update_step(self, grad, variable):
        """Update step given gradient and the associated model variable."""
        lr = tf.cast(self.learning_rate, variable.dtype)

        var_key = self._var_key(variable)
        accumulator = self._accumulators[self._index_dict[var_key]]

        if isinstance(grad, tf.IndexedSlices):
            # Sparse gradients: only update the rows named by `grad.indices`.
            accumulator.scatter_add(
                tf.IndexedSlices(grad.values * grad.values, grad.indices)
            )
            sparse_accumulator = tf.gather(accumulator, indices=grad.indices)
            sparse_denominator = tf.sqrt(sparse_accumulator + self.epsilon)
            variable.scatter_add(
                tf.IndexedSlices(
                    -lr * grad.values / sparse_denominator, grad.indices
                )
            )
        else:
            # Dense gradients.
            accumulator.assign_add(grad * grad)
            variable.assign_sub(
                lr * grad / tf.sqrt(accumulator + self.epsilon)
            )

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "learning_rate": self._serialize_hyperparameter(
                    self._learning_rate
                ),
                "initial_accumulator_value": self.initial_accumulator_value,
                "epsilon": self.epsilon,
            }
        )
        return config


Adagrad.__doc__ = Adagrad.__doc__.replace(
    "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)