"""Adagrad optimizer implementation."""

import numpy as np
import tensorflow.compat.v2 as tf

from keras import backend_config
from keras.optimizers.legacy import optimizer_v2

from tensorflow.python.util.tf_export import keras_export


@keras_export(
    "keras.optimizers.legacy.Adagrad",
    v1=["keras.optimizers.Adagrad", "keras.optimizers.legacy.Adagrad"],
)
class Adagrad(optimizer_v2.OptimizerV2):
    """Optimizer that implements the Adagrad algorithm.

    Adagrad is an optimizer with parameter-specific learning rates,
    which are adapted relative to how frequently a parameter gets
    updated during training. The more updates a parameter receives,
    the smaller the updates.

    Args:
      learning_rate: Initial value for the learning rate:
        either a floating point value,
        or a `tf.keras.optimizers.schedules.LearningRateSchedule` instance.
        Defaults to 0.001.
        Note that `Adagrad` tends to benefit from higher initial learning rate
        values compared to other optimizers.
        To match the exact form in the original paper, use 1.0.
      initial_accumulator_value: Floating point value.
        Starting value for the accumulators (per-parameter momentum values).
        Must be non-negative.
      epsilon: Small floating point value used to maintain numerical stability.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to `"Adagrad"`.
      **kwargs: keyword arguments. Allowed arguments are `clipvalue`,
        `clipnorm`, `global_clipnorm`.
        If `clipvalue` (float) is set, the gradient of each weight
        is clipped to be no higher than this value.
        If `clipnorm` (float) is set, the gradient of each weight
        is individually clipped so that its norm is no higher than this value.
        If `global_clipnorm` (float) is set, the gradient of all weights is
        clipped so that their global norm is no higher than this value.

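    Usage (an illustrative sketch; the variable, loss function, and learning
    rate below are examples chosen for this note, not taken from the module):

    >>> opt = tf.keras.optimizers.legacy.Adagrad(learning_rate=0.1)
    >>> var = tf.Variable(10.0)
    >>> loss = lambda: (var ** 2) / 2.0  # d(loss)/d(var) == var
    >>> step_count = opt.minimize(loss, [var]).numpy()
    >>> # One step moves `var` by lr * grad / sqrt(accum + grad ** 2),
    >>> # i.e. from 10.0 to roughly 9.9 with the values above.
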
    Reference:
      - [Duchi et al., 2011](
        http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
    """

    _HAS_AGGREGATE_GRAD = True

    def __init__(
        self,
        learning_rate=0.001,
        initial_accumulator_value=0.1,
        epsilon=1e-7,
        name="Adagrad",
        **kwargs,
    ):
        if initial_accumulator_value < 0.0:
            raise ValueError(
                "initial_accumulator_value must be non-negative: %s"
                % initial_accumulator_value
            )
        if epsilon is None:
            epsilon = backend_config.epsilon()
        super().__init__(name, **kwargs)
        self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
        self._set_hyper("decay", self._initial_decay)
        self._initial_accumulator_value = initial_accumulator_value
        self.epsilon = epsilon or backend_config.epsilon()

    def _create_slots(self, var_list):
        # One "accumulator" slot per variable, initialized to
        # `initial_accumulator_value`.
        for var in var_list:
            dtype = var.dtype.base_dtype
            init = tf.compat.v1.constant_initializer(
                self._initial_accumulator_value, dtype=dtype
            )
            self.add_slot(var, "accumulator", init)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super()._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)].update(
            dict(
                epsilon=tf.convert_to_tensor(self.epsilon, var_dtype),
                neg_lr_t=-apply_state[(var_device, var_dtype)]["lr_t"],
                zero=tf.zeros((), dtype=tf.dtypes.int64),
            )
        )

    def set_weights(self, weights):
        # Keras V1 optimizers did not include the iteration count at the head
        # of the weight list; prepend a zero iteration for compatibility.
        params = self.weights
        if len(params) == len(weights) + 1:
            weights = [np.array(0)] + weights
        super().set_weights(weights)

    @classmethod
    def from_config(cls, config, custom_objects=None):
        """Creates an optimizer from its config.

        This method is the reverse of `get_config`,
        capable of instantiating the same optimizer from the config
        dictionary.

        Args:
            config: A Python dictionary, typically the output of get_config.
            custom_objects: A Python dictionary mapping names to additional
              Python objects used to create this optimizer, such as a function
              used for a hyperparameter.

        Returns:
            An optimizer instance.
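
        Example (an illustrative round trip; the values are made up for this
        note and are not part of the original docstring):

        >>> opt = Adagrad(learning_rate=0.2, initial_accumulator_value=0.3)
        >>> restored = Adagrad.from_config(opt.get_config())
        >>> # `restored` has the same learning rate, accumulator start value,
        >>> # epsilon, and decay as `opt`.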
        """
        if "initial_accumulator_value" not in config:
            config["initial_accumulator_value"] = 0.1
        if "lr" in config:
            config["learning_rate"] = config.pop("lr")
        return cls(**config)

    def _resource_apply_dense(self, grad, var, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = (apply_state or {}).get(
            (var_device, var_dtype)
        ) or self._fallback_apply_state(var_device, var_dtype)

        acc = self.get_slot(var, "accumulator")
        # Fused kernel: accumulator += grad ** 2;
        # var -= lr * grad / (sqrt(accumulator) + epsilon).
        return tf.raw_ops.ResourceApplyAdagradV2(
            var=var.handle,
            accum=acc.handle,
            lr=coefficients["lr_t"],
            epsilon=coefficients["epsilon"],
            grad=grad,
            use_locking=self._use_locking,
        )

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = (apply_state or {}).get(
            (var_device, var_dtype)
        ) or self._fallback_apply_state(var_device, var_dtype)

        acc = self.get_slot(var, "accumulator")
        # Same update as the dense path, restricted to the given indices.
        return tf.raw_ops.ResourceSparseApplyAdagradV2(
            var=var.handle,
            accum=acc.handle,
            lr=coefficients["lr_t"],
            epsilon=coefficients["epsilon"],
            grad=grad,
            indices=indices,
            use_locking=self._use_locking,
        )

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "learning_rate": self._serialize_hyperparameter(
                    "learning_rate"
                ),
                "decay": self._initial_decay,
                "initial_accumulator_value": self._initial_accumulator_value,
                "epsilon": self.epsilon,
            }
        )
        return config
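

# Illustrative sketch, not part of the upstream module: a plain-numpy check of
# the update rule that the fused `ResourceApplyAdagradV2` kernel used above
# implements, namely
#     accum <- accum + grad ** 2
#     var   <- var - lr * grad / (sqrt(accum) + epsilon)
# Running this file directly prints the result of one hand-computed step.
if __name__ == "__main__":
    lr, epsilon = 0.1, 1e-7
    var, accum = 10.0, 0.1  # initial variable and accumulator values
    grad = var  # gradient of the toy loss 0.5 * var ** 2
    accum += grad**2
    var -= lr * grad / (np.sqrt(accum) + epsilon)
    print(f"var after one Adagrad step: {var:.5f}")  # ~9.90005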