
"""Adagrad optimizer implementation."""

import tensorflow.compat.v2 as tf

from tf_keras.src import initializers
from tf_keras.src.optimizers import optimizer
from tf_keras.src.saving.object_registration import register_keras_serializable
from tensorflow.python.util.tf_export import keras_export


@register_keras_serializable()
@keras_export(
    "keras.optimizers.Adagrad",
    "keras.optimizers.experimental.Adagrad",
    "keras.dtensor.experimental.optimizers.Adagrad",
    v1=[],
)
class Adagrad(optimizer.Optimizer):
    """Optimizer that implements the Adagrad algorithm.

    Adagrad is an optimizer with parameter-specific learning rates,
    which are adapted relative to how frequently a parameter gets
    updated during training. The more updates a parameter receives,
    the smaller the updates.
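
    Schematically, the per-parameter update performed by this implementation
    is (ignoring the weight-decay, clipping, and EMA features inherited from
    the base optimizer class):

        accumulator += gradient ** 2
        parameter -= learning_rate * gradient / sqrt(accumulator + epsilon)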

    Args:
        learning_rate: Initial value for the learning rate:
            either a floating point value,
            or a `tf.keras.optimizers.schedules.LearningRateSchedule` instance.
            Defaults to 0.001. Note that `Adagrad` tends to benefit from higher
            initial learning rate values compared to other optimizers. To match
            the exact form in the original paper, use 1.0.
        initial_accumulator_value: Floating point value.
            Starting value for the accumulators (per-parameter momentum values).
            Must be non-negative.
        epsilon: Small floating point value used to maintain numerical
            stability.
        {{base_optimizer_keyword_args}}
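
    A minimal construction sketch (the variable and loss below are purely
    illustrative and not part of this module):

    >>> import tensorflow as tf
    >>> opt = tf.keras.optimizers.Adagrad(learning_rate=1.0)
    >>> var = tf.Variable(10.0)
    >>> _ = opt.minimize(lambda: (var ** 2) / 2.0, [var])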

    Reference:
        - [Duchi et al., 2011](
            http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
    """

    def __init__(
        self,
        learning_rate=0.001,
        initial_accumulator_value=0.1,
        epsilon=1e-7,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        jit_compile=True,
        name="Adagrad",
        **kwargs,
    ):
        super().__init__(
            weight_decay=weight_decay,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            jit_compile=jit_compile,
            name=name,
            **kwargs,
        )
        self._learning_rate = self._build_learning_rate(learning_rate)
        self.initial_accumulator_value = initial_accumulator_value
        self.epsilon = epsilon

    def build(self, var_list):
        super().build(var_list)
        if hasattr(self, "_built") and self._built:
            return
        self._built = True
        self._accumulators = []
        initializer = initializers.Constant(self.initial_accumulator_value)
        for var in var_list:
            self._accumulators.append(
                self.add_variable_from_reference(
                    var,
                    "accumulator",
                    initial_value=initializer(
                        shape=var.shape, dtype=var.dtype
                    ),
                )
            )

    def update_step(self, grad, variable):
        """Update step given gradient and the associated model variable."""
        lr = tf.cast(self.learning_rate, variable.dtype)

        var_key = self._var_key(variable)
        accumulator = self._accumulators[self._index_dict[var_key]]

        if isinstance(grad, tf.IndexedSlices):
            # Sparse gradients: accumulate squared gradient values only for
            # the touched rows, then scale the update by the per-row
            # denominator.
            accumulator.scatter_add(
                tf.IndexedSlices(grad.values * grad.values, grad.indices)
            )
            sparse_accumulator = tf.gather(accumulator, indices=grad.indices)
            sparse_denominator = tf.sqrt(sparse_accumulator + self.epsilon)
            variable.scatter_add(
                tf.IndexedSlices(
                    -lr * grad.values / sparse_denominator, grad.indices
                )
            )
        else:
            # Dense gradients.
            accumulator.assign_add(grad * grad)
            variable.assign_sub(
                lr * grad / tf.sqrt(accumulator + self.epsilon)
            )

    def get_config(self):
        config = super().get_config()

        config.update(
            {
                "learning_rate": self._serialize_hyperparameter(
                    self._learning_rate
                ),
                "initial_accumulator_value": self.initial_accumulator_value,
                "epsilon": self.epsilon,
            }
        )
        return config


Adagrad.__doc__ = Adagrad.__doc__.replace(
    "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)