
"""VGG19 model for TF-Keras.

Reference:
  - [Very Deep Convolutional Networks for Large-Scale Image Recognition](
      https://arxiv.org/abs/1409.1556) (ICLR 2015)
    N)backend)imagenet_utils)training)VersionAwareLayers)
data_utils)layer_utils)keras_exportznhttps://storage.googleapis.com/tensorflow/keras-applications/vgg19/vgg19_weights_tf_dim_ordering_tf_kernels.h5zthttps://storage.googleapis.com/tensorflow/keras-applications/vgg19/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5zkeras.applications.vgg19.VGG19zkeras.applications.VGG19c                 d	   |dv s8t         j                  j                  j                  |      st	        d| d      |dk(  r| r|dk7  rt	        d| d      t        j                  |ddt        j                         | |	      }|t        j                  |
      }n/t        j                  |      st        j                  ||      }n|}t        j                  ddddd      |      }t        j                  ddddd      |      }t        j                  ddd      |      }t        j                  ddddd      |      }t        j                  ddddd      |      }t        j                  ddd      |      }t        j                  ddddd      |      }t        j                  ddddd      |      }t        j                  ddddd      |      }t        j                  ddddd      |      }t        j                  ddd      |      }t        j                  d dddd!      |      }t        j                  d dddd"      |      }t        j                  d dddd#      |      }t        j                  d dddd$      |      }t        j                  ddd%      |      }t        j                  d dddd&      |      }t        j                  d dddd'      |      }t        j                  d dddd(      |      }t        j                  d dddd)      |      }t        j                  ddd*      |      }| rt        j                  d+,      |      }t        j                  d-dd./      |      }t        j                  d-dd0/      |      }t        j                   ||       t        j                  ||d1/      |      }n=|d2k(  rt        j#                         |      }n|d3k(  rt        j%                         |      }|t'        j(                  |      }	n|}	t+        j,                  |	|d4,      }
|dk(  rP| rt/        j0                  d5t2        d6d78      }nt/        j0                  d9t4        d6d:8      }|
j7                  |       |
S ||
j7                  |       |
S );a+  Instantiates the VGG19 architecture.

    Reference:
    - [Very Deep Convolutional Networks for Large-Scale Image Recognition](
        https://arxiv.org/abs/1409.1556) (ICLR 2015)

    For image classification use cases, see
    [this page for detailed examples](
      https://keras.io/api/applications/#usage-examples-for-image-classification-models).

    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
      https://keras.io/guides/transfer_learning/).
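
    As a quick illustration, the convolutional base can be used as a
    feature extractor; `elephant.jpg` is a hypothetical placeholder path:

    ```python
    import numpy as np
    import tensorflow as tf

    # Convolutional base only; `pooling="avg"` collapses the final
    # feature maps to a (batch, 512) vector.
    base = tf.keras.applications.VGG19(
        include_top=False, weights="imagenet", pooling="avg"
    )
    image = tf.keras.utils.load_img("elephant.jpg", target_size=(224, 224))
    x = np.expand_dims(tf.keras.utils.img_to_array(image), axis=0)
    x = tf.keras.applications.vgg19.preprocess_input(x)
    features = base.predict(x)  # shape: (1, 512)
    ```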

    The default input size for this model is 224x224.

    Note: each TF-Keras Application expects a specific kind of input
    preprocessing. For VGG19, call
    `tf.keras.applications.vgg19.preprocess_input` on your inputs before passing
    them to the model. `vgg19.preprocess_input` will convert the input images
    from RGB to BGR, then will zero-center each color channel with respect to
    the ImageNet dataset, without scaling.
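
    A minimal end-to-end classification sketch (again assuming a
    placeholder image file `elephant.jpg`):

    ```python
    import numpy as np
    import tensorflow as tf

    model = tf.keras.applications.VGG19(weights="imagenet")
    image = tf.keras.utils.load_img("elephant.jpg", target_size=(224, 224))
    x = np.expand_dims(tf.keras.utils.img_to_array(image), axis=0)
    # RGB -> BGR conversion and per-channel zero-centering, as described
    # above; no scaling.
    x = tf.keras.applications.vgg19.preprocess_input(x)
    preds = model.predict(x)
    # (class_name, description, probability) triples for the top entries.
    print(tf.keras.applications.vgg19.decode_predictions(preds, top=3)[0])
    ```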

    Args:
      include_top: whether to include the 3 fully-connected
        layers at the top of the network.
      weights: one of `None` (random initialization),
          'imagenet' (pre-training on ImageNet),
          or the path to the weights file to be loaded.
      input_tensor: optional TF-Keras tensor
        (i.e. output of `layers.Input()`)
        to use as image input for the model.
      input_shape: optional shape tuple, only to be specified
        if `include_top` is False (otherwise the input shape
        has to be `(224, 224, 3)`
        (with `channels_last` data format)
        or `(3, 224, 224)` (with `channels_first` data format)).
        It should have exactly 3 input channels,
        and width and height should be no smaller than 32.
        E.g. `(200, 200, 3)` would be one valid value.
      pooling: Optional pooling mode for feature extraction
        when `include_top` is `False`.
        - `None` means that the output of the model will be
            the 4D tensor output of the
            last convolutional block.
        - `avg` means that global average pooling
            will be applied to the output of the
            last convolutional block, and thus
            the output of the model will be a 2D tensor.
        - `max` means that global max pooling will
            be applied.
      classes: optional number of classes to classify images
        into, only to be specified if `include_top` is True, and
        if no `weights` argument is specified.
      classifier_activation: A `str` or callable. The activation function to use
        on the "top" layer. Ignored unless `include_top=True`. Set
        `classifier_activation=None` to return the logits of the "top" layer.
        When loading pretrained weights, `classifier_activation` can only
        be `None` or `"softmax"`.

    Returns:
      A `keras.Model` instance.
    >   NimagenetzThe `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.  Received: `weights=z.`r     zmIf using `weights` as `"imagenet"` with `include_top` as true, `classes` should be 1000.  Received: `classes=       )default_sizemin_sizedata_formatrequire_flattenweights)shape)tensorr   @   )   r   relusameblock1_conv1)
activationpaddingnameblock1_conv2)   r   block1_pool)stridesr      block2_conv1block2_conv2block2_pool   block3_conv1block3_conv2block3_conv3block3_conv4block3_pooli   block4_conv1block4_conv2block4_conv3block4_conv4block4_poolblock5_conv1block5_conv2block5_conv3block5_conv4block5_poolflatten)r   i   fc1)r   r   fc2predictionsavgmaxvgg19z+vgg19_weights_tf_dim_ordering_tf_kernels.h5models cbe5617147190e668d6c5d5026f83318)cache_subdir	file_hashz1vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5 253f8cb515780f3b799900260a226db6)tfiogfileexists
ValueErrorr   obtain_input_shaper   image_data_formatlayersInputis_keras_tensorConv2DMaxPooling2DFlattenDensevalidate_activationGlobalAveragePooling2DGlobalMaxPooling2Dr   get_source_inputsr   Modelr   get_fileWEIGHTS_PATHWEIGHTS_PATH_NO_TOPload_weights)include_topr   input_tensorinput_shapepoolingclassesclassifier_activation	img_inputxinputsmodelweights_paths               ^/var/www/html/test/engine/venv/lib/python3.12/site-packages/tf_keras/src/applications/vgg19.pyVGG19re   0   sO   R ))RUU[[-?-?-H" #*"	.
 	
 *D"")".
 	
 !33--/#K LL{L3	&&|4LLI$I
FvvN 	 		A 	
FvvN 	 			A 	FFGJA 	V^ 	 			A 	V^ 	 			A 	FFGJA 	V^ 	 			A 	V^ 	 			A 	V^ 	 			A 	V^ 	 			A 	FFGJA 	V^ 	 			A 	V^ 	 			A 	V^ 	 			A 	V^ 	 			A 	FFGJA 	V^ 	 			A 	V^ 	 			A 	V^ 	 			A 	V^ 	 			A 	FFGJANN	N*1-LL&uL=a@LL&uL=a@**+@'JLL 5M  

 e--/2A))+A.A ..|<NN6173E *%..=%<	L &..C#%<	L 	<( L 
	7#L    z)keras.applications.vgg19.preprocess_inputc                 2    t        j                  | |d      S )Ncaffe)r   mode)r   preprocess_input)r`   r   s     rd   rj   rj     s    **	{ rf   z+keras.applications.vgg19.decode_predictionsc                 0    t        j                  | |      S )N)top)r   decode_predictions)predsrl   s     rd   rm   rm     s    ,,U<<rf    )ri   reterror)Tr   NNNr   softmax)N)   )__doc__tensorflow.compat.v2compatv2rB   tf_keras.srcr   tf_keras.src.applicationsr   tf_keras.src.enginer   tf_keras.src.layersr   tf_keras.src.utilsr   r    tensorflow.python.util.tf_exportr	   rV   rW   rI   re   rj   rm   PREPROCESS_INPUT_DOCformatPREPROCESS_INPUT_RET_DOC_CAFFEPREPROCESS_INPUT_ERROR_DOC rf   rd   <module>r      s     " !   4 ( 2 ) * :8 
8  
	 .0JK#S LSl 9: ; ;<= == *>>EE	55

3
3 F   
 ,>>FF  rf   