
"""Built-in loss functions."""

import abc
import functools
import warnings

import tensorflow.compat.v2 as tf

from tf_keras.src import backend
from tf_keras.src.saving import saving_lib
from tf_keras.src.saving.legacy import serialization
from tf_keras.src.saving.serialization_lib import deserialize_keras_object
from tf_keras.src.saving.serialization_lib import serialize_keras_object
from tf_keras.src.utils import losses_utils
from tf_keras.src.utils import tf_utils

# isort: off
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls


@keras_export("keras.losses.Loss")
class Loss:
    """Loss base class.

    To be implemented by subclasses:
    * `call()`: Contains the logic for loss calculation using `y_true`,
        `y_pred`.

    Example subclass implementation:

    ```python
    class MeanSquaredError(Loss):

      def call(self, y_true, y_pred):
          return tf.reduce_mean(tf.math.square(y_pred - y_true), axis=-1)
    ```
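
    A minimal usage sketch of the subclass above (illustrative only; prefer
    the built-in `tf.keras.losses.MeanSquaredError` in real code):

    ```python
    mse = MeanSquaredError()
    loss = mse([[0., 1.]], [[1., 1.]])  # -> 0.5 with the default reduction
    ```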

    When using a Loss under a `tf.distribute.Strategy`, except when passing it
    to `Model.compile()` for use by `Model.fit()`, please use reduction
    types 'SUM' or 'NONE', and reduce losses explicitly. Using 'AUTO' or
    'SUM_OVER_BATCH_SIZE' will raise an error when calling the Loss object
    from a custom training loop or from user-defined code in `Layer.call()`.
    Please see this custom training
    [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
    for more details on this.
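
    For illustration, a minimal sketch of explicit `SUM` reduction inside a
    custom `tf.distribute` training step (assuming `global_batch_size` is
    defined by the surrounding training loop):

    ```python
    loss_obj = tf.keras.losses.MeanSquaredError(
        reduction=tf.keras.losses.Reduction.SUM)

    def compute_loss(y_true, y_pred):
        # Sum the per-sample losses on this replica, then scale by the
        # global batch size so the cross-replica sum matches an average
        # over the full (global) batch.
        return loss_obj(y_true, y_pred) / global_batch_size
    ```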
    """

    def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):
        """Initializes `Loss` class.

        Args:
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance.
        """
        losses_utils.ReductionV2.validate(reduction)
        self.reduction = reduction
        self.name = name
        # SUM_OVER_BATCH is only allowed in losses managed by `fit` or
        # `train_on_batch`.
        self._allow_sum_over_batch_size = False
        self._set_name_scope()

    def _set_name_scope(self):
        """Creates a valid `name_scope` name."""
        if self.name is None:
            self._name_scope = self.__class__.__name__.strip("_")
        elif self.name == "<lambda>":
            self._name_scope = "lambda"
        else:
            # E.g. '_my_loss' => 'my_loss'
            self._name_scope = self.name.strip("_")

    def __call__(self, y_true, y_pred, sample_weight=None):
        """Invokes the `Loss` instance.

        Args:
            y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`,
                except sparse loss functions such as sparse categorical
                crossentropy where shape = `[batch_size, d0, .. dN-1]`
            y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
            sample_weight: Optional `sample_weight` acts as a coefficient for
                the loss. If a scalar is provided, then the loss is simply
                scaled by the given value. If `sample_weight` is a tensor of
                size `[batch_size]`, then the total loss for each sample of the
                batch is rescaled by the corresponding element in the
                `sample_weight` vector. If the shape of `sample_weight` is
                `[batch_size, d0, .. dN-1]` (or can be broadcasted to this
                shape), then each loss element of `y_pred` is scaled by the
                corresponding value of `sample_weight`. (Note on `dN-1`: all loss
                functions reduce by 1 dimension, usually axis=-1.)

        Returns:
            Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
                shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar.
                (Note `dN-1` because all loss functions reduce by 1 dimension,
                usually axis=-1.)

        Raises:
          ValueError: If the shape of `sample_weight` is invalid.
        """
        graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
            y_true, y_pred, sample_weight
        )
        with backend.name_scope(self._name_scope), graph_ctx:
            if tf.executing_eagerly():
                call_fn = self.call
            else:
                call_fn = tf.__internal__.autograph.tf_convert(
                    self.call, tf.__internal__.autograph.control_status_ctx()
                )

            losses = call_fn(y_true, y_pred)

            in_mask = losses_utils.get_mask(y_pred)
            out_mask = losses_utils.get_mask(losses)

            if in_mask is not None and out_mask is not None:
                mask = in_mask & out_mask
            elif in_mask is not None:
                mask = in_mask
            elif out_mask is not None:
                mask = out_mask
            else:
                mask = None

            reduction = self._get_reduction()
            sample_weight = losses_utils.apply_valid_mask(
                losses, sample_weight, mask, reduction
            )
            return losses_utils.compute_weighted_loss(
                losses, sample_weight, reduction=reduction
            )

    @classmethod
    def from_config(cls, config):
        """Instantiates a `Loss` from its config (output of `get_config()`).

        Args:
            config: Output of `get_config()`.

        Returns:
            A `Loss` instance.
        """
        return cls(**config)

    def get_config(self):
        """Returns the config dictionary for a `Loss` instance."""
        return {"reduction": self.reduction, "name": self.name}

    @abc.abstractmethod
    @doc_controls.for_subclass_implementers
    def call(self, y_true, y_pred):
        """Invokes the `Loss` instance.

        Args:
            y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`,
                except sparse loss functions such as sparse categorical
                crossentropy where shape = `[batch_size, d0, .. dN-1]`
            y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`

        Returns:
            Loss values with the shape `[batch_size, d0, .. dN-1]`.
        """
        raise NotImplementedError("Must be implemented in subclasses.")

    def _get_reduction(self):
        """Handles `AUTO` reduction cases and returns the reduction value."""
        if (
            not self._allow_sum_over_batch_size
            and tf.distribute.has_strategy()
            and (
                self.reduction == losses_utils.ReductionV2.AUTO
                or self.reduction
                == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
            )
        ):
            raise ValueError(
                "Please use `tf.keras.losses.Reduction.SUM` or "
                "`tf.keras.losses.Reduction.NONE` for loss reduction when "
                "losses are used with `tf.distribute.Strategy`, except for "
                "specifying losses in `Model.compile()` for use by the "
                "built-in training loop `Model.fit()`.\nPlease see "
                "https://www.tensorflow.org/tutorials/distribute"
                "/custom_training for more details."
            )

        if self.reduction == losses_utils.ReductionV2.AUTO:
            return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
        return self.reduction


@keras_export("keras.__internal__.losses.LossFunctionWrapper", v1=[])
class LossFunctionWrapper(Loss):
    """Wraps a loss function in the `Loss` class."""

    def __init__(
        self, fn, reduction=losses_utils.ReductionV2.AUTO, name=None, **kwargs
    ):
        """Initializes `LossFunctionWrapper` class.

        Args:
            fn: The loss function to wrap, with signature `fn(y_true, y_pred,
                **kwargs)`.
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance.
            **kwargs: The keyword arguments that are passed on to `fn`.
        """
        super().__init__(reduction=reduction, name=name)
        self.fn = fn
        self._fn_kwargs = kwargs

    def call(self, y_true, y_pred):
        """Invokes the `LossFunctionWrapper` instance.

        Args:
            y_true: Ground truth values.
            y_pred: The predicted values.

        Returns:
            Loss values per sample.
        """
        if tf.is_tensor(y_pred) and tf.is_tensor(y_true):
            y_true, y_pred = losses_utils.squeeze_or_expand_dimensions(
                y_true, y_pred
            )

        ag_fn = tf.__internal__.autograph.tf_convert(
            self.fn, tf.__internal__.autograph.control_status_ctx()
        )
        return ag_fn(y_true, y_pred, **self._fn_kwargs)

    def get_config(self):
        config = {}
        for k, v in self._fn_kwargs.items():
            config[k] = (
                backend.eval(v) if tf_utils.is_tensor_or_variable(v) else v
            )

        if saving_lib.saving_v3_enabled():
            from tf_keras.src.utils import get_registered_name

            config["fn"] = get_registered_name(self.fn)

        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config):
        """Instantiates a `Loss` from its config (output of `get_config()`).

        Args:
            config: Output of `get_config()`.

        Returns:
            A `keras.losses.Loss` instance.
        """
        if saving_lib.saving_v3_enabled():
            fn_name = config.pop("fn", None)
            if fn_name and cls is LossFunctionWrapper:
                config["fn"] = get(fn_name)
        return cls(**config)


@keras_export("keras.losses.MeanSquaredError")
class MeanSquaredError(LossFunctionWrapper):
    """Computes the mean of squares of errors between labels and predictions.

    `loss = mean(square(y_true - y_pred))`

    Standalone usage:

    >>> y_true = [[0., 1.], [0., 0.]]
    >>> y_pred = [[1., 1.], [1., 0.]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> mse = tf.keras.losses.MeanSquaredError()
    >>> mse(y_true, y_pred).numpy()
    0.5

    >>> # Calling with 'sample_weight'.
    >>> mse(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
    0.25

    >>> # Using 'sum' reduction type.
    >>> mse = tf.keras.losses.MeanSquaredError(
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> mse(y_true, y_pred).numpy()
    1.0

    >>> # Using 'none' reduction type.
    >>> mse = tf.keras.losses.MeanSquaredError(
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> mse(y_true, y_pred).numpy()
    array([0.5, 0.5], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd', loss=tf.keras.losses.MeanSquaredError())
    ```
    """

    def __init__(
        self,
        reduction=losses_utils.ReductionV2.AUTO,
        name="mean_squared_error",
    ):
        """Initializes `MeanSquaredError` instance.

        Args:
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance. Defaults to
                'mean_squared_error'.
        r   r   N)rX   r   rt   r   r   r   r   s      r   r   zMeanSquaredError.__init__V  s    & 	+$)Lr   	r    rM   rN   rO   r   r   rI   r   rp   rq   s   @r   rs   rs   0  s)    "J %0055<PM Mr   rs   zkeras.losses.MeanAbsoluteErrorc                   P     e Zd ZdZej
                  j                  df fd	Z xZS )MeanAbsoluteErrora  Computes the mean of absolute difference between labels and predictions.

    `loss = mean(abs(y_true - y_pred))`

    Standalone usage:

    >>> y_true = [[0., 1.], [0., 0.]]
    >>> y_pred = [[1., 1.], [1., 0.]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> mae = tf.keras.losses.MeanAbsoluteError()
    >>> mae(y_true, y_pred).numpy()
    0.5

    >>> # Calling with 'sample_weight'.
    >>> mae(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
    0.25

    >>> # Using 'sum' reduction type.
    >>> mae = tf.keras.losses.MeanAbsoluteError(
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> mae(y_true, y_pred).numpy()
    1.0

    >>> # Using 'none' reduction type.
    >>> mae = tf.keras.losses.MeanAbsoluteError(
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> mae(y_true, y_pred).numpy()
    array([0.5, 0.5], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd', loss=tf.keras.losses.MeanAbsoluteError())
    ```
    """

    def __init__(
        self,
        reduction=losses_utils.ReductionV2.AUTO,
        name="mean_absolute_error",
    ):
        """Initializes `MeanAbsoluteError` instance.

        Args:
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance. Defaults to
                'mean_absolute_error'.
        """
        super().__init__(
            mean_absolute_error, name=name, reduction=reduction
        )


@keras_export("keras.losses.MeanAbsolutePercentageError")
class MeanAbsolutePercentageError(LossFunctionWrapper):
    """Computes the mean absolute percentage error between `y_true` & `y_pred`.

    Formula:

    `loss = 100 * abs((y_true - y_pred) / y_true)`

    Note that to avoid dividing by zero, a small epsilon value
    is added to the denominator.

    Standalone usage:

    >>> y_true = [[2., 1.], [2., 3.]]
    >>> y_pred = [[1., 1.], [1., 0.]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> mape = tf.keras.losses.MeanAbsolutePercentageError()
    >>> mape(y_true, y_pred).numpy()
    50.

    >>> # Calling with 'sample_weight'.
    >>> mape(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
    20.

    >>> # Using 'sum' reduction type.
    >>> mape = tf.keras.losses.MeanAbsolutePercentageError(
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> mape(y_true, y_pred).numpy()
    100.

    >>> # Using 'none' reduction type.
    >>> mape = tf.keras.losses.MeanAbsolutePercentageError(
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> mape(y_true, y_pred).numpy()
    array([25., 75.], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.MeanAbsolutePercentageError())
    ```
    """

    def __init__(
        self,
        reduction=losses_utils.ReductionV2.AUTO,
        name="mean_absolute_percentage_error",
    ):
        """Initializes `MeanAbsolutePercentageError` instance.

        Args:
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance. Defaults to
                'mean_absolute_percentage_error'.
        """
        super().__init__(
            mean_absolute_percentage_error, name=name, reduction=reduction
        )


@keras_export("keras.losses.MeanSquaredLogarithmicError")
class MeanSquaredLogarithmicError(LossFunctionWrapper):
    """Computes the mean squared logarithmic error between `y_true` & `y_pred`.

    `loss = square(log(y_true + 1.) - log(y_pred + 1.))`

    Standalone usage:

    >>> y_true = [[0., 1.], [0., 0.]]
    >>> y_pred = [[1., 1.], [1., 0.]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> msle = tf.keras.losses.MeanSquaredLogarithmicError()
    >>> msle(y_true, y_pred).numpy()
    0.240

    >>> # Calling with 'sample_weight'.
    >>> msle(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
    0.120

    >>> # Using 'sum' reduction type.
    >>> msle = tf.keras.losses.MeanSquaredLogarithmicError(
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> msle(y_true, y_pred).numpy()
    0.480

    >>> # Using 'none' reduction type.
    >>> msle = tf.keras.losses.MeanSquaredLogarithmicError(
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> msle(y_true, y_pred).numpy()
    array([0.240, 0.240], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.MeanSquaredLogarithmicError())
    ```
    """

    def __init__(
        self,
        reduction=losses_utils.ReductionV2.AUTO,
        name="mean_squared_logarithmic_error",
    ):
        """Initializes `MeanSquaredLogarithmicError` instance.

        Args:
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance. Defaults to
                'mean_squared_logarithmic_error'.
        """
        super().__init__(
            mean_squared_logarithmic_error, name=name, reduction=reduction
        )


@keras_export("keras.losses.BinaryCrossentropy")
class BinaryCrossentropy(LossFunctionWrapper):
    """Computes the cross-entropy loss between true labels and predicted labels.

    Use this cross-entropy loss for binary (0 or 1) classification applications.
    The loss function requires the following inputs:

    - `y_true` (true label): This is either 0 or 1.
    - `y_pred` (predicted value): This is the model's prediction, i.e, a single
        floating-point value which either represents a
        [logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf]
        when `from_logits=True`) or a probability (i.e, value in [0., 1.] when
        `from_logits=False`).

    **Recommended Usage:** (set `from_logits=True`)

    With `tf.keras` API:

    ```python
    model.compile(
        loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
        ....
    )
    ```

    As a standalone function:

    >>> # Example 1: (batch_size = 1, number of samples = 4)
    >>> y_true = [0, 1, 0, 0]
    >>> y_pred = [-18.6, 0.51, 2.94, -12.8]
    >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    >>> bce(y_true, y_pred).numpy()
    0.865

    >>> # Example 2: (batch_size = 2, number of samples = 4)
    >>> y_true = [[0, 1], [0, 0]]
    >>> y_pred = [[-18.6, 0.51], [2.94, -12.8]]
    >>> # Using default 'auto'/'sum_over_batch_size' reduction type.
    >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    >>> bce(y_true, y_pred).numpy()
    0.865
    >>> # Using 'sample_weight' attribute
    >>> bce(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
    0.243
    >>> # Using 'sum' reduction type.
    >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True,
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> bce(y_true, y_pred).numpy()
    1.730
    >>> # Using 'none' reduction type.
    >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True,
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> bce(y_true, y_pred).numpy()
    array([0.235, 1.496], dtype=float32)

    **Default Usage:** (set `from_logits=False`)

    >>> # Make the following updates to the above "Recommended Usage" section
    >>> # 1. Set `from_logits=False`
    >>> tf.keras.losses.BinaryCrossentropy() # OR ...('from_logits=False')
    >>> # 2. Update `y_pred` to use probabilities instead of logits
    >>> y_pred = [0.6, 0.3, 0.2, 0.8] # OR [[0.6, 0.3], [0.2, 0.8]]
    """

    def __init__(
        self,
        from_logits=False,
        label_smoothing=0.0,
        axis=-1,
        reduction=losses_utils.ReductionV2.AUTO,
        name="binary_crossentropy",
    ):
        """Initializes `BinaryCrossentropy` instance.

        Args:
            from_logits: Whether to interpret `y_pred` as a tensor of
                [logit](https://en.wikipedia.org/wiki/Logit) values. By default,
                we assume that `y_pred` contains probabilities (i.e., values in
                [0, 1]).
            label_smoothing: Float in [0, 1]. When 0, no smoothing occurs.
                When > 0, we compute the loss between the predicted labels and a
                smoothed version of the true labels, where the smoothing
                squeezes the labels towards 0.5.  Larger values of
                `label_smoothing` correspond to heavier smoothing.
            axis: The axis along which to compute crossentropy (the features
                axis).  Defaults to -1.
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Name for the op. Defaults to 'binary_crossentropy'.
        """
        super().__init__(
            binary_crossentropy,
            name=name,
            reduction=reduction,
            from_logits=from_logits,
            label_smoothing=label_smoothing,
            axis=axis,
        )
        self.from_logits = from_logits


@keras_export("keras.losses.BinaryFocalCrossentropy")
class BinaryFocalCrossentropy(LossFunctionWrapper):
    """Computes focal cross-entropy loss between true labels and predictions.

    Binary cross-entropy loss is often used for binary (0 or 1) classification
    tasks. The loss function requires the following inputs:

    - `y_true` (true label): This is either 0 or 1.
    - `y_pred` (predicted value): This is the model's prediction, i.e, a single
        floating-point value which either represents a
        [logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf]
        when `from_logits=True`) or a probability (i.e, value in [0., 1.] when
        `from_logits=False`).

    According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
    helps to apply a "focal factor" to down-weight easy examples and focus more
    on hard examples. By default, the focal tensor is computed as follows:

    `focal_factor = (1 - output) ** gamma` for class 1
    `focal_factor = output ** gamma` for class 0
    where `gamma` is a focusing parameter. When `gamma=0`, this function is
    equivalent to the binary crossentropy loss.

    With the `compile()` API:

    ```python
    model.compile(
      loss=tf.keras.losses.BinaryFocalCrossentropy(gamma=2.0, from_logits=True),
      ....
    )
    ```

    As a standalone function:

    >>> # Example 1: (batch_size = 1, number of samples = 4)
    >>> y_true = [0, 1, 0, 0]
    >>> y_pred = [-18.6, 0.51, 2.94, -12.8]
    >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=2,
    ...                                                from_logits=True)
    >>> loss(y_true, y_pred).numpy()
    0.691

    >>> # Apply class weight
    >>> loss = tf.keras.losses.BinaryFocalCrossentropy(
    ...     apply_class_balancing=True, gamma=2, from_logits=True)
    >>> loss(y_true, y_pred).numpy()
    0.51

    >>> # Example 2: (batch_size = 2, number of samples = 4)
    >>> y_true = [[0, 1], [0, 0]]
    >>> y_pred = [[-18.6, 0.51], [2.94, -12.8]]
    >>> # Using default 'auto'/'sum_over_batch_size' reduction type.
    >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=3,
    ...                                                from_logits=True)
    >>> loss(y_true, y_pred).numpy()
    0.647

    >>> # Apply class weight
    >>> loss = tf.keras.losses.BinaryFocalCrossentropy(
    ...     apply_class_balancing=True, gamma=3, from_logits=True)
    >>> loss(y_true, y_pred).numpy()
    0.482

    >>> # Using 'sample_weight' attribute with focal effect
    >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=3,
    ...                                                from_logits=True)
    >>> loss(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
    0.133

    >>> # Apply class weight
    >>> loss = tf.keras.losses.BinaryFocalCrossentropy(
    ...     apply_class_balancing=True, gamma=3, from_logits=True)
    >>> loss(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
    0.097

    >>> # Using 'sum' reduction` type.
    >>> loss = tf.keras.losses.BinaryFocalCrossentropy(gamma=4,
    ...                                                from_logits=True,
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> loss(y_true, y_pred).numpy()
    1.222

    >>> # Apply class weight
    >>> loss = tf.keras.losses.BinaryFocalCrossentropy(
    ...     apply_class_balancing=True, gamma=4, from_logits=True,
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> loss(y_true, y_pred).numpy()
    0.914

    >>> # Using 'none' reduction type.
    >>> loss = tf.keras.losses.BinaryFocalCrossentropy(
    ...     gamma=5, from_logits=True,
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> loss(y_true, y_pred).numpy()
    array([0.0017 1.1561], dtype=float32)

    >>> # Apply class weight
    >>> loss = tf.keras.losses.BinaryFocalCrossentropy(
    ...     apply_class_balancing=True, gamma=5, from_logits=True,
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> loss(y_true, y_pred).numpy()
    array([0.0004 0.8670], dtype=float32)


    Args:
        apply_class_balancing: A bool, whether to apply weight balancing on the
            binary classes 0 and 1.
        alpha: A weight balancing factor for class 1, default is `0.25` as
            mentioned in reference [Lin et al., 2018](
            https://arxiv.org/pdf/1708.02002.pdf).  The weight for class 0 is
            `1.0 - alpha`.
        gamma: A focusing parameter used to compute the focal factor, default is
            `2.0` as mentioned in the reference
            [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf).
        from_logits: Whether to interpret `y_pred` as a tensor of
            [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
            assume that `y_pred` are probabilities (i.e., values in `[0, 1]`).
        label_smoothing: Float in `[0, 1]`. When `0`, no smoothing occurs.
            When > `0`, we compute the loss between the predicted labels and a
            smoothed version of the true labels, where the smoothing squeezes
            the labels towards `0.5`. Larger values of `label_smoothing`
            correspond to heavier smoothing.
        axis: The axis along which to compute crossentropy (the features axis).
            Defaults to `-1`.
        reduction: Type of `tf.keras.losses.Reduction` to apply to
            loss. Default value is `AUTO`. `AUTO` indicates that the reduction
            option will be determined by the usage context. For almost all cases
            this defaults to `SUM_OVER_BATCH_SIZE`. When used under a
            `tf.distribute.Strategy`, except via `Model.compile()` and
            `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
            will raise an error. Please see this custom training [tutorial](
            https://www.tensorflow.org/tutorials/distribute/custom_training)
            for more details.
        name: Name for the op. Defaults to 'binary_focal_crossentropy'.
    F      ?       @r   r   binary_focal_crossentropyc	                 v    t         	|   t        ||||||||	       || _        || _        || _        || _        y)z/Initializes `BinaryFocalCrossentropy` instance.)apply_class_balancingalphagammar   r   r   r   r   N)rX   r   r   r   r   r   r   )
r   r   r   r   r   r   r   r   r   r   s
            r   r   z BinaryFocalCrossentropy.__init__&  sR     	%"7#+ 	 
	
 '%:"

r   c                     | j                   | j                  | j                  d}t        |          }t        t        |j                               t        |j                               z         S )N)r   r   r   )r   r   r   rX   rC   rg   rh   rb   r   r?   rk   r   s      r   rC   z"BinaryFocalCrossentropy.get_configB  sY    %)%?%?ZZZZ

 g(*D**,-V\\^0DDEEr   
r    rM   rN   rO   r   r   rI   r   rC   rp   rq   s   @r   r   r     sA    DP $**//(8F Fr   r   z$keras.losses.CategoricalCrossentropyc                   V     e Zd ZdZdddej
                  j                  df fd	Z xZS )CategoricalCrossentropya  Computes the crossentropy loss between the labels and predictions.

    Use this crossentropy loss function when there are two or more label
    classes. We expect labels to be provided in a `one_hot` representation. If
    you want to provide labels as integers, please use
    `SparseCategoricalCrossentropy` loss.  There should be `# classes` floating
    point values per feature.

    In the snippet below, there is `# classes` floating pointing values per
    example. The shape of both `y_pred` and `y_true` are
    `[batch_size, num_classes]`.

    Standalone usage:

    >>> y_true = [[0, 1, 0], [0, 0, 1]]
    >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> cce = tf.keras.losses.CategoricalCrossentropy()
    >>> cce(y_true, y_pred).numpy()
    1.177

    >>> # Calling with 'sample_weight'.
    >>> cce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy()
    0.814

    >>> # Using 'sum' reduction type.
    >>> cce = tf.keras.losses.CategoricalCrossentropy(
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> cce(y_true, y_pred).numpy()
    2.354

    >>> # Using 'none' reduction type.
    >>> cce = tf.keras.losses.CategoricalCrossentropy(
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> cce(y_true, y_pred).numpy()
    array([0.0513, 2.303], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.CategoricalCrossentropy())
    ```
    Fr   r   categorical_crossentropyc                 8    t         |   t        |||||       y)a  Initializes `CategoricalCrossentropy` instance.

        Args:
            from_logits: Whether `y_pred` is expected to be a logits tensor. By
                default, we assume that `y_pred` encodes a probability
                distribution.
            label_smoothing: Float in [0, 1]. When > 0, label values are
                smoothed, meaning the confidence on label values are relaxed.
                For example, if `0.1`, use `0.1 / num_classes` for non-target
                labels and `0.9 + 0.1 / num_classes` for target labels.
            axis: The axis along which to compute crossentropy (the features
                axis). Defaults to -1.
            reduction: Type of `tf.keras.losses.Reduction` to apply to loss.
                Default value is `AUTO`. `AUTO` indicates that the reduction
                option will be determined by the usage context. For almost all
                cases this defaults to `SUM_OVER_BATCH_SIZE`. When used under a
                `tf.distribute.Strategy`, except via `Model.compile()` and
                `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
                will raise an error. Please see this custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance.
                Defaults to 'categorical_crossentropy'.
        r   N)rX   r   r   r   s         r   r   z CategoricalCrossentropy.__init__{  s*    @ 	$#+ 	 	
r   rx   rq   s   @r   r   r   L  s0    +^ **//''
 '
r   r   z)keras.losses.CategoricalFocalCrossentropyc                   d     e Zd ZdZdddddej
                  j                  df fd	Z fd	Z xZ	S )
CategoricalFocalCrossentropyuG  Computes the alpha balanced focal crossentropy loss.

    Use this crossentropy loss function when there are two or more label
    classes and if you want to handle class imbalance without using
    `class_weights`. We expect labels to be provided in a `one_hot`
    representation.

    According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
    helps to apply a focal factor to down-weight easy examples and focus more on
    hard examples. The general formula for the focal loss (FL)
    is as follows:

    `FL(p_t) = (1 − p_t)^gamma * log(p_t)`

    where `p_t` is defined as follows:
    `p_t = output if y_true == 1, else 1 - output`

    `(1 − p_t)^gamma` is the `modulating_factor`, where `gamma` is a focusing
    parameter. When `gamma` = 0, there is no focal effect on the cross entropy.
    `gamma` reduces the importance given to simple examples in a smooth manner.

    The authors use alpha-balanced variant of focal loss (FL) in the paper:
    `FL(p_t) = −alpha * (1 − p_t)^gamma * log(p_t)`

    where `alpha` is the weight factor for the classes. If `alpha` = 1, the
    loss won't be able to handle class imbalance properly as all
    classes will have the same weight. This can be a constant or a list of
    constants. If alpha is a list, it must have the same length as the number
    of classes.

    The formula above can be generalized to:
    `FL(p_t) = alpha * (1 − p_t)^gamma * CrossEntropy(y_true, y_pred)`

    where minus comes from `CrossEntropy(y_true, y_pred)` (CE).

    Extending this to multi-class case is straightforward:
    `FL(p_t) = alpha * (1 − p_t)^gamma * CategoricalCE(y_true, y_pred)`

    In the snippet below, there is `# classes` floating pointing values per
    example. The shape of both `y_pred` and `y_true` are
    `[batch_size, num_classes]`.

    Standalone usage:

    >>> y_true = [[0., 1., 0.], [0., 0., 1.]]
    >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> cce = tf.keras.losses.CategoricalFocalCrossentropy()
    >>> cce(y_true, y_pred).numpy()
    0.23315276

    >>> # Calling with 'sample_weight'.
    >>> cce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy()
    0.1632

    >>> # Using 'sum' reduction type.
    >>> cce = tf.keras.losses.CategoricalFocalCrossentropy(
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> cce(y_true, y_pred).numpy()
    0.46631

    >>> # Using 'none' reduction type.
    >>> cce = tf.keras.losses.CategoricalFocalCrossentropy(
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> cce(y_true, y_pred).numpy()
    array([3.2058331e-05, 4.6627346e-01], dtype=float32)

    Usage with the `compile()` API:
    ```python
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.CategoricalFocalCrossentropy())
    ```

    Args:
        alpha: A weight balancing factor for all classes, default is `0.25` as
            mentioned in the reference. It can be a list of floats or a scalar.
            In the multi-class case, alpha may be set by inverse class
            frequency by using `compute_class_weight` from `sklearn.utils`.
        gamma: A focusing parameter, default is `2.0` as mentioned in the
            reference. It helps to gradually reduce the importance given to
            simple (easy) examples in a smooth manner.
        from_logits: Whether `output` is expected to be a logits tensor. By
            default, we consider that `output` encodes a probability
            distribution.
        label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
            meaning the confidence on label values are relaxed. For example, if
            `0.1`, use `0.1 / num_classes` for non-target labels and
            `0.9 + 0.1 / num_classes` for target labels.
        axis: The axis along which to compute crossentropy (the features
            axis). Defaults to -1.
        reduction: Type of `tf.keras.losses.Reduction` to apply to
            loss. Default value is `AUTO`. `AUTO` indicates that the reduction
            option will be determined by the usage context. For almost all cases
            this defaults to `SUM_OVER_BATCH_SIZE`. When used under a
            `tf.distribute.Strategy`, except via `Model.compile()` and
            `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
            will raise an error. Please see this custom training [tutorial](
            https://www.tensorflow.org/tutorials/distribute/custom_training)
            for more details.
        name: Optional name for the instance.
            Defaults to 'categorical_focal_crossentropy'.

    r   r   Fr   r   categorical_focal_crossentropyc           
      f    t         |   t        |||||||       || _        || _        || _        y)z4Initializes `CategoricalFocalCrossentropy` instance.)r   r   r   r   r   r   r   N)rX   r   r   r   r   r   )	r   r   r   r   r   r   r   r   r   s	           r   r   z%CategoricalFocalCrossentropy.__init__  sG     	*#+ 	 		
 '

r   c                     | j                   | j                  d}t        |          }t	        t        |j                               t        |j                               z         S )N)r   r   )r   r   rX   rC   rg   rh   rb   r   s      r   rC   z'CategoricalFocalCrossentropy.get_config(  sP    ZZZZ
 g(*D**,-V\\^0DDEEr   r   rq   s   @r   r   r     s>    fT **//-2F Fr   r   z*keras.losses.SparseCategoricalCrossentropyc                   T     e Zd ZdZddej
                  j                  df fd	Z xZS )SparseCategoricalCrossentropya  Computes the crossentropy loss between the labels and predictions.

    Use this crossentropy loss function when there are two or more label
    classes.  We expect labels to be provided as integers. If you want to
    provide labels using `one-hot` representation, please use
    `CategoricalCrossentropy` loss.  There should be `# classes` floating point
    values per feature for `y_pred` and a single floating point value per
    feature for `y_true`.

    In the snippet below, there is a single floating point value per example for
    `y_true` and `# classes` floating pointing values per example for `y_pred`.
    The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
    `[batch_size, num_classes]`.

    Standalone usage:

    >>> y_true = [1, 2]
    >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> scce = tf.keras.losses.SparseCategoricalCrossentropy()
    >>> scce(y_true, y_pred).numpy()
    1.177

    >>> # Calling with 'sample_weight'.
    >>> scce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy()
    0.814

    >>> # Using 'sum' reduction type.
    >>> scce = tf.keras.losses.SparseCategoricalCrossentropy(
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> scce(y_true, y_pred).numpy()
    2.354

    >>> # Using 'none' reduction type.
    >>> scce = tf.keras.losses.SparseCategoricalCrossentropy(
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> scce(y_true, y_pred).numpy()
    array([0.0513, 2.303], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy())
    ```
    FNsparse_categorical_crossentropyc                 6    t         |   t        ||||       y)al  Initializes `SparseCategoricalCrossentropy` instance.

        Args:
            from_logits: Whether `y_pred` is expected to be a logits tensor. By
                default, we assume that `y_pred` encodes a probability
                distribution.
            ignore_class: Optional integer. The ID of a class to be ignored
                during loss computation. This is useful, for example, in
                segmentation problems featuring a "void" class (commonly -1 or
                255) in segmentation maps.
                By default (`ignore_class=None`), all classes are considered.
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance.
                Defaults to 'sparse_categorical_crossentropy'.
        )r   r   r   ignore_classN)rX   r   r   )r   r   r   r   r   r   s        r   r   z&SparseCategoricalCrossentropy.__init__b  s&    > 	+#% 	 	
r   rx   rq   s   @r   r   r   1  s-    -b **//.%
 %
r   r   zkeras.losses.CosineSimilarityc                   R     e Zd ZdZdej
                  j                  df fd	Z xZS )CosineSimilaritya
  Computes the cosine similarity between labels and predictions.

    Note that it is a number between -1 and 1. When it is a negative number
    between -1 and 0, 0 indicates orthogonality and values closer to -1
    indicate greater similarity. The values closer to 1 indicate greater
    dissimilarity. This makes it usable as a loss function in a setting
    where you try to maximize the proximity between predictions and targets.
    If either `y_true` or `y_pred` is a zero vector, cosine similarity will be 0
    regardless of the proximity between predictions and targets.

    `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`

    Standalone usage:

    >>> y_true = [[0., 1.], [1., 1.]]
    >>> y_pred = [[1., 0.], [1., 1.]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
    >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
    >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
    >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
    >>> # loss = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
    >>> #       = -((0. + 0.) +  (0.5 + 0.5)) / 2
    >>> cosine_loss(y_true, y_pred).numpy()
    -0.5

    >>> # Calling with 'sample_weight'.
    >>> cosine_loss(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
    -0.0999

    >>> # Using 'sum' reduction type.
    >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1,
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> cosine_loss(y_true, y_pred).numpy()
    -0.999

    >>> # Using 'none' reduction type.
    >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1,
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> cosine_loss(y_true, y_pred).numpy()
    array([-0., -0.999], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.CosineSimilarity(axis=1))
    ```

    Args:
        axis: The axis along which the cosine similarity is computed
            (the features axis). Defaults to -1.
        reduction: Type of `tf.keras.losses.Reduction` to apply to loss.
            Default value is `AUTO`. `AUTO` indicates that the reduction option
            will be determined by the usage context. For almost all cases this
            defaults to `SUM_OVER_BATCH_SIZE`. When used under a
            `tf.distribute.Strategy`, except via `Model.compile()` and
            `Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an
            error. Please see this custom training [tutorial](
            https://www.tensorflow.org/tutorials/distribute/custom_training)
            for more details.
        name: Optional name for the instance. Defaults to 'cosine_similarity'.
    r   cosine_similarityc                 4    t         |   t        |||       y )N)r   r   r   )rX   r   r   )r   r   r   r   r   s       r   r   zCosineSimilarity.__init__  s      	D 	 	
r   rx   rq   s   @r   r   r     s*    >D **// 	
 
r   r   zkeras.losses.Hingec                   P     e Zd ZdZej
                  j                  df fd	Z xZS )Hingea/  Computes the hinge loss between `y_true` & `y_pred`.

    `loss = maximum(1 - y_true * y_pred, 0)`

    `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
    provided we will convert them to -1 or 1.

    Standalone usage:

    >>> y_true = [[0., 1.], [0., 0.]]
    >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> h = tf.keras.losses.Hinge()
    >>> h(y_true, y_pred).numpy()
    1.3

    >>> # Calling with 'sample_weight'.
    >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()
    0.55

    >>> # Using 'sum' reduction type.
    >>> h = tf.keras.losses.Hinge(
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> h(y_true, y_pred).numpy()
    2.6

    >>> # Using 'none' reduction type.
    >>> h = tf.keras.losses.Hinge(
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> h(y_true, y_pred).numpy()
    array([1.1, 1.5], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd', loss=tf.keras.losses.Hinge())
    ```
    hingec                 2    t         |   t        ||       y)a  Initializes `Hinge` instance.

        Args:
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance. Defaults to 'hinge'.
        rv   N)rX   r   r   rw   s      r   r   zHinge.__init__   s      	TY?r   rx   rq   s   @r   r   r     s(    %N ".!9!9!>!>W @ @r   r   zkeras.losses.SquaredHingec                   P     e Zd ZdZej
                  j                  df fd	Z xZS )SquaredHingea_  Computes the squared hinge loss between `y_true` & `y_pred`.

    `loss = square(maximum(1 - y_true * y_pred, 0))`

    `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
    provided we will convert them to -1 or 1.

    Standalone usage:

    >>> y_true = [[0., 1.], [0., 0.]]
    >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> h = tf.keras.losses.SquaredHinge()
    >>> h(y_true, y_pred).numpy()
    1.86

    >>> # Calling with 'sample_weight'.
    >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()
    0.73

    >>> # Using 'sum' reduction type.
    >>> h = tf.keras.losses.SquaredHinge(
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> h(y_true, y_pred).numpy()
    3.72

    >>> # Using 'none' reduction type.
    >>> h = tf.keras.losses.SquaredHinge(
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> h(y_true, y_pred).numpy()
    array([1.46, 2.26], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd', loss=tf.keras.losses.SquaredHinge())
    ```
    squared_hingec                 2    t         |   t        ||       y)a'  Initializes `SquaredHinge` instance.

        Args:
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance. Defaults to 'squared_hinge'.
        rv   N)rX   r   r   rw   s      r   r   zSquaredHinge.__init__<  s    $ 	TYGr   rx   rq   s   @r   r   r     s(    %P %0055OH Hr   r   zkeras.losses.CategoricalHingec                   P     e Zd ZdZej
                  j                  df fd	Z xZS )CategoricalHingea%  Computes the categorical hinge loss between `y_true` & `y_pred`.

    `loss = maximum(neg - pos + 1, 0)`
    where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)`

    Standalone usage:

    >>> y_true = [[0, 1], [0, 0]]
    >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> h = tf.keras.losses.CategoricalHinge()
    >>> h(y_true, y_pred).numpy()
    1.4

    >>> # Calling with 'sample_weight'.
    >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()
    0.6

    >>> # Using 'sum' reduction type.
    >>> h = tf.keras.losses.CategoricalHinge(
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> h(y_true, y_pred).numpy()
    2.8

    >>> # Using 'none' reduction type.
    >>> h = tf.keras.losses.CategoricalHinge(
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> h(y_true, y_pred).numpy()
    array([1.2, 1.6], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalHinge())
    ```
    categorical_hingec                 2    t         |   t        ||       y)a?  Initializes `CategoricalHinge` instance.

        Args:
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance.
                Defaults to 'categorical_hinge'.
        rv   N)rX   r   r   rw   s      r   r   zCategoricalHinge.__init__x  s    & 	*Kr   rx   rq   s   @r   r   r   Q  s)    #L %0055<OL Lr   r   zkeras.losses.Poissonc                   P     e Zd ZdZej
                  j                  df fd	Z xZS )Poissona  Computes the Poisson loss between `y_true` & `y_pred`.

    `loss = y_pred - y_true * log(y_pred)`

    Standalone usage:

    >>> y_true = [[0., 1.], [0., 0.]]
    >>> y_pred = [[1., 1.], [0., 0.]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> p = tf.keras.losses.Poisson()
    >>> p(y_true, y_pred).numpy()
    0.5

    >>> # Calling with 'sample_weight'.
    >>> p(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
    0.4

    >>> # Using 'sum' reduction type.
    >>> p = tf.keras.losses.Poisson(
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> p(y_true, y_pred).numpy()
    0.999

    >>> # Using 'none' reduction type.
    >>> p = tf.keras.losses.Poisson(
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> p(y_true, y_pred).numpy()
    array([0.999, 0.], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd', loss=tf.keras.losses.Poisson())
    ```
    poissonc                 2    t         |   t        ||       y)a  Initializes `Poisson` instance.

        Args:
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance. Defaults to 'poisson'.
        rv   N)rX   r   r   rw   s      r   r   zPoisson.__init__  s      	tyAr   rx   rq   s   @r   r   r     s(    "H ".!9!9!>!>Y B Br   r   zkeras.losses.LogCoshc                   P     e Zd ZdZej
                  j                  df fd	Z xZS )LogCosha  Computes the logarithm of the hyperbolic cosine of the prediction error.

    `logcosh = log((exp(x) + exp(-x))/2)`,
    where x is the error `y_pred - y_true`.

    Standalone usage:

    >>> y_true = [[0., 1.], [0., 0.]]
    >>> y_pred = [[1., 1.], [0., 0.]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> l = tf.keras.losses.LogCosh()
    >>> l(y_true, y_pred).numpy()
    0.108

    >>> # Calling with 'sample_weight'.
    >>> l(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
    0.087

    >>> # Using 'sum' reduction type.
    >>> l = tf.keras.losses.LogCosh(
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> l(y_true, y_pred).numpy()
    0.217

    >>> # Using 'none' reduction type.
    >>> l = tf.keras.losses.LogCosh(
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> l(y_true, y_pred).numpy()
    array([0.217, 0.], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd', loss=tf.keras.losses.LogCosh())
    ```
    log_coshc                 2    t         |   t        ||       y)a  Initializes `LogCosh` instance.

        Args:
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance. Defaults to 'log_cosh'.
        rv   N)rX   r   r   rw   s      r   r   zLogCosh.__init__  s    $ 		Br   rx   rq   s   @r   r   r     s(    #L %0055JC Cr   r   zkeras.losses.KLDivergencec                   P     e Zd ZdZej
                  j                  df fd	Z xZS )KLDivergencea>  Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`.

    `loss = y_true * log(y_true / y_pred)`

    See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence

    Standalone usage:

    >>> y_true = [[0, 1], [0, 0]]
    >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> kl = tf.keras.losses.KLDivergence()
    >>> kl(y_true, y_pred).numpy()
    0.458

    >>> # Calling with 'sample_weight'.
    >>> kl(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
    0.366

    >>> # Using 'sum' reduction type.
    >>> kl = tf.keras.losses.KLDivergence(
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> kl(y_true, y_pred).numpy()
    0.916

    >>> # Using 'none' reduction type.
    >>> kl = tf.keras.losses.KLDivergence(
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> kl(y_true, y_pred).numpy()
    array([0.916, -3.08e-06], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd', loss=tf.keras.losses.KLDivergence())
    ```
    kl_divergencec                 2    t         |   t        ||       y)a7  Initializes `KLDivergence` instance.

        Args:
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance.
                Defaults to 'kl_divergence'.
        rv   N)rX   r   r   rw   s      r   r   zKLDivergence.__init__+  s    & 	TYGr   rx   rq   s   @r   r   r     s(    $N %0055OH Hr   r   zkeras.losses.Huberc                   R     e Zd ZdZdej
                  j                  df fd	Z xZS )Huberas  Computes the Huber loss between `y_true` & `y_pred`.

    For each value x in `error = y_true - y_pred`:

    ```
    loss = 0.5 * x^2                  if |x| <= d
    loss = 0.5 * d^2 + d * (|x| - d)  if |x| > d
    ```
    where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss

    Standalone usage:

    >>> y_true = [[0, 1], [0, 0]]
    >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> h = tf.keras.losses.Huber()
    >>> h(y_true, y_pred).numpy()
    0.155

    >>> # Calling with 'sample_weight'.
    >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()
    0.09

    >>> # Using 'sum' reduction type.
    >>> h = tf.keras.losses.Huber(
    ...     reduction=tf.keras.losses.Reduction.SUM)
    >>> h(y_true, y_pred).numpy()
    0.31

    >>> # Using 'none' reduction type.
    >>> h = tf.keras.losses.Huber(
    ...     reduction=tf.keras.losses.Reduction.NONE)
    >>> h(y_true, y_pred).numpy()
    array([0.18, 0.13], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd', loss=tf.keras.losses.Huber())
    ```
          ?
huber_lossc                 4    t         |   t        |||       y)a  Initializes `Huber` instance.

        Args:
            delta: A float, the point where the Huber loss function changes from
                a quadratic to linear.
            reduction: Type of `tf.keras.losses.Reduction` to apply to
                loss. Default value is `AUTO`. `AUTO` indicates that the
                reduction option will be determined by the usage context. For
                almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
                used under a `tf.distribute.Strategy`, except via
                `Model.compile()` and `Model.fit()`, using `AUTO` or
                `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
                custom training [tutorial](
                https://www.tensorflow.org/tutorials/distribute/custom_training)
                for more details.
            name: Optional name for the instance. Defaults to 'huber_loss'.
        )r   r   deltaN)rX   r   huber)r   r   r   r   r   s       r   r   zHuber.__init__m  s    . 	TYeLr   rx   rq   s   @r   r   r   A  s,    (X **//	M Mr   r   z keras.metrics.mean_squared_errorzkeras.metrics.msezkeras.metrics.MSEzkeras.losses.mean_squared_errorzkeras.losses.msezkeras.losses.MSEc                     t        j                  |      }t        j                  | |j                        } t	        j
                  t         j                  j                  ||       d      S )a)  Computes the mean squared error between labels and predictions.

    After computing the squared distance between the inputs, the mean value over
    the last dimension is returned.

    `loss = mean(square(y_true - y_pred), axis=-1)`

    Standalone usage:

    >>> y_true = np.random.randint(0, 2, size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = tf.keras.losses.mean_squared_error(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> assert np.array_equal(
    ...     loss.numpy(), np.mean(np.square(y_true - y_pred), axis=-1))

    Args:
        y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

    Returns:
        Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.
    r   r   )r'   convert_to_tensorcastdtyper   meanmathsquared_differencer2   r3   s     r   rt   rt     sK    B !!&)FWWVV\\*F<<2266BLLr   c                     d d  fd fd}t        |t        j                        s  ||j                               S |j                  j                         dd }t        |      dkD  r"t        j                  ||j                        }n!t        j                  g |j                        }||fD cg c]  }|j                   }}|r1|D 	cg c]  }	t        |	       }
}	|
d   |
d   dz
  k(  r|d   d	d |d<   t        j                  |t        |      dkD  
      }t        j                  |      }t        j                  |      5  t!        j"                  |||f|      cd	d	d	       S c c}w c c}	w # 1 sw Y   y	xY w)a  Apply a loss function on a per batch basis.

    Args:
        loss_fn: The loss function
        y_true: truth values (RaggedTensor)
        y_pred: predicted values (RaggedTensor)
        y_pred_extra_dim: whether y_pred has an additional dimension compared to
        y_true

    Returns:
        Loss-function result. A dense tensor if the output has a single
        dimension (per-batch loss value); a ragged tensor otherwise.
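
    Example (an illustrative sketch calling this helper directly with the
    module-level `mean_squared_error`; the return type follows the rules
    described above):

    >>> y_true = tf.ragged.constant([[1., 1., 1.], [1., 1.]])
    >>> y_pred = tf.ragged.constant([[0., 1., 1.], [1., 0.]])
    >>> loss = _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred)
    >>> assert loss.shape == (2,)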
    c                 D   t        j                  | j                         D cg c]o  }t        j                  t         j                  j                  t        j                  |t        j                                     t        j                  dg            q c}      S c c}w )zReturns true if this RaggedTensor has the same row_lengths across

           all ragged dimensions and thus can be converted to a dense tensor
           without loss of information.

        Args:
            rt: RaggedTensor.
        r   )
r'   
reduce_allnested_row_lengthsequalr   reduce_variancer   r   floatxconstant)rtrow_lenss     r   rt_is_equiv_densez4_ragged_tensor_apply_loss.<locals>.rt_is_equiv_dense  s}     }} !# 5 5 7  GG++'..*:; KK&	

 
	
s   A4Bc                 &    t        d | D              S )Nc              3   t   K   | ]0  }t        |t        j                        r|j                         n| 2 y wrL   )
isinstancer'   RaggedTensor	to_tensor).0r   s     r   	<genexpr>zG_ragged_tensor_apply_loss.<locals>._convert_to_dense.<locals>.<genexpr>  s0      
 )R__=BLLN2E
s   68)tuple)inputss    r   _convert_to_densez4_ragged_tensor_apply_loss.<locals>._convert_to_dense  s     

 
 	
r   c                      |  }|r;t        |t        j                        s!t        j                  j                  |      }|S |s*t        |t        j                        r|j	                         }|S )zAdapt the result to ragged or dense tensor according to the expected

        output type. This is done so that all the return values of the map
        operation have the same type.
        )r   r'   r   from_tensorr   )r   ragged_outputrloss_fns      r   
_call_lossz-_ragged_tensor_apply_loss.<locals>._call_loss  s\     VAr!?++A.A  :a#AAr   c                       \  }}t        |t        j                        r(t        j                   |       fd fd      S    S )Nc                  "                   S rL   r=   )r   r   r   r   s   r   <lambda>z=_ragged_tensor_apply_loss.<locals>._wrapper.<locals>.<lambda>  s    
#4V#<mL r   c                              S rL   r=   )r   r   r   s   r   r   z=_ragged_tensor_apply_loss.<locals>._wrapper.<locals>.<lambda>  s    
6=9 r   )r   r'   r   cond)r   r   r   r3   r   r   r   r   s   ``  r   _wrapperz+_ragged_tensor_apply_loss.<locals>._wrapper  sF    	6fboo.77!&)L9  r      r   r   )shaper   N)r   )elemsr   )r   r'   r   r   r   as_listlenRaggedTensorSpecr   
TensorSpecnested_row_splits	functoolspartialr   assert_splits_matchcontrol_dependenciesr
   map_fn)r   r2   r3   y_pred_extra_dimr   lshapespecr   nested_splits_listslistrdimsr  assertion_listr   r   r   s   `            @@@r   _ragged_tensor_apply_lossr    si   
*
	  fboo.vv//122\\!!#Ab)F
6{Q""v||D}}2V\\::@&9IJ2"..JJ);<U<<8uQx!|#$6q$9#2$>q!xs6{QGF 445GHN		 	 	0 Q$$VFF3C4PQ Q K =Q Qs   =E6E;F  F	c                 $    t        t        | |      S )a  Implements support for handling RaggedTensors.

    Args:
        y_true: RaggedTensor truth values. shape = `[batch_size, d0, .. dN]`.
        y_pred: RaggedTensor predicted values.
            shape = `[batch_size, d0, .. dN]`.

    Returns:
        Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.
        When the number of dimensions of the batch feature vector [d0, .. dN] is
        greater than one the return value is a RaggedTensor. Otherwise, a Dense
        tensor with dimensions [batch_size] is returned.
    )r  rt   r   s     r   _ragged_tensor_mser    s     %%7HHr   z!keras.metrics.mean_absolute_errorzkeras.metrics.maezkeras.metrics.MAEz keras.losses.mean_absolute_errorzkeras.losses.maezkeras.losses.MAEc                     t        j                  |      }t        j                  | |j                        } t	        j
                  t        j                  || z
        d      S )a  Computes the mean absolute error between labels and predictions.

    `loss = mean(abs(y_true - y_pred), axis=-1)`

    Standalone usage:

    >>> y_true = np.random.randint(0, 2, size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> assert np.array_equal(
    ...     loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1))

    Args:
        y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

    Returns:
        Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`.
    r   r   )r'   r   r   r   r   r   absr   s     r   r{   r{     sF    < !!&)FWWVV\\*F<<v/b99r   c                 $    t        t        | |      S )z-RaggedTensor adapter for mean_absolute_error.)r  r{   r   s     r   _ragged_tensor_maer  ;  s     %%8&&IIr   z,keras.metrics.mean_absolute_percentage_errorzkeras.metrics.mapezkeras.metrics.MAPEz+keras.losses.mean_absolute_percentage_errorzkeras.losses.mapezkeras.losses.MAPEc                 H   t        j                  |      }t        j                  | |j                        } t        j                  | |z
  t        j                  t        j                  |       t        j                               z        }dt        j                  |d      z  S )aD  Computes the mean absolute percentage error between `y_true` & `y_pred`.

    `loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)`

    Standalone usage:

    >>> y_true = np.random.random(size=(2, 3))
    >>> y_true = np.maximum(y_true, 1e-7)  # Prevent division by zero
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = tf.keras.losses.mean_absolute_percentage_error(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> assert np.array_equal(
    ...     loss.numpy(),
    ...     100. * np.mean(np.abs((y_true - y_pred) / y_true), axis=-1))

    Args:
        y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

    Returns:
        Mean absolute percentage error values. shape = `[batch_size, d0, ..
        dN-1]`.
    g      Y@r   r   )	r'   r   r   r   r  r   maximumepsilonr   )r2   r3   diffs      r   r   r   A  su    B !!&)FWWVV\\*F66	&GOOBFF6NGOO<MNND 7<<2...r   c                 $    t        t        | |      S )zSupport RaggedTensors.)r  r   r   s     r   _ragged_tensor_maper  j       %& r   z,keras.metrics.mean_squared_logarithmic_errorzkeras.metrics.mslezkeras.metrics.MSLEz+keras.losses.mean_squared_logarithmic_errorzkeras.losses.mslezkeras.losses.MSLEc                    t        j                  |      }t        j                  | |j                        } t         j                  j                  t        j                  |t        j                               dz         }t         j                  j                  t        j                  | t        j                               dz         }t        j                  t         j                  j                  ||      d      S )av  Computes the mean squared logarithmic error between `y_true` & `y_pred`.

    `loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)`

    Standalone usage:

    >>> y_true = np.random.randint(0, 2, size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = tf.keras.losses.mean_squared_logarithmic_error(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> y_true = np.maximum(y_true, 1e-7)
    >>> y_pred = np.maximum(y_pred, 1e-7)
    >>> assert np.allclose(
    ...     loss.numpy(),
    ...     np.mean(
    ...         np.square(np.log(y_true + 1.) - np.log(y_pred + 1.)), axis=-1))

    Args:
        y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

    Returns:
        Mean squared logarithmic error values. shape = `[batch_size, d0, ..
        dN-1]`.
    r   r   r   )r'   r   r   r   r   logr   r  r  r   r   )r2   r3   	first_log
second_logs       r   r   r   r  s    F !!&)FWWVV\\*FGOOFGOO4EFLMIW__VW__5FG#MNJ<<
""9j9 r   c                 $    t        t        | |      S )z.Implements support for handling RaggedTensors.)r  r   r   s     r   _ragged_tensor_msler    r  r   c                     t        j                   d      }t        j                   d      }t        j                  t        j                  ||            } fd}t         j                  j
                  j                  || fd      }|S )z!Converts binary labels into -1/1.r   r   c                      d z  dz
  S )Nr   r   r=   r2   s   r   _convert_binary_labelsz5_maybe_convert_labels.<locals>._convert_binary_labels  s    V|c!!r   c                       S rL   r=   r"  s   r   r   z'_maybe_convert_labels.<locals>.<lambda>  s    6 r   )r'   r   r   
logical_orr*   
smart_cond)r2   	are_zerosare_ones	is_binaryr#  updated_y_trues   `     r   _maybe_convert_labelsr+    sm    #Ixx"HbmmIx@AI" __//::)>N r   zkeras.metrics.squared_hingezkeras.losses.squared_hingec           	      
   t        j                  |      }t        j                  | |j                        } t	        |       } t        j                  t        j                  t        j                  d| |z  z
  d            d      S )an  Computes the squared hinge loss between `y_true` & `y_pred`.

    `loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)`

    Standalone usage:

    >>> y_true = np.random.choice([-1, 1], size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = tf.keras.losses.squared_hinge(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> assert np.array_equal(
    ...     loss.numpy(),
    ...     np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1))

    Args:
        y_true: The ground truth values. `y_true` values are expected to be -1
            or 1. If binary (0 or 1) labels are provided we will convert them to
            -1 or 1. shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

    Returns:
        Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
    r   r   r   r   )	r'   r   r   r   r+  r   r   squarer  r   s     r   r   r     se    4 !!&)FWWVV\\*F"6*F<<
		"**S6F?2C89 r   zkeras.metrics.hingezkeras.losses.hingec                     t        j                  |      }t        j                  | |j                        } t	        |       } t        j                  t        j                  d| |z  z
  d      d      S )aC  Computes the hinge loss between `y_true` & `y_pred`.

    `loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)`

    Standalone usage:

    >>> y_true = np.random.choice([-1, 1], size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = tf.keras.losses.hinge(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> assert np.array_equal(
    ...     loss.numpy(),
    ...     np.mean(np.maximum(1. - y_true * y_pred, 0.), axis=-1))

    Args:
        y_true: The ground truth values. `y_true` values are expected to be -1
            or 1. If binary (0 or 1) labels are provided we will convert them to
            -1 or 1. shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

    Returns:
        Hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
    r   r   r   r   )r'   r   r   r   r+  r   r   r  r   s     r   r   r     sW    4 !!&)FWWVV\\*F"6*F<<

3&#8#>RHHr   zkeras.losses.categorical_hingec                 R   t        j                  |      }t        j                  | |j                        } t        j                  | |z  d      }t        j
                  d| z
  |z  d      }t        j                  d|j                        }t        j                  ||z
  dz   |      S )a  Computes the categorical hinge loss between `y_true` & `y_pred`.

    `loss = maximum(neg - pos + 1, 0)`
    where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)`

    Standalone usage:

    >>> y_true = np.random.randint(0, 3, size=(2,))
    >>> y_true = tf.keras.utils.to_categorical(y_true, num_classes=3)
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = tf.keras.losses.categorical_hinge(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> pos = np.sum(y_true * y_pred, axis=-1)
    >>> neg = np.amax((1. - y_true) * y_pred, axis=-1)
    >>> assert np.array_equal(loss.numpy(), np.maximum(0., neg - pos + 1.))

    Args:
        y_true: The ground truth values. `y_true` values are expected to be
        either `{-1, +1}` or `{0, 1}` (i.e. a one-hot-encoded tensor).
        y_pred: The predicted values.

    Returns:
        Categorical hinge loss values.
    r   r   r   r   )r'   r   r   r   
reduce_sum
reduce_maxr  )r2   r3   posnegzeros        r   r   r     s    6 !!&)FWWVV\\*F
--b
1C
--v/b
9C773%D::cCi#ot,,r   zkeras.losses.huberc                 R   t        j                  |t        j                               }t        j                  | t        j                               } t        j                  |t        j                               }t        j                  ||       }t        j
                  |      }t        j                  d|j                        }t        j                  t        j                  ||k  |t        j                  |      z  ||z  |t        j                  |      z  z
        d      S )a!  Computes Huber loss value.

    For each value x in `error = y_true - y_pred`:

    ```
    loss = 0.5 * x^2                  if |x| <= d
    loss = d * |x| - 0.5 * d^2        if |x| > d
    ```
    where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss

    Args:
        y_true: tensor of true targets.
        y_pred: tensor of predicted targets.
        delta: A float, the point where the Huber loss function changes from a
            quadratic to linear.

    Returns:
        Tensor with one scalar loss entry per sample.
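
    Standalone usage (a minimal sketch; only the output shape is asserted):

    >>> y_true = np.random.random(size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = tf.keras.losses.huber(y_true, y_pred, delta=1.0)
    >>> assert loss.shape == (2,)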
    r         ?r   r   )r'   r   r   r   subtractr  r   r   r   wherer-  )r2   r3   r   error	abs_errorhalfs         r   r   r     s    , WWV7>>#34FWWV7>>#34FGGE!12EKK'EuI9??;D<<
299U##Iryy'7 77	

  r   zkeras.losses.log_coshzkeras.losses.logcoshzkeras.metrics.log_coshzkeras.metrics.logcoshc                     t        j                  |      }t        j                  | |j                        } d }t	        j
                   ||| z
        d      S )a  Logarithm of the hyperbolic cosine of the prediction error.

    `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
    to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
    like the mean squared error, but will not be so strongly affected by the
    occasional wildly incorrect prediction.

    Standalone usage:

    >>> y_true = np.random.random(size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = tf.keras.losses.logcosh(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> x = y_pred - y_true
    >>> assert np.allclose(
    ...     loss.numpy(),
    ...     np.mean(x + np.log(np.exp(-2. * x) + 1.) - tf.math.log(2.),
    ...             axis=-1),
    ...     atol=1e-5)

    Args:
        y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

    Returns:
        Logcosh error values. shape = `[batch_size, d0, .. dN-1]`.
    c                     | t         j                  j                  d| z        z   t        j                  t         j                  j	                  d      | j
                        z
  S )Ng       r   )r'   r   softplusr   r  r   )xs    r   _logcoshzlog_cosh.<locals>._logcoshg  sC      **RWWRWW[[5Eqww-OO	
r   r   r   )r'   r   r   r   r   r   )r2   r3   rA  s      r   r   r   A  sJ    F !!&)FWWVV\\*F

 <<&1;;r   z&keras.metrics.categorical_crossentropyz%keras.losses.categorical_crossentropyc                      t        t              rt        d dt                     t	        j
                        t	        j                   j                         t	        j
                  j                        j                  d   dk(  r*t        j                  dj                   dt        d	        fd
}t        j                  j                  j                  | fd       t        j                   |      S )a  Computes the categorical crossentropy loss.

    Standalone usage:

    >>> y_true = [[0, 1, 0], [0, 0, 1]]
    >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
    >>> loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> loss.numpy()
    array([0.0513, 2.303], dtype=float32)

    Args:
        y_true: Tensor of one-hot true targets.
        y_pred: Tensor of predicted targets.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
            example, if `0.1`, use `0.1 / num_classes` for non-target labels
            and `0.9 + 0.1 / num_classes` for target labels.
        axis: Defaults to -1. The dimension along which the entropy is
            computed.

    Returns:
        Categorical crossentropy loss value.
    -`axis` must be of type `int`. Received: axis=	 of type r6  r   r   zIn loss categorical_crossentropy, expected y_pred.shape to be (batch_size, num_classes) with num_classes > 1. Received: y_pred.shape=B. Consider using 'binary_crossentropy' if you only have 2 classes.   
stacklevelc                      t        j                  t        j                           j                        } dz
  z  | z  z   S )Nr   r'   r   r   r   )num_classesr   r   r3   r2   s    r   _smooth_labelsz0categorical_crossentropy.<locals>._smooth_labels  sD    ggbhhv.t4fllC./k)
 	
r   c                       S rL   r=   r"  s   r   r   z*categorical_crossentropy.<locals>.<lambda>       r   )r   r   )r   boolrK   typer'   r   r   r   r   warningswarnSyntaxWarningr*   r&  r   r   r2   r3   r   r   r   rL  s   `` `` r   r   r   o  s    B $"V9T$ZL:
 	
 !!&)FWWVV\\*F**?&,,OO||B1<<BLL> JOO 	

 __''22F ++Kd r   c                 V    t        j                  t        |||      }t        || |      S )a  Implements support for handling RaggedTensors.

    Args:
        y_true: Tensor of one-hot true targets.
        y_pred: Tensor of predicted targets.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
            example, if `0.1`, use `0.1 / num_classes` for non-target labels
            and `0.9 + 0.1 / num_classes` for target labels.
        axis: The axis along which to compute crossentropy (the features axis).
            Defaults to -1.

    Returns:
        Categorical crossentropy loss value.

    Expected shape: (batch, sequence_len, n_classes) with sequence_len
    being variable per batch.
    Return shape: (batch, sequence_len).

    When used by CategoricalCrossentropy() with the default reduction
    (SUM_OVER_BATCH_SIZE), the reduction averages the loss over the
    number of elements independent of the batch. E.g. if the RaggedTensor
    has 2 batches with [2, 1] values respectively the resulting loss is
    the sum of the individual loss values divided by 3.
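
    Example (an illustrative sketch of the ragged case described above; the
    loss is averaged over the 3 sequence elements in total):

    >>> y_true = tf.ragged.constant([[[0., 1.], [1., 0.]], [[0., 1.]]],
    ...                             ragged_rank=1)
    >>> y_pred = tf.ragged.constant([[[0.1, 0.9], [0.8, 0.2]], [[0.4, 0.6]]],
    ...                             ragged_rank=1)
    >>> cce = tf.keras.losses.CategoricalCrossentropy()
    >>> loss = cce(y_true, y_pred)  # sum of the 3 element losses, divided by 3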
    r   r   r   )r   r  r   r  r2   r3   r   r   r   rY   s         r   '_ragged_tensor_categorical_crossentropyrX    s1    < 
		 '	
B %R88r   z,keras.metrics.categorical_focal_crossentropyz+keras.losses.categorical_focal_crossentropyc                      t        |t              rt        d| dt        |             t	        j
                        t	        j                   j                         t	        j
                  j                        j                  d   dk(  r*t        j                  dj                   dt        d	        fd
}t        j                  j                  j                  | fd       t        j                   ||||      S )aE  Computes the categorical focal crossentropy loss.

    Standalone usage:
    >>> y_true = [[0, 1, 0], [0, 0, 1]]
    >>> y_pred = [[0.05, 0.9, 0.05], [0.1, 0.85, 0.05]]
    >>> loss = tf.keras.losses.categorical_focal_crossentropy(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> loss.numpy()
    array([2.63401289e-04, 6.75912094e-01], dtype=float32)

    Args:
        y_true: Tensor of one-hot true targets.
        y_pred: Tensor of predicted targets.
        alpha: A weight balancing factor for all classes, default is `0.25` as
            mentioned in the reference. It can be a list of floats or a scalar.
            In the multi-class case, alpha may be set by inverse class
            frequency by using `compute_class_weight` from `sklearn.utils`.
        gamma: A focusing parameter, default is `2.0` as mentioned in the
            reference. It helps to gradually reduce the importance given to
            simple examples in a smooth manner. When `gamma` = 0, there is
            no focal effect on the categorical crossentropy.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability
            distribution.
        label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
            example, if `0.1`, use `0.1 / num_classes` for non-target labels
            and `0.9 + 0.1 / num_classes` for target labels.
        axis: Defaults to -1. The dimension along which the entropy is
            computed.

    Returns:
        Categorical focal crossentropy loss value.
    rC  rD  r6  r   r   zIn loss categorical_focal_crossentropy, expected y_pred.shape to be (batch_size, num_classes) with num_classes > 1. Received: y_pred.shape=rE  rF  rG  c                      t        j                  t        j                        d   j                        } dz
  z  | z  z   S )Nr   r   rJ  )rK  r   r3   r2   s    r   rL  z6categorical_focal_crossentropy.<locals>._smooth_labels	  sD    ggbhhv.r2FLLA./k)
 	
r   c                       S rL   r=   r"  s   r   r   z0categorical_focal_crossentropy.<locals>.<lambda>"	  rN  r   )targetoutputr   r   r   r   )r   rO  rK   rP  r'   r   r   r   r   rQ  rR  rS  r*   r&  r   r   )r2   r3   r   r   r   r   r   rL  s   ``   `  r   r   r     s   ^ $"V9T$ZL:
 	
 !!&)FWWVV\\*F**?&,,OO||B1<<BLL> JOO 	

 __''22F 11 r   c                 Z    t        j                  t        |||||      }t        || |      S )a  Implements support for handling RaggedTensors.

    Expected shape: (batch, sequence_len, n_classes) with sequence_len
    being variable per batch.
    Return shape: (batch, sequence_len).
    When used by CategoricalFocalCrossentropy() with the default reduction
    (SUM_OVER_BATCH_SIZE), the reduction averages the loss over the
    number of elements independent of the batch. E.g. if the RaggedTensor
    has 2 batches with [2, 1] values respectively the resulting loss is
    the sum of the individual loss values divided by 3.

    Args:
        alpha: A weight balancing factor for all classes, default is `0.25` as
            mentioned in the reference. It can be a list of floats or a scalar.
            In the multi-class case, alpha may be set by inverse class
            frequency by using `compute_class_weight` from `sklearn.utils`.
        gamma: A focusing parameter, default is `2.0` as mentioned in the
            reference. It helps to gradually reduce the importance given to
            simple examples in a smooth manner. When `gamma` = 0, there is
            no focal effect on the categorical crossentropy.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
            example, if `0.1`, use `0.1 / num_classes` for non-target labels
            and `0.9 + 0.1 / num_classes` for target labels.
        axis: Defaults to -1. The dimension along which the entropy is
            computed.

    Returns:
      Categorical focal crossentropy loss value.
    )r   r   r   r   r   )r   r  r   r  )r2   r3   r   r   r   r   r   rY   s           r   -_ragged_tensor_categorical_focal_crossentropyr_  /	  s8    R 
		&'
B %R88r   z-keras.metrics.sparse_categorical_crossentropyz,keras.losses.sparse_categorical_crossentropyc                 6    t        j                  | ||||      S )a  Computes the sparse categorical crossentropy loss.

    Standalone usage:

    >>> y_true = [1, 2]
    >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
    >>> loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> loss.numpy()
    array([0.0513, 2.303], dtype=float32)

    >>> y_true = [[[ 0,  2],
    ...            [-1, -1]],
    ...           [[ 0,  2],
    ...            [-1, -1]]]
    >>> y_pred = [[[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]],
    ...             [[0.2, 0.5, 0.3], [0.0, 1.0, 0.0]]],
    ...           [[[1.0, 0.0, 0.0], [0.0, 0.5, 0.5]],
    ...            [[0.2, 0.5, 0.3], [0.0, 1.0, 0.0]]]]
    >>> loss = tf.keras.losses.sparse_categorical_crossentropy(
    ...   y_true, y_pred, ignore_class=-1)
    >>> loss.numpy()
    array([[[2.3841855e-07, 2.3841855e-07],
            [0.0000000e+00, 0.0000000e+00]],
           [[2.3841855e-07, 6.9314730e-01],
            [0.0000000e+00, 0.0000000e+00]]], dtype=float32)

    Args:
        y_true: Ground truth values.
        y_pred: The predicted values.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        axis: Defaults to -1. The dimension along which the entropy is
            computed.
        ignore_class: Optional integer. The ID of a class to be ignored during
            loss computation. This is useful, for example, in segmentation
            problems featuring a "void" class (commonly -1 or 255) in
            segmentation maps. By default (`ignore_class=None`), all classes are
            considered.

    Returns:
        Sparse categorical crossentropy loss value.
    r   r   r   )r   r   )r2   r3   r   r   r   s        r   r   r   c	  s'    f 22! r   c                 Z    t        j                  t        |||      }t        || |d      S )a%  Implements support for handling RaggedTensors.

    Expected y_pred shape: (batch, sequence_len, n_classes) with sequence_len
    being variable per batch.
    Return shape: (batch, sequence_len).

    When used by SparseCategoricalCrossentropy() with the default reduction
    (SUM_OVER_BATCH_SIZE), the reduction averages the loss over the
    number of elements independent of the batch. E.g. if the RaggedTensor
    has 2 batches with [2, 1] values respectively, the resulting loss is
    the sum of the individual loss values divided by 3.
    ra  T)r  )r   r  r   r  )r2   r3   r   r   r   rY   s         r   ._ragged_tensor_sparse_categorical_crossentropyrc  	  s3      
		'!	
B %R$OOr   z!keras.metrics.binary_crossentropyz keras.losses.binary_crossentropyc                 t    t        j                  |      }t        j                   |j                         t        j                  |j                         fd}t         j                  j
                  j                  | fd       t        j                  t        j                   ||      |      S )a  Computes the binary crossentropy loss.

    Standalone usage:

    >>> y_true = [[0, 1], [0, 0]]
    >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
    >>> loss = tf.keras.losses.binary_crossentropy(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> loss.numpy()
    array([0.916 , 0.714], dtype=float32)

    Args:
        y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        label_smoothing: Float in [0, 1]. If > `0` then smooth the labels by
            squeezing them towards 0.5. That is, using
            `1. - 0.5 * label_smoothing` for the target class and
            `0.5 * label_smoothing` for the non-target class.
        axis: The axis along which the mean is computed. Defaults to -1.

    Returns:
        Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`.
    r6  c                       d z
  z  d z  z   S Nr   r7  r=   r   r2   s   r   rL  z+binary_crossentropy.<locals>._smooth_labels	      ./#2GGGr   c                       S rL   r=   r"  s   r   r   z%binary_crossentropy.<locals>.<lambda>	  rN  r   )r   r   )	r'   r   r   r   r*   r&  r   r   r   rT  s   `  `  r   r   r   	  s    @ !!&)FWWVV\\*F**?&,,OOH __''22F <<##FFL r   c                 V    t        j                  t        |||      }t        || |      S )a  Implements support for handling RaggedTensors.

    Args:
        y_true: Tensor of one-hot true targets.
        y_pred: Tensor of predicted targets.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        label_smoothing: Float in [0, 1]. If > `0` then smooth the labels by
            squeezing them towards 0.5. That is, using
            `1. - 0.5 * label_smoothing` for the target class and
            `0.5 * label_smoothing` for the non-target class.
        axis: Axis along which to compute crossentropy.

    Returns:
        Binary crossentropy loss value.

    Expected shape: (batch, sequence_len) with sequence_len being variable
    per batch.
    Return shape: (batch,); returns the per batch mean of the loss values.

    When used by BinaryCrossentropy() with the default reduction
    (SUM_OVER_BATCH_SIZE), the reduction averages the per batch losses over
    the number of batches.
    rV  )r   r  r   r  rW  s         r   "_ragged_tensor_binary_crossentropyrk  	  s1    6 
		'	
B %R88r   z'keras.metrics.binary_focal_crossentropyz&keras.losses.binary_focal_crossentropyc           
      z    t        j                  |      }t        j                   |j                         t        j                  |j                         fd}t         j                  j
                  j                  | fd       t        j                  t        j                   |||||      |      S )a  Computes the binary focal crossentropy loss.

    According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
    helps to apply a focal factor to down-weight easy examples and focus more on
    hard examples. By default, the focal tensor is computed as follows:

    `focal_factor = (1 - output)**gamma` for class 1
    `focal_factor = output**gamma` for class 0
    where `gamma` is a focusing parameter. When `gamma` = 0, there is no focal
    effect on the binary crossentropy loss.

    If `apply_class_balancing == True`, this function also takes into account a
    weight balancing factor for the binary classes 0 and 1 as follows:

    `weight = alpha` for class 1 (`target == 1`)
    `weight = 1 - alpha` for class 0
    where `alpha` is a float in the range of `[0, 1]`.

    Standalone usage:

    >>> y_true = [[0, 1], [0, 0]]
    >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
    >>> loss = tf.keras.losses.binary_focal_crossentropy(y_true, y_pred,
    ...                                                  gamma=2)
    >>> assert loss.shape == (2,)
    >>> loss.numpy()
    array([0.330, 0.206], dtype=float32)

    Args:
        y_true: Ground truth values, of shape `(batch_size, d0, .. dN)`.
        y_pred: The predicted values, of shape `(batch_size, d0, .. dN)`.
        apply_class_balancing: A bool, whether to apply weight balancing on the
            binary classes 0 and 1.
        alpha: A weight balancing factor for class 1, default is `0.25` as
            mentioned in the reference. The weight for class 0 is `1.0 - alpha`.
        gamma: A focusing parameter, default is `2.0` as mentioned in the
            reference.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        label_smoothing: Float in `[0, 1]`. If higher than 0 then smooth the
            labels by squeezing them towards `0.5`, i.e., using `1. - 0.5 *
            label_smoothing` for the target class and `0.5 * label_smoothing`
            for the non-target class.
        axis: The axis along which the mean is computed. Defaults to `-1`.

    Returns:
        Binary focal crossentropy loss value.
            shape = `[batch_size, d0, .. dN-1]`.
    r6  c                       d z
  z  d z  z   S rf  r=   rg  s   r   rL  z1binary_focal_crossentropy.<locals>._smooth_labelsQ
  rh  r   c                       S rL   r=   r"  s   r   r   z+binary_focal_crossentropy.<locals>.<lambda>U
  rN  r   )r\  r]  r   r   r   r   r   )	r'   r   r   r   r*   r&  r   r   r   )	r2   r3   r   r   r   r   r   r   rL  s	   `     `  r   r   r   
  s    @ !!&)FWWVV\\*F**?&,,OOH __''22F <<))"7#	
 
 
r   c           	      \    t        j                  t        ||||||      }t        || |      S )a  Implements support for handling RaggedTensors.

    Expected shape: `(batch, sequence_len)` with sequence_len being variable per
    batch.
    Return shape: `(batch,)`; returns the per batch mean of the loss values.

    When used by BinaryFocalCrossentropy() with the default reduction
    (SUM_OVER_BATCH_SIZE), the reduction averages the per batch losses over
    the number of batches.

    Args:
        y_true: Tensor of one-hot true targets.
        y_pred: Tensor of predicted targets.
        apply_class_balancing: A bool, whether to apply weight balancing on the
            binary classes 0 and 1.
        alpha: A weight balancing factor for class 1, default is `0.25` as
            mentioned in the reference [Lin et al., 2018](
            https://arxiv.org/pdf/1708.02002.pdf). The weight for class 0 is
            `1.0 - alpha`.
        gamma: A focusing parameter, default is `2.0` as mentioned in the
            reference.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        label_smoothing: Float in `[0, 1]`. If > `0` then smooth the labels by
            squeezing them towards `0.5`, i.e., using `1. - 0.5 *
            label_smoothing` for the target class and `0.5 * label_smoothing`
            for the non-target class.
        axis: Axis along which to compute crossentropy.

    Returns:
        Binary focal crossentropy loss value.
    )r   r   r   r   r   r   )r   r  r   r  )	r2   r3   r   r   r   r   r   r   rY   s	            r   (_ragged_tensor_binary_focal_crossentropyrp  e
  s;    T 
		!3'
B %R88r   zkeras.metrics.kl_divergencez)keras.metrics.kullback_leibler_divergencezkeras.metrics.kldzkeras.metrics.KLDzkeras.losses.kl_divergencez(keras.losses.kullback_leibler_divergencezkeras.losses.kldzkeras.losses.KLDc                    t        j                  |      }t        j                  | |j                        } t	        j
                  | t	        j                         d      } t	        j
                  |t	        j                         d      }t        j                  | t         j                  j                  | |z        z  d      S )az  Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`.

    `loss = y_true * log(y_true / y_pred)`

    See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence

    Standalone usage:

    >>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float64)
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = tf.keras.losses.kullback_leibler_divergence(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> y_true = tf.keras.backend.clip(y_true, 1e-7, 1)
    >>> y_pred = tf.keras.backend.clip(y_pred, 1e-7, 1)
    >>> assert np.array_equal(
    ...     loss.numpy(), np.sum(y_true * np.log(y_true / y_pred), axis=-1))

    Args:
        y_true: Tensor of true targets.
        y_pred: Tensor of predicted targets.

    Returns:
        A `Tensor` with loss.

    Raises:
        TypeError: If `y_true` cannot be cast to the `y_pred.dtype`.
    r   r   r   )
r'   r   r   r   r   clipr  r0  r   r  r   s     r   r   r   
  s    N !!&)FWWVV\\*F\\&'//"3Q7F\\&'//"3Q7F=="''++fvo">>RHHr   zkeras.metrics.poissonzkeras.losses.poissonc           	      
   t        j                  |      }t        j                  | |j                        } t	        j
                  || t         j                  j                  |t	        j                         z         z  z
  d      S )a_  Computes the Poisson loss between y_true and y_pred.

    The Poisson loss is the mean of the elements of the `Tensor`
    `y_pred - y_true * log(y_pred)`.

    Standalone usage:

    >>> y_true = np.random.randint(0, 2, size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = tf.keras.losses.poisson(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> y_pred = y_pred + 1e-7
    >>> assert np.allclose(
    ...     loss.numpy(), np.mean(y_pred - y_true * np.log(y_pred), axis=-1),
    ...     atol=1e-5)

    Args:
        y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

    Returns:
        Poisson loss value. shape = `[batch_size, d0, .. dN-1]`.

    Raises:
        InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes.
    r   r   )	r'   r   r   r   r   r   r   r  r  r   s     r   r   r   
  sa    : !!&)FWWVV\\*F<<"''++fw/@&@AAA r   keras.losses.cosine_similarity)zkeras.metrics.cosine_proximityzkeras.metrics.cosinezkeras.losses.cosine_proximityzkeras.losses.cosinert  c                     t         j                  j                  | |      } t         j                  j                  ||      }t        j                  | |z  |       S )aQ  Computes the cosine similarity between labels and predictions.

    Note that it is a number between -1 and 1. When it is a negative number
    between -1 and 0, 0 indicates orthogonality and values closer to -1
    indicate greater similarity. The values closer to 1 indicate greater
    dissimilarity. This makes it usable as a loss function in a setting
    where you try to maximize the proximity between predictions and
    targets. If either `y_true` or `y_pred` is a zero vector, cosine
    similarity will be 0 regardless of the proximity between predictions
    and targets.

    `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`

    Standalone usage:

    >>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
    >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
    >>> loss = tf.keras.losses.cosine_similarity(y_true, y_pred, axis=1)
    >>> loss.numpy()
    array([-0., -0.999, 0.999], dtype=float32)

    Args:
        y_true: Tensor of true targets.
        y_pred: Tensor of predicted targets.
        axis: Axis along which to determine similarity.

    Returns:
        Cosine similarity tensor.
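
    For example, the function can be passed directly as the `loss` argument
    of `Model.compile` (illustrative sketch; the layer sizes are arbitrary):

    >>> model = tf.keras.Sequential([tf.keras.layers.Dense(4)])
    >>> model.compile(optimizer="sgd", loss=tf.keras.losses.cosine_similarity)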
    """
    y_true = tf.linalg.l2_normalize(y_true, axis=axis)
    y_pred = tf.linalg.l2_normalize(y_pred, axis=axis)
    # Negated so that minimizing the loss maximizes the cosine similarity.
    return -tf.reduce_sum(y_true * y_pred, axis=axis)


def is_categorical_crossentropy(loss):
    result = (
        isinstance(loss, CategoricalCrossentropy)
        or (
            isinstance(loss, LossFunctionWrapper)
            and loss.fn == categorical_crossentropy
        )
        or (
            hasattr(loss, "__name__")
            and loss.__name__ == "categorical_crossentropy"
        )
        or (loss == "categorical_crossentropy")
    )
    return result


@keras_export("keras.losses.serialize")
def serialize(loss, use_legacy_format=False):
    """Serializes loss function or `Loss` instance.

    Args:
        loss: A TF-Keras `Loss` instance or a loss function.
        use_legacy_format: Boolean, whether to use the legacy serialization
            format. Defaults to `False`.

    Returns:
        Loss configuration dictionary.
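
    Example (an illustrative round trip; `MeanSquaredError` stands in for any
    built-in loss):

    >>> loss = tf.keras.losses.MeanSquaredError()
    >>> config = tf.keras.losses.serialize(loss)
    >>> restored = tf.keras.losses.deserialize(config)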
    """
    if loss is None:
        return None
    if not isinstance(loss, Loss):
        warnings.warn(
            "The `keras.losses.serialize()` API should only be used for "
            "objects of type `keras.losses.Loss`. Found an instance of type "
            f"{type(loss)}, which may lead to improper serialization."
        )
    if use_legacy_format:
        return legacy_serialization.serialize_keras_object(loss)
    return serialize_keras_object(loss)


@keras_export("keras.losses.deserialize")
def deserialize(name, custom_objects=None, use_legacy_format=False):
    """Deserializes a serialized loss class/function instance.

    Args:
        name: Loss configuration.
        custom_objects: Optional dictionary mapping names (strings) to custom
            objects (classes and functions) to be considered during
            deserialization.
        use_legacy_format: Boolean, whether to use the legacy serialization
            format. Defaults to `False`.

    Returns:
        A TF-Keras `Loss` instance or a loss function.
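
    Example (illustrative; a plain string name resolves to the corresponding
    function):

    >>> loss = tf.keras.losses.deserialize("mean_squared_error")
    >>> type(loss)
    <class 'function'>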
    zloss function)module_objectscustom_objectsprintable_module_name)r~  r   globals)r   r  r  s      r   deserializer  P  sE     #<<"9)"1	
 	
 $y%-	 r   zkeras.losses.getc                     | yt        | t              rt        |       } d| v}t        | |      S t        | t              rt        |       S t	        |       r| S t        d|        )a  Retrieves a TF-Keras loss as a `function`/`Loss` class instance.

    The `identifier` may be the string name of a loss function or `Loss` class.

    >>> loss = tf.keras.losses.get("categorical_crossentropy")
    >>> type(loss)
    <class 'function'>
    >>> loss = tf.keras.losses.get("CategoricalCrossentropy")
    >>> type(loss)
    <class '...keras.losses.CategoricalCrossentropy'>

    You can also specify the `config` of the loss to this function by passing
    a dict containing `class_name` and `config` as an identifier. Also note
    that the `class_name` must map to a `Loss` class.

    >>> identifier = {"class_name": "CategoricalCrossentropy",
    ...               "config": {"from_logits": True}}
    >>> loss = tf.keras.losses.get(identifier)
    >>> type(loss)
    <class '...keras.losses.CategoricalCrossentropy'>
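
    Passing a callable (a loss function or a `Loss` instance) returns it
    unchanged:

    >>> loss = tf.keras.losses.get(tf.keras.losses.mean_squared_error)
    >>> type(loss)
    <class 'function'>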

    Args:
        identifier: A loss identifier. One of `None`, a string name of a loss
            function/class, a loss configuration dictionary, a loss function,
            or a loss class instance.

    Returns:
        A TF-Keras loss as a `function`/`Loss` class instance.

    Raises:
        ValueError: If `identifier` cannot be interpreted.
    """
    if identifier is None:
        return None
    if isinstance(identifier, str):
        identifier = str(identifier)
        # Plain names (no "module" key) go through the legacy
        # deserialization path.
        use_legacy_format = "module" not in identifier
        return deserialize(identifier, use_legacy_format=use_legacy_format)
    if isinstance(identifier, dict):
        return deserialize(identifier)
    if callable(identifier):
        return identifier
    raise ValueError(
        f"Could not interpret loss function identifier: {identifier}"
    )


LABEL_DTYPES_FOR_LOSSES = {
    tf.compat.v1.losses.sparse_softmax_cross_entropy: "int32",
    sparse_categorical_crossentropy: "int32",
}