
"""Accuracy metrics."""

import tensorflow.compat.v2 as tf

from keras import backend
from keras.dtensor import utils as dtensor_utils
from keras.metrics import base_metric
from keras.utils import metrics_utils

# isort: off
from tensorflow.python.util.tf_export import keras_export


@keras_export("keras.metrics.Accuracy")
class Accuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions equal labels.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `accuracy`: an idempotent
    operation that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.Accuracy()
    >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
    >>> m.result().numpy()
    0.75

    >>> m.reset_state()
    >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
    ...                sample_weight=[1, 1, 0, 0])
    >>> m.result().numpy()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.Accuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="accuracy", dtype=None):
        super().__init__(accuracy, name, dtype=dtype)


@keras_export("keras.metrics.BinaryAccuracy")
class BinaryAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match binary labels.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `binary accuracy`: an idempotent
    operation that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      threshold: (Optional) Float representing the threshold for deciding
        whether prediction values are 1 or 0.

    Standalone usage:

    >>> m = tf.keras.metrics.BinaryAccuracy()
    >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
    >>> m.result().numpy()
    0.75

    >>> m.reset_state()
    >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]],
    ...                sample_weight=[1, 0, 0, 1])
    >>> m.result().numpy()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.BinaryAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="binary_accuracy", dtype=None, threshold=0.5):
        super().__init__(
            metrics_utils.binary_matches,
            name,
            dtype=dtype,
            threshold=threshold,
        )


@keras_export("keras.metrics.CategoricalAccuracy")
class CategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match one-hot labels.

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and of probabilities is the same.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `categorical accuracy`: an idempotent
    operation that simply divides `total` by `count`.

    `y_pred` and `y_true` should be passed in as vectors of probabilities,
    rather than as labels. If necessary, use `tf.one_hot` to expand `y_true` as
    a vector.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.CategoricalAccuracy()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
    ...                 [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
    ...                 [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.CategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="categorical_accuracy", dtype=None):
        super().__init__(
            lambda y_true, y_pred: metrics_utils.sparse_categorical_matches(
                tf.math.argmax(y_true, axis=-1), y_pred
            ),
            name,
            dtype=dtype,
        )


@keras_export("keras.metrics.SparseCategoricalAccuracy")
class SparseCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match integer labels.

    ```python
    acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1)))
    ```

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and of probabilities is the same.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `sparse categorical accuracy`: an
    idempotent operation that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.SparseCategoricalAccuracy()
    >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="sparse_categorical_accuracy", dtype=None):
        super().__init__(
            metrics_utils.sparse_categorical_matches, name, dtype=dtype
        )


_SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING = """Accumulates metric statistics.

For sparse categorical metrics, the shapes of `y_true` and `y_pred` are
different.

Args:
  y_true: Ground truth label values. shape = `[batch_size, d0, .. dN-1]` or
    shape = `[batch_size, d0, .. dN-1, 1]`.
  y_pred: The predicted probability values. shape = `[batch_size, d0, .. dN]`.
  sample_weight: Optional `sample_weight` acts as a
    coefficient for the metric. If a scalar is provided, then the metric is
    simply scaled by the given value. If `sample_weight` is a tensor of size
    `[batch_size]`, then the metric for each sample of the batch is rescaled
    by the corresponding element in the `sample_weight` vector. If the shape
    of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted
    to this shape), then each metric element of `y_pred` is scaled by the
    corresponding value of `sample_weight`. (Note on `dN-1`: all metric
    functions reduce by 1 dimension, usually the last axis (-1)).

Returns:
  Update op.
"""

SparseCategoricalAccuracy.update_state.__doc__ = (
    _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING
)
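
# A minimal sketch of the `sample_weight` semantics described above, using the
# public `tf.keras` metric API (the literal values are illustrative only):
#
#   m = SparseCategoricalAccuracy()
#   m.update_state(
#       [[2], [1]],  # y_true, shape [batch_size, 1]
#       [[0.1, 0.6, 0.3], [0.05, 0.95, 0.0]],  # y_pred, shape [batch_size, 3]
#       sample_weight=[0.7, 0.3],  # rescales each sample's contribution
#   )
#   # Only the second sample matches, so total = 0.3, count = 0.7 + 0.3 = 1.0,
#   # and m.result() is 0.3, matching the class docstring above.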


@keras_export("keras.metrics.TopKCategoricalAccuracy")
class TopKCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Computes how often targets are in the top `K` predictions.

    Args:
      k: (Optional) Number of top elements to look at for computing accuracy.
        Defaults to 5.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=1)
    >>> m.update_state([[0, 0, 1], [0, 1, 0]],
    ...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]],
    ...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.TopKCategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, k=5, name="top_k_categorical_accuracy", dtype=None):
        super().__init__(
            lambda yt, yp, k: metrics_utils.sparse_top_k_categorical_matches(
                tf.math.argmax(yt, axis=-1), yp, k
            ),
            name,
            dtype=dtype,
            k=k,
        )


@keras_export("keras.metrics.SparseTopKCategoricalAccuracy")
class SparseTopKCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Computes how often integer targets are in the top `K` predictions.

    Args:
      k: (Optional) Number of top elements to look at for computing accuracy.
        Defaults to 5.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)
    >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(
        self, k=5, name="sparse_top_k_categorical_accuracy", dtype=None
    ):
        super().__init__(
            metrics_utils.sparse_top_k_categorical_matches,
            name,
            dtype=dtype,
            k=k,
        )


SparseTopKCategoricalAccuracy.update_state.__doc__ = (
    _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING
)


def accuracy(y_true, y_pred):
    # Flatten any ragged inputs to dense tensors with matching shapes.
    [
        y_pred,
        y_true,
    ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values(
        [y_pred, y_true]
    )
    y_true.shape.assert_is_compatible_with(y_pred.shape)
    if y_true.dtype != y_pred.dtype:
        y_pred = tf.cast(y_pred, y_true.dtype)
    # Per-element 0/1 matches; `MeanMetricWrapper` averages these.
    return tf.cast(tf.equal(y_true, y_pred), backend.floatx())

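
# `accuracy` above returns a per-element 0/1 match tensor, which
# `MeanMetricWrapper` then averages into the reported metric value. A rough
# NumPy equivalent (illustrative, not part of this module):
#
#   import numpy as np
#   np.equal([1, 2, 3, 4], [0, 2, 3, 4]).astype("float32").mean()  # 0.75
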
@keras_export("keras.metrics.binary_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def binary_accuracy(y_true, y_pred, threshold=0.5):
    """Calculates how often predictions match binary labels.

    Standalone usage:
    >>> y_true = [[1], [1], [0], [0]]
    >>> y_pred = [[1], [1], [0], [0]]
    >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)
    >>> assert m.shape == (4,)
    >>> m.numpy()
    array([1., 1., 1., 1.], dtype=float32)

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
      threshold: (Optional) Float representing the threshold for deciding
        whether prediction values are 1 or 0.

    Returns:
      Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`
    """
    return tf.reduce_mean(
        metrics_utils.binary_matches(y_true, y_pred, threshold), axis=-1
    )

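
# `threshold` binarizes `y_pred` before the comparison. Sketch with
# illustrative values only: at threshold=0.5, a prediction [[0.98], [0.4]] is
# treated as [[1.], [0.]], so against y_true [[1], [1]] the per-sample
# result is [1., 0.].
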
@keras_export("keras.metrics.categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def categorical_accuracy(y_true, y_pred):
    """Calculates how often predictions match one-hot labels.

    Standalone usage:
    >>> y_true = [[0, 0, 1], [0, 1, 0]]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([0., 1.], dtype=float32)

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and of probabilities is the same.

    Args:
      y_true: One-hot ground truth values.
      y_pred: The prediction values.

    Returns:
      Categorical accuracy values.
    """
    return metrics_utils.sparse_categorical_matches(
        tf.math.argmax(y_true, axis=-1), y_pred
    )

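
# Both the one-hot labels and the predictions are reduced with argmax, so
# (illustrative values) categorical_accuracy([[0, 0, 1]], [[0.1, 0.9, 0.8]])
# compares class index 2 against index 1 and yields [0.].
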
@keras_export("keras.metrics.sparse_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def sparse_categorical_accuracy(y_true, y_pred):
    """Calculates how often predictions match integer labels.

    Standalone usage:
    >>> y_true = [2, 1]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([0., 1.], dtype=float32)

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and of probabilities is the same.

    Args:
      y_true: Integer ground truth values.
      y_pred: The prediction values.

    Returns:
      Sparse categorical accuracy values.
    """
    matches = metrics_utils.sparse_categorical_matches(y_true, y_pred)
    # if shape is (num_samples, 1) squeeze
    if matches.shape.ndims > 1 and matches.shape[-1] == 1:
        matches = tf.squeeze(matches, [-1])
    return matches

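
# Because of the squeeze above, `y_true` may be given either as shape
# [batch_size] or [batch_size, 1]: e.g. [2, 1] and [[2], [1]] (illustrative
# values) both produce a result of shape [batch_size].
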
@keras_export("keras.metrics.top_k_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def top_k_categorical_accuracy(y_true, y_pred, k=5):
    """Computes how often targets are in the top `K` predictions.

    Standalone usage:
    >>> y_true = [[0, 0, 1], [0, 1, 0]]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([1., 1.], dtype=float32)

    Args:
      y_true: The ground truth values.
      y_pred: The prediction values.
      k: (Optional) Number of top elements to look at for computing accuracy.
        Defaults to 5.

    Returns:
      Top K categorical accuracy value.
    """
    return metrics_utils.sparse_top_k_categorical_matches(
        tf.math.argmax(y_true, axis=-1), y_pred, k
    )

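
# A prediction counts as correct whenever the true class is among the k
# highest scores. Illustrative sketch: top_k_categorical_accuracy(
#     [[0, 0, 1]], [[0.1, 0.9, 0.8]], k=2) yields [1.] because class 2 has
# the second-largest score, while k=1 would yield [0.].
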
@keras_export("keras.metrics.sparse_top_k_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
    """Computes how often integer targets are in the top `K` predictions.

    Standalone usage:
    >>> y_true = [2, 1]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy(
    ...     y_true, y_pred, k=3)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([1., 1.], dtype=float32)

    Args:
      y_true: tensor of true targets.
      y_pred: tensor of predicted targets.
      k: (Optional) Number of top elements to look at for computing accuracy.
        Defaults to 5.

    Returns:
      Sparse top K categorical accuracy value.
    """
    return metrics_utils.sparse_top_k_categorical_matches(y_true, y_pred, k)