
    ~WhA                     &   d Z ddlZddlZddlZddlmZ ddlmZ ddlm	Z	 ddl
mZ dadZdZd	Zd
ZdZdZdZdZ ed          dd            Ze                    ede          e_          ed          dd            Zd Zd Z	 ddZd Zd ZdS )z@Utilities for ImageNet data preprocessing & prediction decoding.    N)activations)backend)
data_utils)keras_exportzUhttps://storage.googleapis.com/download.tensorflow.org/data/imagenet_class_index.jsonat  
  Preprocesses a tensor or Numpy array encoding a batch of images.

  Usage example with `applications.MobileNet`:

  ```python
  i = tf.keras.layers.Input([None, None, 3], dtype=tf.uint8)
  x = tf.cast(i, tf.float32)
  x = tf.keras.applications.mobilenet.preprocess_input(x)
  core = tf.keras.applications.MobileNet()
  x = core(x)
  model = tf.keras.Model(inputs=[i], outputs=[x])

  image = tf.image.decode_png(tf.io.read_file('file.png'))
  result = model(image)
  ```
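
  A second, minimal example on a plain NumPy batch (an illustrative sketch;
  the batch shape and random values below are arbitrary):

  ```python
  import numpy as np

  batch = np.random.randint(0, 256, size=(2, 224, 224, 3)).astype("float32")
  # Values are rescaled according to the model's preprocessing convention;
  # floating point inputs may be modified in place (pass a copy to avoid it).
  out = tf.keras.applications.mobilenet.preprocess_input(batch)
  ```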

  Args:
    x: A floating point `numpy.array` or a `tf.Tensor`, 3D or 4D with 3 color
      channels, with values in the range [0, 255].
      The preprocessed data are written over the input data
      if the data types are compatible. To avoid this
      behaviour, `numpy.copy(x)` can be used.
    data_format: Optional data format of the image tensor/array. Defaults to
      None, in which case the global setting
      `tf.keras.backend.image_data_format()` is used (unless you changed it,
      it defaults to "channels_last").{mode}

  Returns:
      Preprocessed `numpy.array` or a `tf.Tensor` with type `float32`.
      {ret}

  Raises:
      {error}
  """

PREPROCESS_INPUT_MODE_DOC = """
    mode: One of "caffe", "tf" or "torch". Defaults to "caffe".
      - caffe: will convert the images from RGB to BGR,
          then will zero-center each color channel with
          respect to the ImageNet dataset,
          without scaling.
      - tf: will scale pixels between -1 and 1,
          sample-wise.
      - torch: will scale pixels between 0 and 1 and then
          will normalize each channel with respect to the
          ImageNet dataset.
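
      For intuition, a red-channel value of 255 maps to 255 - 123.68 = 131.32
      under "caffe" (after the RGB-to-BGR swap), to 255 / 127.5 - 1 = 1.0
      under "tf", and to (255 / 255 - 0.485) / 0.229 = roughly 2.25 under
      "torch", using the ImageNet channel statistics hard-coded in this
      module.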
  """

PREPROCESS_INPUT_DEFAULT_ERROR_DOC = """
    ValueError: In case of unknown `mode` or `data_format` argument."""

PREPROCESS_INPUT_ERROR_DOC = """
    ValueError: In case of unknown `data_format` argument."""

PREPROCESS_INPUT_RET_DOC_TF = """
      The input pixel values are scaled between -1 and 1, sample-wise."""

PREPROCESS_INPUT_RET_DOC_TORCH = """
      The input pixel values are scaled between 0 and 1 and each channel is
      normalized with respect to the ImageNet dataset."""

PREPROCESS_INPUT_RET_DOC_CAFFE = """
      The images are converted from RGB to BGR, then each color channel is
      zero-centered with respect to the ImageNet dataset, without scaling."""


@keras_export("keras.applications.imagenet_utils.preprocess_input")
def preprocess_input(x, data_format=None, mode="caffe"):
    """Preprocesses a tensor or Numpy array encoding a batch of images."""
    if mode not in {"caffe", "tf", "torch"}:
        raise ValueError(
            "Expected mode to be one of `caffe`, `tf` or `torch`. "
            f"Received: mode={mode}"
        )

    if data_format is None:
        data_format = backend.image_data_format()
    elif data_format not in {"channels_first", "channels_last"}:
        raise ValueError(
            "Expected data_format to be one of `channels_first` or "
            f"`channels_last`. Received: data_format={data_format}"
        )

    if isinstance(x, np.ndarray):
        return _preprocess_numpy_input(x, data_format=data_format, mode=mode)
    else:
        return _preprocess_symbolic_input(
            x, data_format=data_format, mode=mode
        )


preprocess_input.__doc__ = PREPROCESS_INPUT_DOC.format(
    mode=PREPROCESS_INPUT_MODE_DOC,
    ret="",
    error=PREPROCESS_INPUT_DEFAULT_ERROR_DOC,
)


@keras_export("keras.applications.imagenet_utils.decode_predictions")
def decode_predictions(preds, top=5):
    """Decodes the prediction of an ImageNet model.

    Args:
      preds: Numpy array encoding a batch of predictions.
      top: Integer, how many top-guesses to return. Defaults to 5.

    Returns:
      A list of lists of top class prediction tuples
      `(class_name, class_description, score)`.
      One list of tuples per sample in batch input.

    Raises:
      ValueError: In case of invalid shape of the `preds` array
        (must be 2D).
    """
    global CLASS_INDEX

    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError(
            "`decode_predictions` expects a batch of predictions "
            "(i.e. a 2D array of shape (samples, 1000)). "
            "Found array with shape: " + str(preds.shape)
        )
    if CLASS_INDEX is None:
        fpath = data_utils.get_file(
            "imagenet_class_index.json",
            CLASS_INDEX_PATH,
            cache_subdir="models",
            file_hash="c2c37ea517e94d9795004a39431a14cb",
        )
        with open(fpath) as f:
            CLASS_INDEX = json.load(f)
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [
            (tuple(CLASS_INDEX[str(i)]) + (pred[i],)) for i in top_indices
        ]
        result.sort(key=lambda x: x[2], reverse=True)
        results.append(result)
    return results


def _preprocess_numpy_input(x, data_format, mode):
    """Preprocesses a Numpy array encoding a batch of images.

    Args:
      x: Input array, 3D or 4D.
      data_format: Data format of the image array.
      mode: One of "caffe", "tf" or "torch".
        - caffe: will convert the images from RGB to BGR,
            then will zero-center each color channel with
            respect to the ImageNet dataset,
            without scaling.
        - tf: will scale pixels between -1 and 1,
            sample-wise.
        - torch: will scale pixels between 0 and 1 and then
            will normalize each channel with respect to the
            ImageNet dataset.

    Returns:
        Preprocessed Numpy array.
    """
    if not issubclass(x.dtype.type, np.floating):
        x = x.astype(backend.floatx(), copy=False)

    if mode == "tf":
        x /= 127.5
        x -= 1.0
        return x
    elif mode == "torch":
        x /= 255.0
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    else:
        if data_format == "channels_first":
            # 'RGB'->'BGR'
            if x.ndim == 3:
                x = x[::-1, ...]
            else:
                x = x[:, ::-1, ...]
        else:
            # 'RGB'->'BGR'
            x = x[..., ::-1]
        mean = [103.939, 116.779, 123.68]
        std = None

    # Zero-center by mean pixel
    if data_format == "channels_first":
        if x.ndim == 3:
            x[0, :, :] -= mean[0]
            x[1, :, :] -= mean[1]
            x[2, :, :] -= mean[2]
            if std is not None:
                x[0, :, :] /= std[0]
                x[1, :, :] /= std[1]
                x[2, :, :] /= std[2]
        else:
            x[:, 0, :, :] -= mean[0]
            x[:, 1, :, :] -= mean[1]
            x[:, 2, :, :] -= mean[2]
            if std is not None:
                x[:, 0, :, :] /= std[0]
                x[:, 1, :, :] /= std[1]
                x[:, 2, :, :] /= std[2]
    else:
        x[..., 0] -= mean[0]
        x[..., 1] -= mean[1]
        x[..., 2] -= mean[2]
        if std is not None:
            x[..., 0] /= std[0]
            x[..., 1] /= std[1]
            x[..., 2] /= std[2]
    return x


def _preprocess_symbolic_input(x, data_format, mode):
    """Preprocesses a tensor encoding a batch of images.

    Args:
      x: Input tensor, 3D or 4D.
      data_format: Data format of the image tensor.
      mode: One of "caffe", "tf" or "torch".
        - caffe: will convert the images from RGB to BGR,
            then will zero-center each color channel with
            respect to the ImageNet dataset,
            without scaling.
        - tf: will scale pixels between -1 and 1,
            sample-wise.
        - torch: will scale pixels between 0 and 1 and then
            will normalize each channel with respect to the
            ImageNet dataset.

    Returns:
        Preprocessed tensor.
    """
    if mode == "tf":
        x /= 127.5
        x -= 1.0
        return x
    elif mode == "torch":
        x /= 255.0
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    else:
        if data_format == "channels_first":
            # 'RGB'->'BGR'
            if backend.ndim(x) == 3:
                x = x[::-1, ...]
            else:
                x = x[:, ::-1, ...]
        else:
            # 'RGB'->'BGR'
            x = x[..., ::-1]
        mean = [103.939, 116.779, 123.68]
        std = None

    mean_tensor = backend.constant(-np.array(mean))

    # Zero-center by mean pixel
    if backend.dtype(x) != backend.dtype(mean_tensor):
        x = backend.bias_add(
            x, backend.cast(mean_tensor, backend.dtype(x)), data_format
        )
    else:
        x = backend.bias_add(x, mean_tensor, data_format)
    if std is not None:
        std_tensor = backend.constant(np.array(std), dtype=backend.dtype(x))
        if data_format == "channels_first":
            std_tensor = backend.reshape(std_tensor, (-1, 1, 1))
        x /= std_tensor
    return x


def obtain_input_shape(
    input_shape, default_size, min_size, data_format, require_flatten, weights=None
):
    """Internal utility to compute/validate a model's input shape.

    Args:
      input_shape: Either None (will return the default network input shape),
        or a user-provided shape to be validated.
      default_size: Default input width/height for the model.
      min_size: Minimum input width/height accepted by the model.
      data_format: Image data format to use.
      require_flatten: Whether the model is expected to
        be linked to a classifier via a Flatten layer.
      weights: One of `None` (random initialization)
        or 'imagenet' (pre-training on ImageNet).
        If weights='imagenet' input channels must be equal to 3.

    Returns:
      An integer shape tuple (may include None entries).

    Raises:
      ValueError: In case of invalid argument values.
    """
    if weights != "imagenet" and input_shape and len(input_shape) == 3:
        if data_format == "channels_first":
            if input_shape[0] not in {1, 3}:
                warnings.warn(
                    "This model usually expects 1 or 3 input channels. "
                    "However, it was passed an input_shape with "
                    + str(input_shape[0]) + " input channels.",
                    stacklevel=2,
                )
            default_shape = (input_shape[0], default_size, default_size)
        else:
            if input_shape[-1] not in {1, 3}:
                warnings.warn(
                    "This model usually expects 1 or 3 input channels. "
                    "However, it was passed an input_shape with "
                    + str(input_shape[-1]) + " input channels.",
                    stacklevel=2,
                )
            default_shape = (default_size, default_size, input_shape[-1])
    else:
        if data_format == "channels_first":
            default_shape = (3, default_size, default_size)
        else:
            default_shape = (default_size, default_size, 3)
    if weights == "imagenet" and require_flatten:
        if input_shape is not None:
            if input_shape != default_shape:
                raise ValueError(
                    "When setting `include_top=True` "
                    "and loading `imagenet` weights, "
                    f"`input_shape` should be {default_shape}. "
                    f"Received: input_shape={input_shape}"
                )
        return default_shape
    if input_shape:
        if data_format == "channels_first":
            if len(input_shape) != 3:
                raise ValueError(
                    "`input_shape` must be a tuple of three integers."
                )
            if input_shape[0] != 3 and weights == "imagenet":
                raise ValueError(
                    "The input must have 3 channels; Received "
                    f"`input_shape={input_shape}`"
                )
            if (input_shape[1] is not None and input_shape[1] < min_size) or (
                input_shape[2] is not None and input_shape[2] < min_size
            ):
                raise ValueError(
                    f"Input size must be at least {min_size}x{min_size}; "
                    f"Received: input_shape={input_shape}"
                )
        else:
            if len(input_shape) != 3:
                raise ValueError(
                    "`input_shape` must be a tuple of three integers."
                )
            if input_shape[-1] != 3 and weights == "imagenet":
                raise ValueError(
                    "The input must have 3 channels; Received "
                    f"`input_shape={input_shape}`"
                )
            if (input_shape[0] is not None and input_shape[0] < min_size) or (
                input_shape[1] is not None and input_shape[1] < min_size
            ):
                raise ValueError(
                    f"Input size must be at least {min_size}x{min_size}; "
                    f"Received: input_shape={input_shape}"
                )
    else:
        if require_flatten:
            input_shape = default_shape
        else:
            if data_format == "channels_first":
                input_shape = (3, None, None)
            else:
                input_shape = (None, None, 3)
    if require_flatten:
        if None in input_shape:
            raise ValueError(
                "If `include_top` is True, "
                "you should specify a static `input_shape`. "
                f"Received: input_shape={input_shape}"
            )
    return input_shape


def correct_pad(inputs, kernel_size):
    """Returns a tuple for zero-padding for 2D convolution with downsampling.

    Args:
      inputs: Input tensor.
      kernel_size: An integer or tuple/list of 2 integers.

    Returns:
      A tuple.
    """
    img_dim = 2 if backend.image_data_format() == "channels_first" else 1
    input_size = backend.int_shape(inputs)[img_dim : (img_dim + 2)]
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if input_size[0] is None:
        adjust = (1, 1)
    else:
        adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
    correct = (kernel_size[0] // 2, kernel_size[1] // 2)
    return (
        (correct[0] - adjust[0], correct[0]),
        (correct[1] - adjust[1], correct[1]),
    )


def validate_activation(classifier_activation, weights):
    """Validates that the classifier_activation is compatible with the
    weights.

    Args:
      classifier_activation: str or callable activation function
      weights: The pretrained weights to load.

    Raises:
      ValueError: if an activation other than `None` or `softmax` are used
        with pretrained weights.
    """
    if weights is None:
        return

    classifier_activation = activations.get(classifier_activation)
    if classifier_activation not in {
        activations.get("softmax"),
        activations.get(None),
    }:
        raise ValueError(
            "Only `None` and `softmax` activations are allowed "
            "for the `classifier_activation` argument when using "
            "pretrained weights, with `include_top=True`; Received: "
            f"classifier_activation={classifier_activation}"
        )
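

# Illustrative usage sketch (assumes `numpy` and `keras` are available, as
# imported above). It only runs when this file is executed directly, never
# on import, and uses arbitrary random data: it exercises the three
# `preprocess_input` modes and the shape validation in `obtain_input_shape`.
if __name__ == "__main__":
    demo_batch = np.random.randint(0, 256, size=(2, 32, 32, 3)).astype(
        "float32"
    )
    for demo_mode in ("caffe", "tf", "torch"):
        out = preprocess_input(demo_batch.copy(), mode=demo_mode)
        print(demo_mode, out.shape, float(out.min()), float(out.max()))

    # Validate an arbitrary user-supplied shape against typical constraints.
    print(
        obtain_input_shape(
            input_shape=(224, 224, 3),
            default_size=224,
            min_size=32,
            data_format="channels_last",
            require_flatten=False,
        )
    )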