o
    eip                     @   s  d dl mZ d dlmZ d dlmZmZ d dlmZm	Z	m
Z
 d dlZd dlmZ ddlmZmZmZ dd	lmZmZmZmZmZ dd
lmZmZmZmZmZmZm Z m!Z!m"Z"m#Z#m$Z$ ddl%m&Z&m'Z' ddl(m)Z)m*Z*m+Z+m,Z,m-Z-m.Z. ddl/m0Z0m1Z1 e- rddlm2Z2 e+ rd dl3Z3e, rd dl4m5  m6  m7Z8 ddlm9Z9 ndZ9e.:e;Z<edddddddddddddej=fde>dB de?dB de>dB de?e@e? B dB de?e@e? B dB de>dB dedB de>dB dedB de	d deAe)B dB defdd ZBd4d!d"d#eCdB d$d"fd%d&ZDd'ee d$e@e fd(d)ZEd*e@d" d$eFeCd+f fd,d-ZGd.e
ejHd"f d/eCd$e@e
ejHd"f  fd0d1ZIe*G d2d3 d3eZJdS )5    )Iterable)deepcopy)	lru_cachepartial)AnyOptionalUnionN)validate_typed_dict   )BaseImageProcessorBatchFeatureget_size_dict)convert_to_rgbget_resize_output_image_sizeget_size_with_aspect_ratiogroup_images_by_shapereorder_images)ChannelDimension
ImageInput	ImageTypeSizeDictget_image_size#get_image_size_for_max_height_widthget_image_typeinfer_channel_dimension_formatmake_flat_list_of_imagesvalidate_kwargsvalidate_preprocess_arguments)ImagesKwargsUnpack)
TensorTypeauto_docstringis_torch_availableis_torchvision_availableis_vision_availablelogging)is_rocm_platformis_torchdynamo_compiling)PILImageResampling)pil_torch_interpolation_mapping
   maxsize
do_rescalerescale_factordo_normalize
image_mean	image_stddo_center_crop	crop_size	do_resizesizeinterpolationtvF.InterpolationModereturn_tensorsdata_formatc                 C   sJ   t | |||||||||	d
 |
dur|
dkrtd|tjkr#tddS )z
    Checks validity of typically used arguments in an `ImageProcessorFast` `preprocess` method.
    Raises `ValueError` if arguments incompatibility is caught.
    )
r-   r.   r/   r0   r1   r2   r3   r4   r5   r6   Nptz6Only returning PyTorch tensors is currently supported.z6Only channel first data format is currently supported.)r   
ValueErrorr   FIRST)r-   r.   r/   r0   r1   r2   r3   r4   r5   r6   r8   r9    r=   f/home/ubuntu/transcripts/venv/lib/python3.10/site-packages/transformers/image_processing_utils_fast.py"validate_fast_preprocess_argumentsI   s"   
r?   tensortorch.Tensoraxisreturnc                 C   s6   |du r|   S z| j |dW S  ty   |  Y S w )zF
    Squeezes a tensor, but only if the axis specified has dim 1.
    N)rB   )squeezer;   )r@   rB   r=   r=   r>   safe_squeezep   s   rE   valuesc                 C   s   dd t |  D S )zO
    Return the maximum value across all indices of an iterable of values.
    c                 S   s   g | ]}t |qS r=   )max).0values_ir=   r=   r>   
<listcomp>       z&max_across_indices.<locals>.<listcomp>)zip)rF   r=   r=   r>   max_across_indices}   s   rM   images.c                 C   s    t dd | D \}}}||fS )zH
    Get the maximum height and width across all images in a batch.
    c                 S   s   g | ]}|j qS r=   )shaperH   imgr=   r=   r>   rJ      s    z(get_max_height_width.<locals>.<listcomp>)rM   )rN   _
max_height	max_widthr=   r=   r>   get_max_height_width   s   rU   image
patch_sizec                 C   sj   g }t | tjd\}}td||D ]!}td||D ]}| dd||| ||| f }|| qq|S )a6  
    Divides an image into patches of a specified size.

    Args:
        image (`Union[np.array, "torch.Tensor"]`):
            The input image.
        patch_size (`int`):
            The size of each patch.
    Returns:
        list: A list of Union[np.array, "torch.Tensor"] representing the patches.
    )channel_dimr   N)r   r   r<   rangeappend)rV   rW   patchesheightwidthijpatchr=   r=   r>   divide_to_patches   s   "ra   c                        s  e Zd ZdZdZdZdZdZdZdZ	dZ
dZdZdZdZdZdZdZdZejZdZdZdgZdZeZdZdee f fddZed	e fd
dZ!						d\de"d de#de$dB de%dB de de dB de dB d	e&e'd df fddZ(		d]ddde#de)d de d	df
dd Z*e+		d]ddd!e'e$e$f de)d de d	df
d"d#Z,ddd$e-d	dfd%d&Z.ddd'e-e/e- B d(e-e/e- B d	dfd)d*Z0e1d+d,						d^d-e dB d.e-e"e- B dB d/e-e"e- B dB d0e dB d1e-dB d2e)d3 d	e'fd4d5Z2ddd0e d1e-d-e d.e-e"e- B d/e-e"e- B d	dfd6d7Z3ddde#d	dfd8d9Z4de5d	e5fd:d;Z6de7fd<d=Z8	>d_de5d?e$d	e5fd@dAZ9			d`de5dBe dB dCe%eB dB d2e)d3 d	df
dDdEZ:				>dade5dBe dB dCe%eB dB d2e)d3 d?e$d	e"d fdFdGZ;							dbde#dB dHe#dB de#dB dIe dB d.e-e"e- B dB d/e-e"e- B dB dJedB d	e7fdKdLZ<												dcd0e dB d1e-dB d-e dB d.e-e'e- B dB d/e-e'e- B dB dMe dB de#dB dNe dB dHe#dB de)d dOe%e=B dB dJedB fdPdQZ>e?de5dee d	e@fdRdSZAddTde5dBe dCed2e&e%d3f dB dee d	e@fdUdVZBde"d dMe de#de)d dNe dHe#d0e d1e-d-e d.e-e"e- B dB d/e-e"e- B dB dWe dB de#dB de dB dOe%e=B dB d	e@f dXdYZC fdZd[ZD  ZES )dBaseImageProcessorFasta3  
    Base class for fast image processors using PyTorch and TorchVision for image transformations.

    This class provides a complete implementation for standard image preprocessing operations (resize, crop, rescale,
    normalize) with GPU support and batch processing optimizations. Most image processors can be implemented by simply
    setting class attributes; only processors requiring custom logic need to override methods.

    Basic Implementation
    --------------------

    For processors that only need standard operations (resize, center crop, rescale, normalize), define class
    attributes:

        class MyImageProcessorFast(BaseImageProcessorFast):
            resample = PILImageResampling.BILINEAR
            image_mean = IMAGENET_DEFAULT_MEAN
            image_std = IMAGENET_DEFAULT_STD
            size = {"height": 224, "width": 224}
            do_resize = True
            do_rescale = True
            do_normalize = True

    Custom Processing
    -----------------

    Override `_preprocess` (most common):
        For custom image processing logic, override `_preprocess`. This method receives a list of torch tensors with
        channel dimension first and should return a BatchFeature. Use `group_images_by_shape` and `reorder_images` for
        efficient batch processing:

            def _preprocess(
                self,
                images: list[torch.Tensor],
                do_resize: bool,
                size: SizeDict,
                # ... other parameters
                **kwargs,
            ) -> BatchFeature:
                # Group images by shape for batched operations
                grouped_images, indices = group_images_by_shape(images)
                processed_groups = {}

                for shape, stacked_images in grouped_images.items():
                    if do_resize:
                        stacked_images = self.resize(stacked_images, size)
                    # Custom processing here
                    processed_groups[shape] = stacked_images

                processed_images = reorder_images(processed_groups, indices)
                return BatchFeature(data={"pixel_values": torch.stack(processed_images)})

    Override `_preprocess_image_like_inputs` (for additional inputs):
        For processors handling multiple input types (e.g., images + segmentation maps), override this method:

            def _preprocess_image_like_inputs(
                self,
                images: ImageInput,
                segmentation_maps: Optional[ImageInput] = None,
                do_convert_rgb: bool,
                input_data_format: ChannelDimension,
                device: Optional[torch.device] = None,
                **kwargs,
            ) -> BatchFeature:
                images = self._prepare_image_like_inputs(images, do_convert_rgb, input_data_format, device)
                batch_feature = self._preprocess(images, **kwargs)

                if segmentation_maps is not None:
                    # Process segmentation maps separately
                    maps = self._prepare_image_like_inputs(segmentation_maps, ...)
                    batch_feature["labels"] = self._preprocess(maps, ...)

                return batch_feature

    Override `_further_process_kwargs` (for custom kwargs formatting):
        To format custom kwargs before validation:

            def _further_process_kwargs(self, custom_param=None, **kwargs):
                kwargs = super()._further_process_kwargs(**kwargs)
                if custom_param is not None:
                    kwargs["custom_param"] = self._format_custom_param(custom_param)
                return kwargs

    Override `_validate_preprocess_kwargs` (for custom validation):
        To add custom validation logic:

            def _validate_preprocess_kwargs(self, custom_param=None, **kwargs):
                super()._validate_preprocess_kwargs(**kwargs)
                if custom_param is not None and custom_param < 0:
                    raise ValueError("custom_param must be non-negative")

    Override `_prepare_images_structure` (for nested inputs):
        By default, nested image lists are flattened. Override to preserve structure:

            def _prepare_images_structure(self, images, expected_ndims=3):
                # Custom logic to handle nested structure
                return images  # Return as-is or with custom processing

    Custom Parameters
    -----------------

    To add parameters beyond `ImagesKwargs`, create a custom kwargs class and set it as `valid_kwargs`:

        class MyImageProcessorKwargs(ImagesKwargs):
            custom_param: Optional[int] = None
            another_param: Optional[bool] = None

        class MyImageProcessorFast(BaseImageProcessorFast):
            valid_kwargs = MyImageProcessorKwargs
            custom_param = 10  # default value

            def _preprocess(self, images, custom_param, **kwargs):
                # Use custom_param in processing
                ...

    Key Notes
    ---------

    - Images in `_preprocess` are always torch tensors with channel dimension first, regardless of input format
    - Arguments not provided by users default to class attribute values
    - Use batch processing utilities (`group_images_by_shape`, `reorder_images`) for GPU efficiency
    - Image loading, format conversion, and argument handling are automatic - focus only on processing logic
    NTgp?pixel_valueskwargsc              	      s   t  jdi | | |}|d| j}|d ur$t||d| jdnd | _|d| j}|d ur8t|ddnd | _|d| j}|d urLt|ddnd | _| j	j
D ]}||d }|d urft| || qSt| |tt| |d  qSt| j	j
 | _d S )	Nr5   default_to_squarer5   re   r3   
param_namepad_sizer5   rh   r=   )super__init__filter_out_unused_kwargspopr5   r   re   r3   ri   valid_kwargs__annotations__setattrr   getattrlistkeys_valid_kwargs_names)selfrd   r5   r3   ri   keykwarg	__class__r=   r>   rl   :  s"   
zBaseImageProcessorFast.__init__rC   c                 C   s   dS )zv
        `bool`: Whether or not this image processor is a fast processor (backed by PyTorch and TorchVision).
        Tr=   )rv   r=   r=   r>   is_fastR  s   zBaseImageProcessorFast.is_fastr   constantFrN   rA   ri   
fill_valuepadding_modereturn_maskdisable_grouping	is_nested)rA   rA   c                 K   sb  |dur|j r
|jstd| d|j |jf}nt|}t|||d\}	}
i }i }|	 D ]l\}}|jdd }|d |d  }|d |d  }|dk sQ|dk r\td| d	| d||krodd||f}tj||||d
}|||< |rt	j
|t	jdddddddf }d|dd|d d|d f< |||< q.t||
|d}|rt||
|d}||fS |S )ax  
        Pads images to `(pad_size["height"], pad_size["width"])` or to the largest size in the batch.

        Args:
            images (`list[torch.Tensor]`):
                Images to pad.
            pad_size (`SizeDict`, *optional*):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            fill_value (`int`, *optional*, defaults to `0`):
                The constant value used to fill the padded area.
            padding_mode (`str`, *optional*, defaults to "constant"):
                The padding mode to use. Can be any of the modes supported by
                `torch.nn.functional.pad` (e.g. constant, reflection, replication).
            return_mask (`bool`, *optional*, defaults to `False`):
                Whether to return a pixel mask to denote padded regions.
            disable_grouping (`bool`, *optional*, defaults to `False`):
                Whether to disable grouping of images by size.

        Returns:
            `Union[tuple[torch.Tensor, torch.Tensor], torch.Tensor]`: The padded images and pixel masks if `return_mask` is `True`.
        NzCPad size must contain 'height' and 'width' keys only. Got pad_size=.)r   r   r   r
   zrPadding dimensions are negative. Please make sure that the `pad_size` is larger than the image size. Got pad_size=z, image_size=)fillr~   dtype.)r   )r\   r]   r;   rU   r   itemsrO   tvFpadtorch
zeros_likeint64r   )rv   rN   ri   r}   r~   r   r   r   rd   grouped_imagesgrouped_images_indexprocessed_images_groupedprocessed_masks_groupedrO   stacked_images
image_sizepadding_heightpadding_widthpaddingstacked_masksprocessed_imagesprocessed_masksr=   r=   r>   r   Y  sH    
$zBaseImageProcessorFast.padrV   r5   r6   r7   	antialiasc                 K   s   |dur|nt jj}|jr|jrt| dd |j|j}n8|jr-t||jdtj	d}n*|j
rB|jrBt| dd |j
|j}n|jrO|jrO|j|jf}ntd| dt ret re| ||||S t j||||dS )a  
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`torch.Tensor`):
                Image to resize.
            size (`SizeDict`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
                `InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.
            antialias (`bool`, *optional*, defaults to `True`):
                Whether to use antialiasing.

        Returns:
            `torch.Tensor`: The resized image.
        Nr   F)r5   re   input_data_formatzjSize must contain 'height' and 'width' keys, or 'max_height' and 'max_width', or 'shortest_edge' key. Got r   r6   r   )r   InterpolationModeBILINEARshortest_edgelongest_edger   r5   r   r   r<   rS   rT   r   r\   r]   r;   r'   r&   compile_friendly_resizeresize)rv   rV   r5   r6   r   rd   new_sizer=   r=   r>   r     s4   zBaseImageProcessorFast.resizer   c                 C   s   | j tjkr5|  d } tj| |||d} | d } t| dkd| } t| dk d| } |  tj} | S tj| |||d} | S )z}
        A wrapper around `tvF.resize` so that it is compatible with torch.compile when the image is a uint8 tensor.
           r      r   )	r   r   uint8floatr   r   whereroundto)rV   r   r6   r   r=   r=   r>   r     s   
z.BaseImageProcessorFast.compile_friendly_resizescalec                 K   s   || S )a?  
        Rescale an image by a scale factor. image = image * scale.

        Args:
            image (`torch.Tensor`):
                Image to rescale.
            scale (`float`):
                The scaling factor to rescale pixel values by.

        Returns:
            `torch.Tensor`: The rescaled image.
        r=   )rv   rV   r   rd   r=   r=   r>   rescale  s   zBaseImageProcessorFast.rescalemeanstdc                 K   s   t |||S )a  
        Normalize an image. image = (image - image_mean) / image_std.

        Args:
            image (`torch.Tensor`):
                Image to normalize.
            mean (`torch.Tensor`, `float` or `Iterable[float]`):
                Image mean to use for normalization.
            std (`torch.Tensor`, `float` or `Iterable[float]`):
                Image standard deviation to use for normalization.

        Returns:
            `torch.Tensor`: The normalized image.
        )r   	normalize)rv   rV   r   r   rd   r=   r=   r>   r     s   z BaseImageProcessorFast.normalizer*   r+   r/   r0   r1   r-   r.   deviceztorch.devicec                 C   sB   |r|rt j||dd|  }t j||dd|  }d}|||fS )Nr   g      ?F)r   r@   )rv   r/   r0   r1   r-   r.   r   r=   r=   r>   !_fuse_mean_std_and_rescale_factor  s
   

z8BaseImageProcessorFast._fuse_mean_std_and_rescale_factorc                 C   sR   | j ||||||jd\}}}|r| |jtjd||}|S |r'| ||}|S )z/
        Rescale and normalize images.
        )r/   r0   r1   r-   r.   r   r   )r   r   r   r   r   float32r   )rv   rN   r-   r.   r/   r0   r1   r=   r=   r>   rescale_and_normalize-  s   	z,BaseImageProcessorFast.rescale_and_normalizec                 K   s"  |j du s
|jdu rtd|  |jdd \}}|j |j}}||ks+||krx||kr5|| d nd||kr@|| d nd||krM|| d d nd||krZ|| d d ndg}tj||dd}|jdd \}}||krx||krx|S t|| d }	t|| d }
t||	|
||S )	a  
        Note: override torchvision's center_crop to have the same behavior as the slow processor.
        Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along
        any edge, the image is padded with 0's and then center cropped.

        Args:
            image (`"torch.Tensor"`):
                Image to center crop.
            size (`dict[str, int]`):
                Size of the output image.

        Returns:
            `torch.Tensor`: The center cropped image.
        Nz=The size dictionary must have keys 'height' and 'width'. Got r      r   r
   )r   g       @)	r\   r]   r;   rt   rO   r   r   intcrop)rv   rV   r5   rd   image_heightimage_widthcrop_height
crop_widthpadding_ltrbcrop_top	crop_leftr=   r=   r>   center_cropI  s"   z"BaseImageProcessorFast.center_cropc                 C   s   t |S )a'  
        Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image
        as is.
        Args:
            image (ImageInput):
                The image to convert.

        Returns:
            ImageInput: The converted image.
        )r   )rv   rV   r=   r=   r>   r   r  s   z%BaseImageProcessorFast.convert_to_rgbc                 C   sB   | j du r|S | j D ]}||v rtd| d || q
|S )zJ
        Filter out the unused kwargs from the kwargs dictionary.
        Nz!This processor does not use the `z ` parameter. It will be ignored.)unused_kwargsloggerwarning_oncern   )rv   rd   
kwarg_namer=   r=   r>   rm     s   


z/BaseImageProcessorFast.filter_out_unused_kwargs   expected_ndimsc                 C   s   |  |}t||dS )z
        Prepare the images structure for processing.

        Args:
            images (`ImageInput`):
                The input images to process.

        Returns:
            `ImageInput`: The images with a valid nesting.
        r   )fetch_imagesr   )rv   rN   r   r=   r=   r>   _prepare_images_structure  s   
z0BaseImageProcessorFast._prepare_images_structuredo_convert_rgbr   c                 C   s   t |}|tjtjtjfvrtd| |r| |}|tjkr't|}n|tjkr3t	
| }|jdkr=|d}|d u rEt|}|tjkrS|ddd }|d ur\||}|S )NzUnsupported input image type r   r   r
   )r   r   PILTORCHNUMPYr;   r   r   pil_to_tensorr   
from_numpy
contiguousndim	unsqueezer   r   LASTpermuter   )rv   rV   r   r   r   
image_typer=   r=   r>   _process_image  s$   






z%BaseImageProcessorFast._process_imagec                    sn   | j ||d}t| j|||d t|dkot|d ttf}|r, fdd|D }|S  fdd|D }|S )a  
        Prepare image-like inputs for processing.

        Args:
            images (`ImageInput`):
                The image-like inputs to process.
            do_convert_rgb (`bool`, *optional*):
                Whether to convert the images to RGB.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The input data format of the images.
            device (`torch.device`, *optional*):
                The device to put the processed images on.
            expected_ndims (`int`, *optional*):
                The expected number of dimensions for the images. (can be 2 for segmentation maps etc.)

        Returns:
            List[`torch.Tensor`]: The processed images.
        r   r   r   r   r   c                    s   g | ]} fd d|D qS )c                       g | ]} |qS r=   r=   rP   process_image_partialr=   r>   rJ     rK   zPBaseImageProcessorFast._prepare_image_like_inputs.<locals>.<listcomp>.<listcomp>r=   )rH   nested_listr   r=   r>   rJ     s    zEBaseImageProcessorFast._prepare_image_like_inputs.<locals>.<listcomp>c                    r   r=   r=   rP   r   r=   r>   rJ     rK   )r   r   r   len
isinstancers   tuple)rv   rN   r   r   r   r   has_nested_structurer   r=   r   r>   _prepare_image_like_inputs  s   
z1BaseImageProcessorFast._prepare_image_like_inputsr3   re   r9   c           
      K   s   |du ri }|durt di t||d}|dur$t di t|dd}|dur3t di t|dd}t|tr<t|}t|trEt|}|du rLtj}||d< ||d< ||d< ||d< ||d	< ||d
< |d}	t|	tt	frtt
|	 n|	|d< |S )z
        Update kwargs that need further processing before being validated
        Can be overridden by subclasses to customize the processing of kwargs.
        Nrf   r3   rg   ri   rj   r5   r0   r1   r9   resampler6   r=   )r   r   r   rs   r   r   r<   rn   r(   r   r)   )
rv   r5   r3   ri   re   r0   r1   r9   rd   r   r=   r=   r>   _further_process_kwargs  s0   


z.BaseImageProcessorFast._further_process_kwargsr4   r2   r8   c                 K   s$   t |||||||||	|
||d dS )z@
        validate the kwargs for the preprocess method.
        )r-   r.   r/   r0   r1   r4   r5   r2   r3   r6   r8   r9   N)r?   )rv   r-   r.   r/   r0   r1   r4   r5   r2   r3   r6   r8   r9   rd   r=   r=   r>   _validate_preprocess_kwargs$  s   
z2BaseImageProcessorFast._validate_preprocess_kwargsc                 O   s   t | | jd t| j| | jD ]}||t| |d  q|d}|d}|d}| jdi |}| j	di | |d | j
|g|R |||d|S )N)captured_kwargsvalid_processor_keysr   r   r   r9   r   r=   )r   rt   ru   r	   ro   
setdefaultrr   rn   r   r   _preprocess_image_like_inputs)rv   rN   argsrd   r   r   r   r   r=   r=   r>   
preprocessF  s&   




z!BaseImageProcessorFast.preprocessr   c                O   s*   | j ||||d}| j|g|R i |S )z
        Preprocess image-like inputs.
        To be overridden by subclasses when image-like inputs other than images should be processed.
        It can be used for segmentation maps, depth maps, etc.
        )rN   r   r   r   )r   _preprocess)rv   rN   r   r   r   r   rd   r=   r=   r>   r   e  s   z4BaseImageProcessorFast._preprocess_image_like_inputsdo_padc              	   K   s   t ||d\}}i }| D ]\}}|r| j|||d}|||< qt||}t ||d\}}i }| D ]\}}|r@| ||}| ||||	|
|}|||< q4t||}|r^| j|||d}td|i|dS )N)r   )rV   r5   r6   )ri   r   rc   )datatensor_type)r   r   r   r   r   r   r   r   )rv   rN   r4   r5   r6   r2   r3   r-   r.   r/   r0   r1   r   ri   r   r8   rd   r   r   resized_images_groupedrO   r   resized_imagesr   r   r=   r=   r>   r   y  s(   



z"BaseImageProcessorFast._preprocessc                    sv   t   }i }| D ]!\}}|d u r(tt| |d}|dkr'|d ur'|||< q|||< q|dd  |dd  |S )N	NOT_FOUND_valid_processor_keysru   )rk   to_dictr   rr   typern   )rv   encoder_dictfiltered_dictrw   valueclass_defaultry   r=   r>   r     s   

zBaseImageProcessorFast.to_dict)Nr   r|   FFF)NT)NNNNNN)r   )NNN)NNNr   )NNNNNNN)NNNNNNNNNNNN)F__name__
__module____qualname____doc__r   r0   r1   r5   re   r3   r4   r2   r   ri   r-   r.   r/   r   r8   r   r<   r9   r   r   model_input_namesimage_seq_lengthr   ro   r   r   rl   propertyboolr{   rs   r   r   strr   r   r   r   r   staticmethodr   r   r   r   r   r   r   r   r   r   r   dictrm   r   r   r   r   r    r   r!   r   r   r   r   r   __classcell__r=   r=   ry   r>   rb      sJ   {	

K
8








)



)

.

2	


"$
	


/rb   )N)Kcollections.abcr   copyr   	functoolsr   r   typingr   r   r   numpynphuggingface_hub.dataclassesr	   image_processing_utilsr   r   r   image_transformsr   r   r   r   r   image_utilsr   r   r   r   r   r   r   r   r   r   r   processing_utilsr   r   utilsr    r!   r"   r#   r$   r%   utils.import_utilsr&   r'   r(   r   $torchvision.transforms.v2.functional
transformsv2
functionalr   r)   
get_loggerr   r   r<   r   r   rs   r   r?   r   rE   rM   r   rU   ndarrayra   rb   r=   r=   r=   r>   <module>   s   4 
	

&

