import json
import os
import warnings
from copy import deepcopy
from functools import partial
from typing import Any, Callable, Optional, Union

import numpy as np

from .dynamic_module_utils import custom_object_save
from .image_processing_utils import BatchFeature, get_size_dict
from .image_processing_utils_fast import BaseImageProcessorFast
from .image_utils import ChannelDimension, SizeDict, validate_kwargs
from .processing_utils import Unpack, VideosKwargs
from .utils import (
    IMAGE_PROCESSOR_NAME,
    PROCESSOR_NAME,
    VIDEO_PROCESSOR_NAME,
    TensorType,
    add_start_docstrings,
    copy_func,
    download_url,
    is_offline_mode,
    is_remote_url,
    is_torch_available,
    is_torchcodec_available,
    is_torchvision_v2_available,
    logging,
)
from .utils.hub import cached_file
from .utils.import_utils import requires
from .video_utils import (
    VideoInput,
    VideoMetadata,
    group_videos_by_shape,
    is_valid_video,
    load_video,
    make_batched_metadata,
    make_batched_videos,
    reorder_videos,
    to_channel_dimension_format,
)


if is_torch_available():
    import torch

if is_torchvision_v2_available():
    from torchvision.transforms.v2 import functional as F


logger = logging.get_logger(__name__)


BASE_VIDEO_PROCESSOR_DOCSTRING = r"""
    Args:
        do_resize (`bool`, *optional*, defaults to `self.do_resize`):
            Whether to resize the video's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`dict`, *optional*, defaults to `self.size`):
            Size of the output video after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
            The divisor that both the output height and width must be divisible by when resizing.
        default_to_square (`bool`, *optional*, defaults to `self.default_to_square`):
            Whether to default to a square video when resizing, if size is an int.
        resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
            Resampling filter to use if resizing the video. Only has an effect if `do_resize` is set to `True`. Can be
            overridden by the `resample` parameter in the `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
            Whether to center crop the video to the specified `crop_size`. Can be overridden by `do_center_crop` in the
            `preprocess` method.
        crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
            Size of the output video after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
            Whether to rescale the video by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`):
            Scale factor to use if rescaling the video. Only has an effect if `do_rescale` is set to `True`. Can be
            overridden by the `rescale_factor` parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
            Whether to normalize the video. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
            Mean to use if normalizing the video. This is a float or list of floats the length of the number of
            channels in the video. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
            Standard deviation to use if normalizing the video. This is a float or list of floats the length of the
            number of channels in the video. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
            Whether to convert the video to RGB.
        video_metadata (`VideoMetadata`, *optional*):
            Metadata of the video containing information about total duration, fps and total number of frames.
        do_sample_frames (`bool`, *optional*, defaults to `self.do_sample_frames`):
            Whether to sample frames from the video before processing or to process the whole video.
        num_frames (`int`, *optional*, defaults to `self.num_frames`):
            Maximum number of frames to sample when `do_sample_frames=True`.
        fps (`int` or `float`, *optional*, defaults to `self.fps`):
            Target number of frames to sample per second when `do_sample_frames=True`.
        return_tensors (`str` or `TensorType`, *optional*):
            Returns stacked tensors if set to `pt`, otherwise returns a list of tensors.
        data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
            The channel dimension format for the output video. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: video in (height, width, num_channels) format.
            - Unset: Use the channel dimension format of the input video.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the input video. If unset, the channel dimension format is inferred
            from the input video. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: video in (height, width, num_channels) format.
            - `"none"` or `ChannelDimension.NONE`: video in (height, width) format.
        device (`torch.device`, *optional*):
            The device to process the videos on. If unset, the device is inferred from the input videos.
        return_metadata (`bool`, *optional*):
            Whether to return video metadata or not.
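
    Example (illustrative only; any checkpoint that ships a video processor works the same way, and `videos` stands
    for already decoded frames, URLs, or file paths):

    ```python
    >>> from transformers import AutoVideoProcessor

    >>> video_processor = AutoVideoProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-0.5b-ov-hf")
    >>> outputs = video_processor(videos, return_tensors="pt")
    >>> outputs["pixel_values_videos"].shape  # e.g. (batch, frames, channels, height, width)
    ```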
"""


@requires(backends=("vision", "torchvision"))
@add_start_docstrings("Constructs a base VideoProcessor.", BASE_VIDEO_PROCESSOR_DOCSTRING)
class BaseVideoProcessor(BaseImageProcessorFast):
    _auto_class = None

    resample = None
    image_mean = None
    image_std = None
    size = None
    size_divisor = None
    default_to_square = True
    crop_size = None
    do_resize = None
    do_center_crop = None
    do_rescale = None
    rescale_factor = 1 / 255
    do_normalize = None
    do_convert_rgb = None
    do_sample_frames = None
    video_metadata = None
    fps = None
    num_frames = None
    valid_kwargs = VideosKwargs
    model_input_names = ["pixel_values_videos"]

    def __init__(self, **kwargs: Unpack[VideosKwargs]) -> None:
        super().__init__()
        self._processor_class = kwargs.pop("processor_class", None)

        # Additional attributes without default values
        for key, value in kwargs.items():
            try:
                setattr(self, key, value)
            except AttributeError as err:
                logger.error(f"Can't set {key} with value {value} for {self}")
                raise err

        size = kwargs.pop("size", self.size)
        self.size = (
            get_size_dict(size=size, default_to_square=kwargs.pop("default_to_square", self.default_to_square))
            if size is not None
            else None
        )

        crop_size = kwargs.pop("crop_size", self.crop_size)
        self.crop_size = get_size_dict(crop_size, param_name="crop_size") if crop_size is not None else None

        # Save valid kwargs in a list for further processing
        self.model_valid_processing_keys = list(self.valid_kwargs.__annotations__.keys())
        for key in self.model_valid_processing_keys:
            if kwargs.get(key) is not None:
                setattr(self, key, kwargs[key])
            else:
                setattr(self, key, deepcopy(getattr(self, key, None)))

    def __call__(self, videos, **kwargs) -> BatchFeature:
        return self.preprocess(videos, **kwargs)

    def convert_to_rgb(self, video: "torch.Tensor") -> VideoInput:
        """
        Converts a video to RGB format.

        Args:
            video (`"torch.Tensor"`):
                The video to convert.

        Returns:
            `torch.Tensor`: The converted video.
        """
        video = F.grayscale_to_rgb(video)
        if video.shape[-3] == 3 or not (video[..., 3, :, :] < 255).any():
            return video

        # There is a transparency layer; blend it with a white background.
        # Calculate the alpha proportion for blending.
        alpha = video[..., 3, :, :] / 255.0
        video = (1 - alpha[..., None, :, :]) * 255 + alpha[..., None, :, :] * video[..., :3, :, :]
        return video

    def sample_frames(
        self,
        metadata: VideoMetadata,
        num_frames: Optional[int] = None,
        fps: Optional[Union[int, float]] = None,
        **kwargs,
    ) -> "torch.Tensor":
        """
        Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames.
        If `fps` is passed along with metadata, `fps` frames per second are sampled uniformly. Arguments `num_frames`
        and `fps` are mutually exclusive.

        Args:
            metadata (`VideoMetadata`):
                Metadata of the video containing information about total duration, fps and total number of frames.
            num_frames (`int`, *optional*):
                Maximum number of frames to sample. Defaults to `self.num_frames`.
            fps (`int` or `float`, *optional*):
                Target frames to sample per second. Defaults to `self.fps`.

        Returns:
            `torch.Tensor`:
                Indices to sample video frames.
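
        Example (a sketch with made-up metadata values; `VideoMetadata` fields as in `transformers.video_utils`):

        ```python
        >>> from transformers.video_utils import VideoMetadata

        >>> metadata = VideoMetadata(total_num_frames=64, fps=30, duration=64 / 30, video_backend="opencv")
        >>> video_processor.sample_frames(metadata, num_frames=8)
        tensor([ 0,  8, 16, 24, 32, 40, 48, 56], dtype=torch.int32)
        ```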
        """
        if fps is not None and num_frames is not None:
            raise ValueError(
                "`num_frames`, `fps`, and `sample_indices_fn` are mutually exclusive arguments, please use only one!"
            )

        num_frames = num_frames if num_frames is not None else self.num_frames
        fps = fps if fps is not None else self.fps
        total_num_frames = metadata.total_num_frames

        # If `num_frames` is not given but `fps` is, calculate `num_frames` from `fps`
        if num_frames is None and fps is not None:
            if metadata is None or metadata.fps is None:
                raise ValueError(
                    "Asked to sample `fps` frames per second but no video metadata was provided which is required "
                    "when sampling with `fps`. Please pass in `VideoMetadata` object or use a fixed `num_frames` "
                    "per input video"
                )
            num_frames = int(total_num_frames / metadata.fps * fps)

        if num_frames is not None and num_frames > total_num_frames:
            raise ValueError(
                f"Video can't be sampled. The `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. "
            )

        if num_frames is not None:
            indices = torch.arange(0, total_num_frames, total_num_frames / num_frames).int()
        else:
            indices = torch.arange(0, total_num_frames).int()
        return indices

    def _decode_and_sample_videos(
        self,
        videos: VideoInput,
        video_metadata: Optional[Union[list[VideoMetadata], list[dict]]] = None,
        do_sample_frames: Optional[bool] = None,
        sample_indices_fn: Optional[Callable] = None,
    ) -> list["torch.Tensor"]:
        """
        Decode input videos and sample frames if needed.
        """
        videos = make_batched_videos(videos)
        video_metadata = make_batched_metadata(videos, video_metadata=video_metadata)

        if is_valid_video(videos[0]) and do_sample_frames:
            # Sample frames from already decoded videos
            sampled_videos = []
            sampled_metadata = []
            for video, metadata in zip(videos, video_metadata):
                indices = sample_indices_fn(metadata=metadata)
                metadata.frames_indices = indices
                sampled_videos.append(video[indices])
                sampled_metadata.append(metadata)
            videos = sampled_videos
            video_metadata = sampled_metadata
        elif not is_valid_video(videos[0]):
            if isinstance(videos[0], list):
                # Videos may be passed as lists of PIL images; stack each clip into a single tensor
                videos = [
                    torch.stack([F.pil_to_tensor(image) for image in images], dim=0)
                    for images in self.fetch_images(videos)
                ]
                if do_sample_frames:
                    raise ValueError(
                        "Sampling frames from a list of images is not supported! Set `do_sample_frames=False`."
                    )
            else:
                # Videos are URLs or file paths; decode them (and sample if requested)
                videos, video_metadata = self.fetch_videos(videos, sample_indices_fn=sample_indices_fn)

        return videos, video_metadata

    def _prepare_input_videos(
        self,
        videos: VideoInput,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        device: Optional["torch.device"] = None,
    ) -> list["torch.Tensor"]:
        """
        Prepare the input videos for processing.
        """
        processed_videos = []
        for video in videos:
            # `video` may be a numpy array; convert it to a channels-first torch tensor
            if isinstance(video, np.ndarray):
                video = to_channel_dimension_format(video, ChannelDimension.FIRST, input_data_format)
                video = torch.from_numpy(video).contiguous()

            # Now that we have torch tensors, we can move them to the requested device
            if device is not None:
                video = video.to(device)

            processed_videos.append(video)
        return processed_videos

    @add_start_docstrings(BASE_VIDEO_PROCESSOR_DOCSTRING)
    def preprocess(self, videos: VideoInput, **kwargs: Unpack[VideosKwargs]) -> BatchFeature:
        validate_kwargs(
            captured_kwargs=kwargs.keys(),
            valid_processor_keys=list(self.valid_kwargs.__annotations__.keys()) + ["return_tensors"],
        )
        # Set default kwargs from self. This ensures that if a kwarg is not provided
        # by the user, it gets its default value from the instance, or is set to None.
        for kwarg_name in self.valid_kwargs.__annotations__:
            kwargs.setdefault(kwarg_name, getattr(self, kwarg_name, None))

        input_data_format = kwargs.pop("input_data_format")
        do_sample_frames = kwargs.pop("do_sample_frames")
        device = kwargs.pop("device")
        video_metadata = kwargs.pop("video_metadata")

        sample_indices_fn = partial(self.sample_frames, **kwargs) if do_sample_frames else None
        videos, video_metadata = self._decode_and_sample_videos(
            videos,
            video_metadata=video_metadata,
            do_sample_frames=do_sample_frames,
            sample_indices_fn=sample_indices_fn,
        )
        videos = self._prepare_input_videos(videos=videos, input_data_format=input_data_format, device=device)

        kwargs = self._further_process_kwargs(**kwargs)
        self._validate_preprocess_kwargs(**kwargs)

        # Pop kwargs that are handled here and not needed in `_preprocess`
        kwargs.pop("data_format")
        return_metadata = kwargs.pop("return_metadata")

        preprocessed_videos = self._preprocess(videos=videos, **kwargs)
        if return_metadata:
            preprocessed_videos["video_metadata"] = video_metadata
        return preprocessed_videos

    def _preprocess(
        self,
        videos: list["torch.Tensor"],
        do_convert_rgb: bool,
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["F.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: Optional[Union[float, list[float]]],
        image_std: Optional[Union[float, list[float]]],
        return_tensors: Optional[Union[str, TensorType]],
        **kwargs,
    ) -> BatchFeature:
        # Group videos by size for batched resizing
        grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
        resized_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            if do_convert_rgb:
                stacked_videos = self.convert_to_rgb(stacked_videos)
            if do_resize:
                stacked_videos = self.resize(stacked_videos, size=size, interpolation=interpolation)
            resized_videos_grouped[shape] = stacked_videos
        resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)

        # Group videos by size for further processing.
        # Needed in case do_resize is False, or resize returns videos with different sizes.
        grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
        processed_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            if do_center_crop:
                stacked_videos = self.center_crop(stacked_videos, crop_size)
            # Fused rescale and normalize
            stacked_videos = self.rescale_and_normalize(
                stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_videos_grouped[shape] = stacked_videos

        processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
        processed_videos = torch.stack(processed_videos, dim=0) if return_tensors else processed_videos

        return BatchFeature(data={"pixel_values_videos": processed_videos}, tensor_type=return_tensors)

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Union[str, os.PathLike],
        cache_dir: Optional[Union[str, os.PathLike]] = None,
        force_download: bool = False,
        local_files_only: bool = False,
        token: Optional[Union[str, bool]] = None,
        revision: str = "main",
        **kwargs,
    ):
        r"""
        Instantiate a type of [`~video_processing_utils.VideoProcessorBase`] from a video processor.

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                This can be either:

                - a string, the *model id* of a pretrained video hosted inside a model repo on
                  huggingface.co.
                - a path to a *directory* containing a video processor file saved using the
                  [`~video_processing_utils.VideoProcessorBase.save_pretrained`] method, e.g.,
                  `./my_model_directory/`.
                - a path or url to a saved video processor JSON *file*, e.g.,
                  `./my_model_directory/video_preprocessor_config.json`.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded pretrained model video processor should be cached if the
                standard cache should not be used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force to (re-)download the video processor files and override the cached versions if
                they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible.
                Will be removed in v5 of Transformers.
            proxies (`dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
            token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
                the token generated when running `hf auth login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.


                <Tip>

                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

                </Tip>

            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
                If `False`, then this function returns just the final video processor object. If `True`, then this
                functions returns a `Tuple(video_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
                consisting of the key/value pairs whose keys are not video processor attributes: i.e., the part of
                `kwargs` which has not been used to update `video_processor` and is otherwise ignored.
            subfolder (`str`, *optional*, defaults to `""`):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
                specify the folder name here.
            kwargs (`dict[str, Any]`, *optional*):
                The values in kwargs of any keys which are video processor attributes will be used to override the
                loaded values. Behavior concerning key/value pairs whose keys are *not* video processor attributes is
                controlled by the `return_unused_kwargs` keyword parameter.

        Returns:
            A video processor of type [`~video_processing_utils.VideoProcessorBase`].

        Examples:

        ```python
        # We can't instantiate directly the base class *VideoProcessorBase* so let's show the examples on a
        # derived class: *LlavaOnevisionVideoProcessor*
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf"
        )  # Download video_processing_config from huggingface.co and cache.
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "./test/saved_model/"
        )  # E.g. video processor (or model) was saved using *save_pretrained('./test/saved_model/')*
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained("./test/saved_model/video_preprocessor_config.json")
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", do_normalize=False, foo=False
        )
        assert video_processor.do_normalize is False
        video_processor, unused_kwargs = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", do_normalize=False, foo=False, return_unused_kwargs=True
        )
        assert video_processor.do_normalize is False
        assert unused_kwargs == {"foo": False}
        ```
        """
        kwargs["cache_dir"] = cache_dir
        kwargs["force_download"] = force_download
        kwargs["local_files_only"] = local_files_only
        kwargs["revision"] = revision

        use_auth_token = kwargs.pop("use_auth_token", None)
        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use "
                "`token` instead.",
                FutureWarning,
            )
            if token is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            token = use_auth_token

        if token is not None:
            kwargs["token"] = token

        video_processor_dict, kwargs = cls.get_video_processor_dict(pretrained_model_name_or_path, **kwargs)

        return cls.from_dict(video_processor_dict, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """
        Save a video processor object to the directory `save_directory`, so that it can be re-loaded using the
        [`~video_processing_utils.VideoProcessorBase.from_pretrained`] class method.

        Args:
            save_directory (`str` or `os.PathLike`):
                Directory where the video processor JSON file will be saved (will be created if it does not exist).
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            kwargs (`dict[str, Any]`, *optional*):
                Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
        r   Nr   r   r   zProvided path (z#) should be a directory, not a fileT)exist_okcommit_messagerepo_id)configzVideo processor saved in )r   r   )r:   r   r   r   rF   ra   ospathisfileAssertionErrormakedirssplitsep_create_repo_get_files_timestamps_auto_classr	   joinr   to_json_filer?   info_upload_modified_files)	rH   r   r   r1   r   r   r   files_timestampsoutput_video_processor_filerN   rN   rO   save_pretrained(  sB   
        """
        use_auth_token = kwargs.pop("use_auth_token", None)

        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use "
                "`token` instead.",
                FutureWarning,
            )
            if kwargs.get("token") is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            kwargs["token"] = use_auth_token

        if os.path.isfile(save_directory):
            raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")

        os.makedirs(save_directory, exist_ok=True)

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
            repo_id = self._create_repo(repo_id, **kwargs)
            files_timestamps = self._get_files_timestamps(save_directory)

        # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
        # loaded from the Hub.
        if self._auto_class is not None:
            custom_object_save(self, save_directory, config=self)

        # If we save using the predefined names, we can load using `from_pretrained`
        output_video_processor_file = os.path.join(save_directory, VIDEO_PROCESSOR_NAME)

        self.to_json_file(output_video_processor_file)
        logger.info(f"Video processor saved in {output_video_processor_file}")

        if push_to_hub:
            self._upload_modified_files(
                save_directory,
                repo_id,
                files_timestamps,
                commit_message=commit_message,
                token=kwargs.get("token"),
            )

        return [output_video_processor_file]

    @classmethod
    def get_video_processor_dict(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> tuple[dict[str, Any], dict[str, Any]]:
        """
        From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
        video processor of type [`~video_processing_utils.VideoProcessorBase`] using `from_dict`.

        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
            subfolder (`str`, *optional*, defaults to `""`):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
                specify the folder name here.

        Returns:
            `tuple[dict, dict]`: The dictionary(ies) that will be used to instantiate the video processor object.
        """
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", None)
        proxies = kwargs.pop("proxies", None)
        token = kwargs.pop("token", None)
        use_auth_token = kwargs.pop("use_auth_token", None)
        local_files_only = kwargs.pop("local_files_only", False)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", "")

        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)

        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use "
                "`token` instead.",
                FutureWarning,
            )
            if token is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            token = use_auth_token

        user_agent = {"file_type": "video processor", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline

        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        pretrained_model_name_or_path = str(pretrained_model_name_or_path)
        is_local = os.path.isdir(pretrained_model_name_or_path)
        if os.path.isfile(pretrained_model_name_or_path):
            resolved_video_processor_file = pretrained_model_name_or_path
            is_local = True
        elif is_remote_url(pretrained_model_name_or_path):
            video_processor_file = pretrained_model_name_or_path
            resolved_video_processor_file = download_url(pretrained_model_name_or_path)
        else:
            video_processor_file = VIDEO_PROCESSOR_NAME
            try:
                # Try the dedicated video processor config first and fall back to legacy file names. Models updated
                # to the new video processing logic save a separate video processor config, while older checkpoints
                # keep video-related keys in the image processor or processor config.
                resolved_video_processor_files = [
                    resolved_file
                    for filename in [VIDEO_PROCESSOR_NAME, IMAGE_PROCESSOR_NAME, PROCESSOR_NAME]
                    if (
                        resolved_file := cached_file(
                            pretrained_model_name_or_path,
                            filename=filename,
                            cache_dir=cache_dir,
                            force_download=force_download,
                            proxies=proxies,
                            resume_download=resume_download,
                            local_files_only=local_files_only,
                            token=token,
                            user_agent=user_agent,
                            revision=revision,
                            subfolder=subfolder,
                            _raise_exceptions_for_missing_entries=False,
                        )
                    )
                    is not None
                ]
                resolved_video_processor_file = resolved_video_processor_files[0]
            except OSError:
                # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted
                # to the original exception.
                raise
            except Exception:
                # For any other exception, we throw a generic error.
                raise OSError(
                    f"Can't load video processor for '{pretrained_model_name_or_path}'. If you were trying to load"
                    " it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
                    f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
                    f" directory containing a {VIDEO_PROCESSOR_NAME} file"
                )

        try:
            with open(resolved_video_processor_file, encoding="utf-8") as reader:
                text = reader.read()
            video_processor_dict = json.loads(text)
            # Processor configs may nest the video processor parameters under a `video_processor` key
            video_processor_dict = video_processor_dict.get("video_processor", video_processor_dict)
        except json.JSONDecodeError:
            raise OSError(
                f"It looks like the config file at '{resolved_video_processor_file}' is not a valid JSON file."
            )

        if is_local:
            logger.info(f"loading configuration file {resolved_video_processor_file}")
        else:
            logger.info(
                f"loading configuration file {video_processor_file} from cache at {resolved_video_processor_file}"
            )

        return video_processor_dict, kwargs

    @classmethod
    def from_dict(cls, video_processor_dict: dict[str, Any], **kwargs) -> "BaseVideoProcessor":
        """
        Instantiates a type of [`~video_processing_utils.VideoProcessorBase`] from a Python dictionary of parameters.

        Args:
            video_processor_dict (`dict[str, Any]`):
                Dictionary that will be used to instantiate the video processor object. Such a dictionary can be
                retrieved from a pretrained checkpoint by leveraging the
                [`~video_processing_utils.VideoProcessorBase.to_dict`] method.
            kwargs (`dict[str, Any]`):
                Additional parameters from which to initialize the video processor object.

        Returns:
            [`~video_processing_utils.VideoProcessorBase`]: The video processor object instantiated from those
            parameters.
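
        Example (a minimal sketch with made-up values; `MyVideoProcessor` stands in for any concrete subclass):

        ```python
        >>> config = {"do_resize": True, "size": {"height": 224, "width": 224}}
        >>> video_processor = MyVideoProcessor.from_dict(config, do_rescale=False)
        ```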
        """
        video_processor_dict = video_processor_dict.copy()
        return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)

        # The `size` parameter is a dict and was previously an int or tuple in feature extractors. We set `size` here
        # directly to the `video_processor_dict` so that it is converted to the appropriate dict and isn't
        # overwritten if `size` is passed in as a kwarg.
        if "size" in kwargs and "size" in video_processor_dict:
            video_processor_dict["size"] = kwargs.pop("size")
        if "crop_size" in kwargs and "crop_size" in video_processor_dict:
            video_processor_dict["crop_size"] = kwargs.pop("crop_size")

        video_processor = cls(**video_processor_dict)

        # Update video_processor with kwargs if needed
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(video_processor, key):
                setattr(video_processor, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        logger.info(f"Video processor {video_processor}")
        if return_unused_kwargs:
            return video_processor, kwargs
        else:
            return video_processor

    def to_dict(self) -> dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this video processor instance.
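
        Example:

        ```python
        >>> # The concrete class is recorded under "video_processor_type" so the config can be re-instantiated later
        >>> video_processor.to_dict()["video_processor_type"]  # e.g. "LlavaOnevisionVideoProcessor"
        ```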
        """
        output = deepcopy(self.__dict__)
        output.pop("model_valid_processing_keys", None)
        output.pop("_valid_kwargs_names", None)
        output["video_processor_type"] = self.__class__.__name__

        return output

    def to_json_string(self) -> str:
        """
        Serializes this instance to a JSON string.

        Returns:
            `str`: String containing all the attributes that make up this video processor instance in JSON format.
        """
        dictionary = self.to_dict()

        for key, value in dictionary.items():
            if isinstance(value, np.ndarray):
                dictionary[key] = value.tolist()

        # make sure the private name "_processor_class" is correctly saved as "processor_class"
        _processor_class = dictionary.pop("_processor_class", None)
        if _processor_class is not None:
            dictionary["processor_class"] = _processor_class

        return json.dumps(dictionary, indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        """
        Save this instance to a JSON file.

        Args:
            json_file_path (`str` or `os.PathLike`):
                Path to the JSON file in which this video processor instance's parameters will be saved.
        """
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    @classmethod
    def from_json_file(cls, json_file: Union[str, os.PathLike]):
        """
        Instantiates a video processor of type [`~video_processing_utils.VideoProcessorBase`] from the path to a JSON
        file of parameters.

        Args:
            json_file (`str` or `os.PathLike`):
                Path to the JSON file containing the parameters.

        Returns:
            A video processor of type [`~video_processing_utils.VideoProcessorBase`]: The video_processor object
            instantiated from that JSON file.
        """
        with open(json_file, encoding="utf-8") as reader:
            text = reader.read()
        video_processor_dict = json.loads(text)
        return cls(**video_processor_dict)

    @classmethod
    def register_for_auto_class(cls, auto_class="AutoVideoProcessor"):
        """
        Register this class with a given auto class. This should only be used for custom video processors as the ones
        in the library are already mapped with `AutoVideoProcessor`.

        <Tip warning={true}>

        This API is experimental and may have some slight breaking changes in the next releases.

        </Tip>

        Args:
            auto_class (`str` or `type`, *optional*, defaults to `"AutoVideoProcessor"`):
                The auto class to register this new video processor with.
        """
        if not isinstance(auto_class, str):
            auto_class = auto_class.__name__

        import transformers.models.auto as auto_module

        if not hasattr(auto_module, auto_class):
            raise ValueError(f"{auto_class} is not a valid auto class.")

        cls._auto_class = auto_class

    def fetch_videos(
        self, video_url_or_urls: Union[str, list[str], list[list[str]]], sample_indices_fn: Optional[Callable] = None
    ):
        """
        Convert a single or a list of urls into the corresponding `np.array` objects.

        If a single url is passed, the return value will be a single object. If a list is passed a list of objects is
        returned.
        """
        backend = "torchcodec"
        if not is_torchcodec_available():
            logger.warning_once(
                "`torchcodec` is not installed and cannot be used to decode the video by default. Falling back to "
                "`torchvision`. Note that `torchvision` decoding is deprecated and will be removed in future versions. "
            )
            backend = "torchvision"

        if isinstance(video_url_or_urls, list):
            return list(zip(*[self.fetch_videos(x, sample_indices_fn=sample_indices_fn) for x in video_url_or_urls]))
        else:
            return load_video(video_url_or_urls, backend=backend, sample_indices_fn=sample_indices_fn)


BaseVideoProcessor.push_to_hub = copy_func(BaseVideoProcessor.push_to_hub)
if BaseVideoProcessor.push_to_hub.__doc__ is not None:
    BaseVideoProcessor.push_to_hub.__doc__ = BaseVideoProcessor.push_to_hub.__doc__.format(
        object="video processor", object_class="AutoVideoProcessor", object_files="video processor file"
    )