o
    ۷i                  
   @   sx  d dl Z d dlmZmZ d dlZd dlZd dlmZm	Z	 ddl
mZmZ ddlmZmZ ddlmZ ddlmZ dd	lmZ dd
lmZmZmZ ddlmZ ddlmZ ddlmZ ddl m!Z! e rod dl"m#  m$Z% dZ&ndZ&e'e(Z)dZ*				d)de+de+de,de,fddZ-				d*de+dB de.ej/B dB d e0e+ dB d!e0e, dB fd"d#Z1d+d%d&Z2G d'd( d(eeeZ3dS ),    N)AnyCallable)T5EncoderModelT5TokenizerFast   )MultiPipelineCallbacksPipelineCallback)FromSingleFileMixinLTXVideoLoraLoaderMixin)AutoencoderKLLTXVideo)LTXVideoTransformer3DModel)FlowMatchEulerDiscreteScheduler)is_torch_xla_availableloggingreplace_example_docstring)randn_tensor)VideoProcessor   )DiffusionPipeline   )LTXPipelineOutputTFax  
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import LTXPipeline
        >>> from diffusers.utils import export_to_video

        >>> pipe = LTXPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16)
        >>> pipe.to("cuda")

        >>> prompt = "A woman with long brown hair and light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage"
        >>> negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted"

        >>> video = pipe(
        ...     prompt=prompt,
        ...     negative_prompt=negative_prompt,
        ...     width=704,
        ...     height=480,
        ...     num_frames=161,
        ...     num_inference_steps=50,
        ... ).frames[0]
        >>> export_to_video(video, "output.mp4", fps=24)
        ```
            ?ffffff?base_seq_lenmax_seq_len
base_shift	max_shiftc                 C   s,   || ||  }|||  }| | | }|S N )image_seq_lenr   r   r   r   mbmur    r    Z/home/ubuntu/vllm_env/lib/python3.10/site-packages/diffusers/pipelines/ltx/pipeline_ltx.pycalculate_shiftF   s   r&   num_inference_stepsdevice	timestepssigmasc                 K   s  |dur|durt d|dur>dtt| jj v }|s(t d| j d| jd||d| | j}t	|}||fS |durpdtt| jj v }|sZt d| j d| jd||d	| | j}t	|}||fS | j|fd
|i| | j}||fS )a  
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`list[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`list[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    NzYOnly one of `timesteps` or `sigmas` can be passed. Please choose one to set custom valuesr)   zThe current scheduler class zx's `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.)r)   r(   r*   zv's `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.)r*   r(   r(   r    )

ValueErrorsetinspect	signatureset_timesteps
parameterskeys	__class__r)   len)	schedulerr'   r(   r)   r*   kwargsaccepts_timestepsaccept_sigmasr    r    r%   retrieve_timestepsT   s2   r8           c                 C   sX   |j ttd|jdd}| j ttd| jdd}| ||  }|| d| |   } | S )a  
    Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on
    Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
    Flawed](https://huggingface.co/papers/2305.08891).

    Args:
        noise_cfg (`torch.Tensor`):
            The predicted noise tensor for the guided diffusion process.
        noise_pred_text (`torch.Tensor`):
            The predicted noise tensor for the text-guided diffusion process.
        guidance_rescale (`float`, *optional*, defaults to 0.0):
            A rescale factor applied to the noise predictions.

    Returns:
        noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor.
    r   T)dimkeepdim)stdlistrangendim)	noise_cfgnoise_pred_textguidance_rescalestd_textstd_cfgnoise_pred_rescaledr    r    r%   rescale_noise_cfg   s
   rF   c                5       s  e Zd ZdZdZg Zg dZdedede	de
def
 fd	d
Z					d\deee B dededejdB dejdB f
ddZ										d]deee B deee B dB dededejdB dejdB dejdB dejdB dedejdB dejdB fddZ					d^ddZed_d ejd!ed"ed#ejfd$d%Ze	d_d ejd&ed'ed(ed!ed"ed#ejfd)d*Ze	+d`d ejd,ejd-ejd.ed#ejf
d/d0Ze	+d`d ejd,ejd-ejd.ed#ejf
d1d2Z			3	4	5				dad6ed7ed'ed(ed&edejdB dejdB d8ejdB d ejdB d#ejfd9d:Ze d;d< Z!e d=d> Z"e d?d@ Z#e dAdB Z$e dCdD Z%e dEdF Z&e dGdH Z'e( e)e*ddd3d4d5dIdJddKdLddddddddLddMdddd gdfdeee B deee B dB d'ed(ed&edNedOedPee dQedRededB d8ejeej B dB d ejdB dejdB dejdB dejdB dejdB dSeee B dTeee B dB dUedB dVedWe+ee,f dB dXe-eegdf dB dYee def2dZd[Z.  Z/S )bLTXPipelinea  
    Pipeline for text-to-video generation.

    Reference: https://github.com/Lightricks/LTX-Video

    Args:
        transformer ([`LTXVideoTransformer3DModel`]):
            Conditional Transformer architecture to denoise the encoded video latents.
        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
        vae ([`AutoencoderKLLTXVideo`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`T5EncoderModel`]):
            [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically
            the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
        tokenizer (`T5TokenizerFast`):
            Second Tokenizer of class
            [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast).
    ztext_encoder->transformer->vae)latentsprompt_embedsnegative_prompt_embedsr4   vaetext_encoder	tokenizertransformerc                    s   t    | j|||||d t| dd d ur| jjnd| _t| dd d ur*| jjnd| _t| dd d ur:| j	j
jnd| _t| dd urI| j	j
jnd| _t| jd| _t| dd d urb| jj| _d S d	| _d S )
N)rK   rL   rM   rN   r4   rK          rN   r   )vae_scale_factorrM      )super__init__register_modulesgetattrrK   spatial_compression_ratiovae_spatial_compression_ratiotemporal_compression_ratiovae_temporal_compression_ratiorN   config
patch_sizetransformer_spatial_patch_sizepatch_size_ttransformer_temporal_patch_sizer   video_processorrM   model_max_lengthtokenizer_max_length)selfr4   rK   rL   rM   rN   r2   r    r%   rT      s*   
	zLTXPipeline.__init__Nr   rR   promptnum_videos_per_promptmax_sequence_lengthr(   dtypec                 C   s8  |p| j }|p
| jj}t|tr|gn|}t|}| j|d|dddd}|j}|j}	|		 
|}	| j|dddj}
|
jd |jd kret||
se| j|
d d |d df }td	| d
|  | |
|d }|j
||d}|j\}}}|d|d}||| |d}|	|d}	|	|d}	||	fS )N
max_lengthTpt)paddingri   
truncationadd_special_tokensreturn_tensorslongest)rk   rn   r   zXThe following part of your input was truncated because `max_sequence_length` is set to  z	 tokens: r   )rh   r(   )_execution_devicerL   rh   
isinstancestrr3   rM   	input_idsattention_maskbooltoshapetorchequalbatch_decodeloggerwarningrepeatview)rc   re   rf   rg   r(   rh   
batch_sizetext_inputstext_input_idsprompt_attention_maskuntruncated_idsremoved_textrI   _seq_lenr    r    r%   _get_t5_prompt_embeds   sB   
  z!LTXPipeline._get_t5_prompt_embedsTnegative_promptdo_classifier_free_guidancerI   rJ   r   negative_prompt_attention_maskc              
   C   s  |
p| j }
t|tr|gn|}|durt|}n|jd }|du r-| j|||	|
|d\}}|r|du r|p6d}t|trA||g n|}|dur^t|t|ur^tdt| dt| d|t|krwtd| d	t| d
| d	| d	| j|||	|
|d\}}||||fS )a"  
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                Whether to use classifier free guidance or not.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                Number of videos that should be generated per prompt. torch device to place the resulting embeddings on
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            device: (`torch.device`, *optional*):
                torch device
            dtype: (`torch.dtype`, *optional*):
                torch dtype
        Nr   )re   rf   rg   r(   rh    z?`negative_prompt` should be the same type to `prompt`, but got z != .z`negative_prompt`: z has batch size z, but `prompt`: zT. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.)	rq   rr   rs   r3   rx   r   type	TypeErrorr+   )rc   re   r   r   rf   rI   rJ   r   r   rg   r(   rh   r   r    r    r%   encode_prompt  sL   
(



zLTXPipeline.encode_promptc	           	         st  |d dks|d dkrt d| d| d|d ur8t fdd|D s8t d j d	 fd
d|D  |d urK|d urKt d| d| d|d u rW|d u rWt d|d urnt|tsnt|tsnt dt| |d urz|d u rzt d|d ur|d u rt d|d ur|d ur|j|jkrt d|j d|j d|j|jkrt d|j d|j dd S d S d S )NrO   r   z8`height` and `width` have to be divisible by 32 but are z and r   c                 3   s    | ]}| j v V  qd S r   _callback_tensor_inputs.0krc   r    r%   	<genexpr>|  s    

z+LTXPipeline.check_inputs.<locals>.<genexpr>z2`callback_on_step_end_tensor_inputs` has to be in z, but found c                    s   g | ]	}| j vr|qS r    r   r   r   r    r%   
<listcomp>  s    z,LTXPipeline.check_inputs.<locals>.<listcomp>zCannot forward both `prompt`: z and `prompt_embeds`: z2. Please make sure to only forward one of the two.zeProvide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.z2`prompt` has to be of type `str` or `list` but is zEMust provide `prompt_attention_mask` when specifying `prompt_embeds`.zWMust provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.zu`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` z != `negative_prompt_embeds` z`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but got: `prompt_attention_mask` z% != `negative_prompt_attention_mask` )r+   allr   rr   rs   r=   r   rx   )	rc   re   heightwidth"callback_on_step_end_tensor_inputsrI   rJ   r   r   r    r   r%   check_inputsn  sR   zLTXPipeline.check_inputsrH   r\   r^   returnc              
   C   sl   | j \}}}}}|| }|| }	|| }
| |d|||	||
|} | dddddddd	dd	dd} | S )
Nrp   r   r         r   r         )rx   reshapepermuteflatten)rH   r\   r^   r   num_channels
num_framesr   r   post_patch_num_framespost_patch_heightpost_patch_widthr    r    r%   _pack_latents  s    (
zLTXPipeline._pack_latentsr   r   r   c              
   C   sV   |  d}| ||||d|||} | dddddddd	dd	dddd} | S )
Nr   rp   r   r   r   r   r   r   r   )sizer   r   r   )rH   r   r   r   r\   r^   r   r    r    r%   _unpack_latents  s   
0zLTXPipeline._unpack_latents      ?latents_meanlatents_stdscaling_factorc                 C   sP   | ddddd| j| j}| ddddd| j| j}| | | | } | S Nr   rp   r   rw   r(   rh   rH   r   r   r   r    r    r%   _normalize_latents     zLTXPipeline._normalize_latentsc                 C   sP   | ddddd| j| j}| ddddd| j| j}| | | | } | S r   r   r   r    r    r%   _denormalize_latents  r   z LTXPipeline._denormalize_latents        r   num_channels_latents	generatorc
                 C   s   |	d ur|	j ||dS || j }|| j }|d | j d }|||||f}
t|tr=t||kr=tdt| d| dt|
|||d}	| |	| j	| j
}	|	S )Nr(   rh   r   z/You have passed a list of generators of length z+, but requested an effective batch size of z@. Make sure the batch size matches the length of the generators.r   r(   rh   )rw   rX   rZ   rr   r=   r3   r+   r   r   r]   r_   )rc   r   r   r   r   r   rh   r(   r   rH   rx   r    r    r%   prepare_latents  s"   


zLTXPipeline.prepare_latentsc                 C      | j S r   _guidance_scaler   r    r    r%   guidance_scale     zLTXPipeline.guidance_scalec                 C   r   r   )_guidance_rescaler   r    r    r%   rB     r   zLTXPipeline.guidance_rescalec                 C   s
   | j dkS )Nr   r   r   r    r    r%   r     s   
z'LTXPipeline.do_classifier_free_guidancec                 C   r   r   )_num_timestepsr   r    r    r%   num_timesteps  r   zLTXPipeline.num_timestepsc                 C   r   r   )_current_timestepr   r    r    r%   current_timestep  r   zLTXPipeline.current_timestepc                 C   r   r   )_attention_kwargsr   r    r    r%   attention_kwargs  r   zLTXPipeline.attention_kwargsc                 C   r   r   )
_interruptr   r    r    r%   	interrupt  r   zLTXPipeline.interrupt   2   r   r9   pil
frame_rater'   r)   r   rB   decode_timestepdecode_noise_scaleoutput_typereturn_dictr   callback_on_step_endr   c           3      C   s  t |ttfr
|j}| j||||||||d |	| _|
| _|| _d| _d| _	|dur2t |t
r2d}n|dur@t |tr@t|}n|jd }| j}| j||| j|||||||d
\}}}}| jrqtj||gdd}tj||gdd}| jjj}| || ||||tj|||	}|d | j d }|| j }|| j }|| | } tdd| |}!t| | jjd	d
| jjdd| jjdd| jjdd}"trd}#n|}#t | j||#||!|"d\}}t!t||| jj"  d}$t|| _#| j| | j| jf}%| j$|d}&t%|D ]\}'}(| j&rq|(| _	| jrt|gd n|})|)'|j(})|()|)jd }*| j*d | j|)||*|||||%|dd
d }+W d   n	1 sOw   Y  |++ }+| jrz|+,d\},}-|,| j-|-|,   }+| j.dkrzt/|+|-| j.d}+| jj0|+|(|ddd }|duri }.|D ]
}/t1 |/ |.|/< q|| |'|(|.}0|02d|}|02d|}|'t|d ks|'d |$kr|'d | jj" dkr|&3  trt45  qW d   n	1 sw   Y  |dkr|}1n| 6||||| j7| j8}| 9|| j:j;| j:j<| j:jj=}|'|j(}| j:jj>sd}*nMt?|j|||j(d}2t |ts,|g| }|du r4|}nt |ts?|g| }tj@|||j(d}*tj@|||j(dddddddf }d| | ||2  }|'| j:j(}| j:jA||*ddd }1| jBjC|1|d}1| D  |s|1fS tE|1dS ) u  
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
                instead.
            height (`int`, defaults to `512`):
                The height in pixels of the generated image. This is set to 480 by default for the best results.
            width (`int`, defaults to `704`):
                The width in pixels of the generated image. This is set to 848 by default for the best results.
            num_frames (`int`, defaults to `161`):
                The number of video frames to generate
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`list[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            guidance_scale (`float`, defaults to `3 `):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            guidance_rescale (`float`, *optional*, defaults to 0.0):
                Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://huggingface.co/papers/2305.08891) `guidance_scale` is defined as `φ` in equation 16. of
                [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when
                using zero terminal SNR.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            prompt_attention_mask (`torch.Tensor`, *optional*):
                Pre-generated attention mask for text embeddings.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not
                provided, negative_prompt_embeds will be generated from `negative_prompt` input argument.
            negative_prompt_attention_mask (`torch.FloatTensor`, *optional*):
                Pre-generated attention mask for negative text embeddings.
            decode_timestep (`float`, defaults to `0.0`):
                The timestep at which generated video is decoded.
            decode_noise_scale (`float`, defaults to `None`):
                The interpolation factor between random noise and denoised latents at the decode timestep.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ltx.LTXPipelineOutput`] instead of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`list`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int` defaults to `128 `):
                Maximum sequence length to use with the `prompt`.

        Examples:

        Returns:
            [`~pipelines.ltx.LTXPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.ltx.LTXPipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated images.
        )re   r   r   r   rI   rJ   r   r   FNr   r   )
re   r   r   rf   rI   rJ   r   r   rg   r(   )r:   r   base_image_seq_lenr   max_image_seq_lenr   r   r   r   r   cpu)r*   r$   )totalr   cond_uncond)
hidden_statesencoder_hidden_statestimestepencoder_attention_maskr   r   r   rope_interpolation_scaler   r   )rB   )r   rH   rI   latentr   r   )r   )frames)Frr   r   r   tensor_inputsr   r   r   r   r   r   rs   r=   r3   rx   rq   r   r   ry   catrN   r[   in_channelsr   float32rZ   rX   nplinspacer&   r4   getXLA_AVAILABLEr8   maxorderr   progress_bar	enumerater   rw   rh   expandcache_contextfloatchunkr   rB   rF   steplocalspopupdatexm	mark_stepr   r]   r_   r   rK   r   r   r   timestep_conditioningr   tensordecoder`   postprocess_videomaybe_free_model_hooksr   )3rc   re   r   r   r   r   r   r'   r)   r   rB   rf   r   rH   rI   r   rJ   r   r   r   r   r   r   r   r   rg   r   r(   r   latent_num_frameslatent_heightlatent_widthvideo_sequence_lengthr*   r$   timestep_devicenum_warmup_stepsr   r   itlatent_model_inputr   
noise_prednoise_pred_uncondrA   callback_kwargsr   callback_outputsvideonoiser    r    r%   __call__  sL  r







6
9



zLTXPipeline.__call__)Nr   rR   NN)
NTr   NNNNrR   NN)NNNNN)r   r   )r   )	r   rR   r   r   r   NNNN)0__name__
__module____qualname____doc__model_cpu_offload_seq_optional_componentsr   r   r   r   r   r   rT   rs   r=   intry   r(   rh   r   rv   Tensorr   r   staticmethodr   r   r   r   r   	Generatorr   propertyr   rB   r   r   r   r   r   no_gradr   EXAMPLE_DOC_STRINGdictr   r   r  __classcell__r    r    rd   r%   rG      s   &

4
	

X
5"		

!







	

rG   )r   r   r   r   )NNNN)r9   )4r-   typingr   r   numpyr   ry   transformersr   r   	callbacksr   r   loadersr	   r
   models.autoencodersr   models.transformersr   
schedulersr   utilsr   r   r   utils.torch_utilsr   r`   r   pipeline_utilsr   pipeline_outputr   torch_xla.core.xla_modelcore	xla_modelr   r   
get_loggerr  r|   r  r  r   r&   rs   r(   r=   r8   rF   rG   r    r    r    r%   <module>   sb   





<