o
    Gi>                     @   s0  d dl Z d dlZd dlZd dlmZ d dlZd dlmZm	Z	 d dl
Z
d dlmZmZmZ ddlmZmZ ddlmZ ddlmZmZmZ ddlmZ dd	lmZmZmZmZmZm Z m!Z!m"Z"m#Z# dd
l$m%Z%m&Z&m'Z' ddl(m)Z) ddl*m+Z+ ddl,m-Z- ddgddgddgddgddgddgddgddgddgddgddgdZ.ddgdd gd!d"gdd#gd$d$gd%dgd#dgd"d!gd&d'gd dgddgdZ/e rd dl0m1  m2Z3 d(Z4nd)Z4e 5e6Z7e rd d*l8m9Z9 e rd dl:Z:d+Z;				d4d,e<dB d-e=e
j>B dB d.e?e< dB d/e?e@ dB fd0d1ZAG d2d3 d3e+eZBdS )5    N)AnyCallable)Gemma2PreTrainedModelGemmaTokenizerGemmaTokenizerFast   )MultiPipelineCallbacksPipelineCallback)SanaLoraLoaderMixin)AutoencoderDCAutoencoderKLWanSanaVideoTransformer3DModel)DPMSolverMultistepScheduler)	BACKENDS_MAPPINGUSE_PEFT_BACKENDis_bs4_availableis_ftfy_availableis_torch_xla_availableloggingreplace_example_docstringscale_lora_layersunscale_lora_layers)
get_deviceis_torch_versionrandn_tensor)VideoProcessor   )DiffusionPipeline   )SanaVideoPipelineOutputg      |@g      @g      ~@g      @g     @g      @g     @g     @g     @g      @g     @g     @g      @)z0.5z0.57z0.68z0.78z1.0z1.13z1.29z1.46z1.67z1.75z2.0g      @g      @g      @g      @g      @g      @g      @g      @g     @g      @TF)BeautifulSoupa>  
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import SanaVideoPipeline
        >>> from diffusers.utils import export_to_video

        >>> pipe = SanaVideoPipeline.from_pretrained("Efficient-Large-Model/SANA-Video_2B_480p_diffusers")
        >>> pipe.transformer.to(torch.bfloat16)
        >>> pipe.text_encoder.to(torch.bfloat16)
        >>> pipe.vae.to(torch.float32)
        >>> pipe.to("cuda")
        >>> motion_score = 30

        >>> prompt = "Evening, backlight, side lighting, soft light, high contrast, mid-shot, centered composition, clean solo shot, warm color. A young Caucasian man stands in a forest, golden light glimmers on his hair as sunlight filters through the leaves. He wears a light shirt, wind gently blowing his hair and collar, light dances across his face with his movements. The background is blurred, with dappled light and soft tree shadows in the distance. The camera focuses on his lifted gaze, clear and emotional."
        >>> negative_prompt = "A chaotic sequence with misshapen, deformed limbs in heavy motion blur, sudden disappearance, jump cuts, jerky movements, rapid shot changes, frames out of sync, inconsistent character shapes, temporal artifacts, jitter, and ghosting effects, creating a disorienting visual experience."
        >>> motion_prompt = f" motion score: {motion_score}."
        >>> prompt = prompt + motion_prompt

        >>> output = pipe(
        ...     prompt=prompt,
        ...     negative_prompt=negative_prompt,
        ...     height=480,
        ...     width=832,
        ...     frames=81,
        ...     guidance_scale=6,
        ...     num_inference_steps=50,
        ...     generator=torch.Generator(device="cuda").manual_seed(42),
        ... ).frames[0]

        >>> export_to_video(output, "sana-video-output.mp4", fps=16)
        ```
num_inference_stepsdevice	timestepssigmasc                 K   s  |dur|durt d|dur>dtt| jj v }|s(t d| j d| jd||d| | j}t	|}||fS |durpdtt| jj v }|sZt d| j d| jd||d	| | j}t	|}||fS | j|fd
|i| | j}||fS )a  
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`list[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`list[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    NzYOnly one of `timesteps` or `sigmas` can be passed. Please choose one to set custom valuesr#   zThe current scheduler class zx's `set_timesteps` does not support custom timestep schedules. Please check whether you are using the correct scheduler.)r#   r"   r$   zv's `set_timesteps` does not support custom sigmas schedules. Please check whether you are using the correct scheduler.)r$   r"   r"    )

ValueErrorsetinspect	signatureset_timesteps
parameterskeys	__class__r#   len)	schedulerr!   r"   r#   r$   kwargsaccepts_timestepsaccept_sigmasr%   r%   f/home/ubuntu/.local/lib/python3.10/site-packages/diffusers/pipelines/sana_video/pipeline_sana_video.pyretrieve_timesteps   s2   r4   c                :       s   e Zd ZdZedZdZg dZde	e
B dedeeB ded	ef
 fd
dZ			dWdeee B dejdejdededee dB fddZ												dXdeee B dedededejdB dejdB dejdB dejdB d ejdB dededee dB d!edB fd"d#Zd$d% Z						dYd&d'ZdZd(d)Zd*d+ Z	,	-	.	/				d[d0ed1ed2ed3ed4edejdB dejdB d5ej eej  B dB d6ejdB d7ejfd8d9Z!e"d:d; Z#e"d<d= Z$e"d>d? Z%e"d@dA Z&e"dBdC Z'e( e)e*dddDdddEdd-d.d/dFdddddddGdddddd6gdg dHfdeee B dedIedJee dKee dLededB d2ed3edMedNed5ej eej  B dB d6ejdB dejdB dejdB dejdB d ejdB dOedB dPededQedRe+ee,f dB dSe-eegdf dB dTee dedee d7e.e/B f6dUdVZ0  Z1S )\SanaVideoPipelinea  
    Pipeline for text-to-video generation using [Sana](https://huggingface.co/papers/2509.24695). This model inherits
    from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all
    pipelines (downloading, saving, running on a particular device, etc.).

    Args:
        tokenizer ([`GemmaTokenizer`] or [`GemmaTokenizerFast`]):
            The tokenizer used to tokenize the prompt.
        text_encoder ([`Gemma2PreTrainedModel`]):
            Text encoder model to encode the input prompts.
        vae ([`AutoencoderKLWan` or `AutoencoderDCAEV`]):
            Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
        transformer ([`SanaVideoTransformer3DModel`]):
            Conditional Transformer to denoise the input latents.
        scheduler ([`DPMSolverMultistepScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded video latents.
    u5   [#®•©™&@·º½¾¿¡§~\)\(\]\[\}\{\|\\/\*]{1,}ztext_encoder->transformer->vae)latentsprompt_embedsnegative_prompt_embeds	tokenizertext_encodervaetransformerr/   c                    sp   t    | j|||||d t| dd r| jjjnd| _t| dd r(| jjjnd| _	| j	| _
t| j	d| _d S )N)r9   r:   r;   r<   r/   r;         )vae_scale_factor)super__init__register_modulesgetattrr;   configscale_factor_temporalvae_scale_factor_temporalscale_factor_spatialvae_scale_factor_spatialr?   r   video_processor)selfr9   r:   r;   r<   r/   r-   r%   r3   rA      s   

zSanaVideoPipeline.__init__F,  Npromptr"   dtypeclean_captionmax_sequence_lengthcomplex_human_instructionc                    s   t |tr|gn|}t| dddurd| j_| j||d}|s"|}nd|  fdd|D }t| j }|| d }| j|d	|d
d
dd}	|	j	}
|	j
}||}| j|
||d}|d j||d}||fS )a  
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`, *optional*):
                torch device to place the resulting embeddings on
            clean_caption (`bool`, defaults to `False`):
                If `True`, the function will preprocess and clean the provided caption before encoding.
            max_sequence_length (`int`, defaults to 300): Maximum sequence length to use for the prompt.
            complex_human_instruction (`list[str]`, defaults to `complex_human_instruction`):
                If `complex_human_instruction` is not empty, the function will use the complex Human instruction for
                the prompt.
        r9   Nright)rO   
c                    s   g | ]} | qS r%   r%   ).0p
chi_promptr%   r3   
<listcomp>      z>SanaVideoPipeline._get_gemma_prompt_embeds.<locals>.<listcomp>r   
max_lengthTpt)paddingrZ   
truncationadd_special_tokensreturn_tensors)attention_maskr   rN   r"   )
isinstancestrrC   r9   padding_side_text_preprocessingjoinr.   encode	input_idsr`   tor:   )rJ   rM   r"   rN   rO   rP   rQ   max_length_allnum_chi_prompt_tokenstext_inputstext_input_idsprompt_attention_maskr7   r%   rV   r3   _get_gemma_prompt_embeds   s0   

z*SanaVideoPipeline._get_gemma_prompt_embedsT r   do_classifier_free_guidancenegative_promptnum_videos_per_promptr7   r8   rn   negative_prompt_attention_mask
lora_scalec                 C   s$  |du r| j }| jdur| jj}nd}|dur,t| tr,|| _| jdur,tr,t| j| |dur8t|tr8d}n|durFt|t	rFt
|}n|jd }t| dddurWd| j_|}dgt	t| d d }|du r| j||||
||d\}}|dd|f }|dd|f }|j\}}}|d|d}||| |d}||d}||d}|r|du rt|tr|g| n|}| j||||
|dd\}}	|r|jd }|j||d	}|d|d}||| |d}|	|d}	|	|d}	nd}d}	| jdurt| trtrt| j| ||||	fS )
aQ  
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt not to guide the video generation. If not defined, one has to pass `negative_prompt_embeds`
                instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For
                PixArt-Alpha, this should be "".
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                whether to use classifier free guidance or not
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                number of videos that should be generated per prompt
            device: (`torch.device`, *optional*):
                torch device to place the resulting embeddings on
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. For Sana, it's should be the embeddings of the "" string.
            clean_caption (`bool`, defaults to `False`):
                If `True`, the function will preprocess and clean the provided caption before encoding.
            max_sequence_length (`int`, defaults to 300): Maximum sequence length to use for the prompt.
            complex_human_instruction (`list[str]`, defaults to `complex_human_instruction`):
                If `complex_human_instruction` is not empty, the function will use the complex Human instruction for
                the prompt.
        Nr   r   r9   rR   )rM   r"   rN   rO   rP   rQ   Fra   )_execution_devicer:   rN   rb   r
   _lora_scaler   r   rc   listr.   shaperC   r9   rd   rangero   repeatviewri   r   )rJ   rM   rq   rr   rs   r"   r7   r8   rn   rt   rO   rP   rQ   ru   rN   
batch_sizerZ   select_indexbs_embedseq_len_r%   r%   r3   encode_prompt$  sr   -




	
	
zSanaVideoPipeline.encode_promptc                 C   sX   dt t| jjj v }i }|r||d< dt t| jjj v }|r*||d< |S )Neta	generator)r'   r(   r)   r/   stepr+   r,   )rJ   r   r   accepts_etaextra_step_kwargsaccepts_generatorr%   r%   r3   prepare_extra_step_kwargs  s   z+SanaVideoPipeline.prepare_extra_step_kwargsc
           
         s  |d dks|d dkrt d| d| d|d ur8t fdd|D s8t d j d	 fd
d|D  |d urK|d urKt d| d| d|d u rW|d u rWt d|d urnt|tsnt|tsnt dt| |d ur|d urt d| d| d|d ur|d urt d| d| d|d ur|d u rt d|d ur|	d u rt d|d ur|d ur|j|jkrt d|j d|j d|j|	jkrt d|j d|	j dd S d S d S )N    r   z8`height` and `width` have to be divisible by 32 but are z and .c                 3   s    | ]}| j v V  qd S N_callback_tensor_inputsrT   krJ   r%   r3   	<genexpr>  s    

z1SanaVideoPipeline.check_inputs.<locals>.<genexpr>z2`callback_on_step_end_tensor_inputs` has to be in z, but found c                    s   g | ]	}| j vr|qS r%   r   r   r   r%   r3   rX     s    z2SanaVideoPipeline.check_inputs.<locals>.<listcomp>zCannot forward both `prompt`: z and `prompt_embeds`: z2. Please make sure to only forward one of the two.zeProvide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.z2`prompt` has to be of type `str` or `list` but is z and `negative_prompt_embeds`: z'Cannot forward both `negative_prompt`: zEMust provide `prompt_attention_mask` when specifying `prompt_embeds`.zWMust provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.zu`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` z != `negative_prompt_embeds` z`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but got: `prompt_attention_mask` z% != `negative_prompt_attention_mask` )r&   allr   rb   rc   ry   typerz   )
rJ   rM   heightwidth"callback_on_step_end_tensor_inputsrr   r7   r8   rn   rt   r%   r   r3   check_inputs  sj   zSanaVideoPipeline.check_inputsc                    s    rt  sttd d d td d  r0t s0ttd d d td d t|ttfs:|g}dt	f fdd	fd
d|D S )Nbs4rv   zSetting `clean_caption=True`z#Setting `clean_caption` to False...Fftfytextc                    s,    r | }  | } | S |   } | S r   )_clean_captionlowerstrip)r   )rO   rJ   r%   r3   process	  s   

z6SanaVideoPipeline._text_preprocessing.<locals>.processc                    s   g | ]} |qS r%   r%   )rT   t)r   r%   r3   rX     rY   z9SanaVideoPipeline._text_preprocessing.<locals>.<listcomp>)
r   loggerwarningr   formatr   rb   tuplery   rc   )rJ   r   rO   r%   )rO   r   rJ   r3   re     s   



z%SanaVideoPipeline._text_preprocessingc                 C   s  t |}t|}|  }tdd|}tdd|}tdd|}t|ddj}tdd|}td	d|}td
d|}tdd|}tdd|}tdd|}tdd|}tdd|}tdd|}tdd|}tdd|}tdd|}tdd|}tdd|}tdd|}tdd|}tdd|}tdd|}tdd|}tdd|}td d|}td!d|}t| j	d|}td"d|}t
d#}tt||d$krt|d|}t|}tt|}td%d|}td&d|}td'd|}td(d|}td)d|}td*d|}td+d|}td,d|}td-d|}td.d|}td/d0|}td1d2|}td3d|}|  td4d5|}td6d|}td7d|}td8d|}| S )9Nz<person>personzk\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))rp   zh\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))zhtml.parser)featuresz
@[\w\d]+\bz[\u31c0-\u31ef]+z[\u31f0-\u31ff]+z[\u3200-\u32ff]+z[\u3300-\u33ff]+z[\u3400-\u4dbf]+z[\u4dc0-\u4dff]+z[\u4e00-\u9fff]+z|[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+-u   [`´«»“”¨]"u   [‘’]'z&quot;?z&ampz"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} z\d:\d\d\s+$z\\nz
#\d{1,3}\bz	#\d{5,}\bz
\b\d{6,}\bz0[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)z
[\"\']{2,}z[\.]{2,}z\s+\.\s+z	(?:\-|\_)r   z\b[a-zA-Z]{1,3}\d{3,15}\bz\b[a-zA-Z]+\d+[a-zA-Z]+\bz\b\d+[a-zA-Z]+\d+\bz!(worldwide\s+)?(free\s+)?shippingz(free\s)?download(\sfree)?z\bclick\b\s(?:for|on)\s\w+z9\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?z\bpage\s+\d+\bz*\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\bu   \b\d+\.?\d*[xх×]\d+\.?\d*\bz
\b\s+\:\s+z: z(\D[,\./])\bz\1 z\s+z^[\"\']([\w\W]+)[\"\']$z\1z^[\'\_,\-\:;]z[\'\_,\-\:\-\+]$z^\.\S+$)rc   ulunquote_plusr   r   resubr    r   bad_punct_regexcompiler.   findallr   fix_texthtmlunescape)rJ   captionregex2r%   r%   r3   r     s   
	

z SanaVideoPipeline._clean_caption     @  Q   r~   num_channels_latentsr   r   
num_framesr   r6   returnc
                 C   s   |	d ur|	j ||dS |d | j d }
|||
t|| j t|| j f}t|tr=t||kr=tdt| d| d|	d u rKt||||d}	|	S |	j ||d}	|	S )N)r"   rN   r   z/You have passed a list of generators of length z+, but requested an effective batch size of z@. Make sure the batch size matches the length of the generators.)r   r"   rN   )	ri   rF   intrH   rb   ry   r.   r&   r   )rJ   r~   r   r   r   r   rN   r"   r   r6   num_latent_framesrz   r%   r%   r3   prepare_latents  s(   z!SanaVideoPipeline.prepare_latentsc                 C      | j S r   _guidance_scaler   r%   r%   r3   guidance_scale     z SanaVideoPipeline.guidance_scalec                 C   r   r   )_attention_kwargsr   r%   r%   r3   attention_kwargs  r   z"SanaVideoPipeline.attention_kwargsc                 C   s
   | j dkS )N      ?r   r   r%   r%   r3   rq     s   
z-SanaVideoPipeline.do_classifier_free_guidancec                 C   r   r   )_num_timestepsr   r%   r%   r3   num_timesteps  r   zSanaVideoPipeline.num_timestepsc                 C   r   r   )
_interruptr   r%   r%   r3   	interrupt  r   zSanaVideoPipeline.interrupt2   g      @g        pil)zGiven a user prompt, generate an 'Enhanced prompt' that provides detailed visual descriptions suitable for video generation. Evaluate the level of detail in the user prompt:z- If the prompt is simple, focus on adding specifics about colors, shapes, sizes, textures, motion, and temporal relationships to create vivid and dynamic scenes.zo- If the prompt is already detailed, refine and enhance the existing details slightly without overcomplicating.z8Here are examples of how to transform or refine prompts:z- User Prompt: A cat sleeping -> Enhanced: A small, fluffy white cat slowly settling into a curled position, peacefully falling asleep on a warm sunny windowsill, with gentle sunlight filtering through surrounding pots of blooming red flowers.a  - User Prompt: A busy city street -> Enhanced: A bustling city street scene at dusk, featuring glowing street lamps gradually lighting up, a diverse crowd of people in colorful clothing walking past, and a double-decker bus smoothly passing by towering glass skyscrapers.zPlease generate only the enhanced description for the prompt below and avoid including any additional commentary or evaluations:zUser Prompt: r!   r#   r$   r   framesr   output_typereturn_dictuse_resolution_binningr   callback_on_step_endr   c           6      C   s  t |ttfr
|j}|r4| jjjdkrt}n| jjjdkr t}nt	d||	}}| j
j||	|d\}}	| |||	||||||	 || _|| _d| _|durVt |trVd}n|durdt |trdt|}n|jd }| j}| jdurx| jd	dnd} | j|| j||||||||||| d
\}}}}| jrtj||gdd}tj||gdd}t| j||||\}}| jjj}!| || |!||	|
tj|||	}|  ||}"t!t||| jj"  d}#t|| _#| jj$}$| j%|d}%t&|D ]\}&}'| j'rq| jrt|gd n|}(|'(|(jd })| j|(j)|$d|j)|$d||)d| jdd }*|** }*| jr8|*+d\}+},|+||,|+   }*| jjj,d |!krK|*j+dddd }*| jj-|*|'|fi |"ddid }|duri }-|D ]
}.t. |. |-|.< qf|| |&|'|-}/|//d|}|//d|}|//d|}|&t|d ks|&d |#kr|&d | jj" dkr|%0  t1rt23  qW d   n	1 sw   Y  |dkr|}0n|)| j4j$}t5tt6 tj7}1t8ddrtj9n|1j9}2t:| j4jj;<d| j4jj=ddd)|j>|j$}3dt:| j4jj?<d| j4jj=ddd)|j>|j$ }4||4 |3 }z| j4j@|ddd }0W n |2yC }5 ztAB|5 d W Y d}5~5nd}5~5ww |rO| j
C|0||}0| j
jD|0|d}0| E  |sa|0fS tF|0dS )u  
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`.
                instead.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the video generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality video at the
                expense of slower inference.
            timesteps (`list[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            sigmas (`list[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
                will be used.
            guidance_scale (`float`, *optional*, defaults to 4.5):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate videos that are closely linked to
                the text `prompt`, usually at the expense of lower video quality.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            height (`int`, *optional*, defaults to 480):
                The height in pixels of the generated video.
            width (`int`, *optional*, defaults to 832):
                The width in pixels of the generated video.
            frames (`int`, *optional*, defaults to 81):
                The number of frames in the generated video.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only
                applies to [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not
                provided, negative_prompt_embeds will be generated from `negative_prompt` input argument.
            negative_prompt_attention_mask (`torch.Tensor`, *optional*):
                Pre-generated attention mask for negative text embeddings.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated video. Choose between mp4 or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`SanaVideoPipelineOutput`] instead of a plain tuple.
            attention_kwargs:
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            clean_caption (`bool`, *optional*, defaults to `True`):
                Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
                be installed. If the dependencies are not installed, the embeddings will be created from the raw
                prompt.
            use_resolution_binning (`bool` defaults to `True`):
                If set to `True`, the requested height and width are first mapped to the closest resolutions using
                `ASPECT_RATIO_480_BIN` or `ASPECT_RATIO_720_BIN`. After the produced latents are decoded into videos,
                they are resized back to the requested resolution. Useful for generating non-square videos.
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int` defaults to `300`):
                Maximum sequence length to use with the `prompt`.
            complex_human_instruction (`list[str]`, *optional*):
                Instructions for complex human attention:
                https://github.com/NVlabs/Sana/blob/main/configs/sana_app_config/Sana_1600M_app.yaml#L55.

        Examples:

        Returns:
            [`~pipelines.sana_video.pipeline_output.SanaVideoPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.sana_video.pipeline_output.SanaVideoPipelineOutput`] is
                returned, otherwise a `tuple` is returned where the first element is a list with the generated videos
              zInvalid sample size)ratiosFNr   r   scale)rr   rs   r"   r7   r8   rn   rt   rO   rP   rQ   ru   )dim)totalr   )rN   )encoder_hidden_statesencoder_attention_masktimestepr   r   r   r6   r7   r8   latentz>=z2.5.0r   )r   z. 
Try to use VAE tiling for large images. For example: 
pipe.vae.enable_tiling(tile_sample_min_width=512, tile_sample_min_height=512))r   )r   )Grb   r	   r   tensor_inputsr<   rD   sample_sizeASPECT_RATIO_480_BINASPECT_RATIO_720_BINr&   rI   classify_height_width_binr   r   r   r   rc   ry   r.   rz   rw   r   getr   rq   torchcatr4   r/   in_channelsr   float32r   maxorderr   rN   progress_bar	enumerater   expandri   floatchunkout_channelsr   localspopupdateXLA_AVAILABLExm	mark_stepr;   rC   r   cudar   OutOfMemoryErrortensorlatents_meanr}   z_dimr"   latents_stddecodewarningswarnresize_and_crop_tensorpostprocess_videomaybe_free_model_hooksr   )6rJ   rM   rr   r!   r#   r$   r   rs   r   r   r   r   r   r6   r7   rn   r8   rt   r   r   rO   r   r   r   r   rP   rQ   aspect_ratio_binorig_height
orig_widthr~   r"   ru   latent_channelsr   num_warmup_stepstransformer_dtyper   ir   latent_model_inputr   
noise_prednoise_pred_uncondnoise_pred_textcallback_kwargsr   callback_outputsvideotorch_accelerator_module	oom_errorr   r   er%   r%   r3   __call__  s   






$
6
2
&
zSanaVideoPipeline.__call__)FrL   N)Trp   r   NNNNNFrL   NN)NNNNNN)F)r   r   r   r   NNNN)2__name__
__module____qualname____doc__r   r   r   model_cpu_offload_seqr   r   r   r   r   r   r   r   rA   rc   ry   r   r"   rN   boolr   ro   Tensorr   r   r   r   re   r   	Generatorr   propertyr   r   rq   r   r   no_gradr   EXAMPLE_DOC_STRINGdictr   r   r   r   r  __classcell__r%   r%   rK   r3   r5      s   



=
	


 

Cu	

#





	
%r5   )NNNN)Cr   r(   r   urllib.parseparser   r   typingr   r   r   transformersr   r   r   	callbacksr   r	   loadersr
   modelsr   r   r   
schedulersr   utilsr   r   r   r   r   r   r   r   r   utils.torch_utilsr   r   r   rI   r   pipeline_utilsr   pipeline_outputr   r   r   torch_xla.core.xla_modelcore	xla_modelr   r   
get_loggerr  r   r   r    r   r  r   rc   r"   ry   r   r4   r5   r%   r%   r%   r3   <module>   s   ,
&


;