import inspect
import math
from typing import Any, Callable

import torch
from transformers import T5EncoderModel, T5Tokenizer

from ...callbacks import MultiPipelineCallbacks, PipelineCallback
from ...loaders import CogVideoXLoraLoaderMixin
from ...models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel
from ...models.embeddings import get_3d_rotary_pos_embed
from ...pipelines.pipeline_utils import DiffusionPipeline
from ...schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler
from ...utils import is_torch_xla_available, logging, replace_example_docstring
from ...utils.torch_utils import randn_tensor
from ...video_processor import VideoProcessor
from .pipeline_output import CogVideoXPipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)


EXAMPLE_DOC_STRING = """
    Examples:
        ```python
        >>> import torch
        >>> from diffusers import CogVideoXPipeline
        >>> from diffusers.utils import export_to_video

        >>> # Models: "THUDM/CogVideoX-2b" or "THUDM/CogVideoX-5b"
        >>> pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16).to("cuda")
        >>> prompt = (
        ...     "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. "
        ...     "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other "
        ...     "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, "
        ...     "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. "
        ...     "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical "
        ...     "atmosphere of this unique musical performance."
        ... )
        >>> video = pipe(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0]
        >>> export_to_video(video, "output.mp4", fps=8)
        ```
"""


def get_resize_crop_region_for_grid(src, tgt_width, tgt_height):
    tw = tgt_width
    th = tgt_height
    h, w = src
    r = h / w
    if r > (th / tw):
        resize_height = th
        resize_width = int(round(th / h * w))
    else:
        resize_width = tw
        resize_height = int(round(tw / w * h))

    crop_top = int(round((th - resize_height) / 2.0))
    crop_left = int(round((tw - resize_width) / 2.0))

    return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)
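

# Illustrative example for `get_resize_crop_region_for_grid` (values assume the CogVideoX-2b
# config, whose base grid is 30x45 patches): a 480x720 video maps to a 30x45 latent-patch grid,
# and get_resize_crop_region_for_grid((30, 45), 45, 30) returns ((0, 0), (30, 45)), i.e. the
# whole grid is kept because the aspect ratio already matches the base grid.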
|i| | j}||fS )a  
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`list[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`list[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
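
    Example:
        ```python
        >>> # Illustrative sketch: assumes `pipe` is an already-loaded pipeline whose scheduler's
        >>> # `set_timesteps` accepts a plain step count (e.g., CogVideoXDDIMScheduler).
        >>> timesteps, num_inference_steps = retrieve_timesteps(pipe.scheduler, num_inference_steps=50, device="cuda")
        ```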
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps


class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin):
    r"""
    Pipeline for text-to-video generation using CogVideoX.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKLCogVideoX`]):
            Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
        text_encoder ([`T5EncoderModel`]):
            Frozen text-encoder. CogVideoX uses
            [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the
            [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant.
        tokenizer (`T5Tokenizer`):
            Tokenizer of class
            [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
        transformer ([`CogVideoXTransformer3DModel`]):
            A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded video latents.
    ztext_encoder->transformer->vae)latentsprompt_embedsnegative_prompt_embeds	tokenizertext_encodervaetransformerr5   c                    s   t    | j|||||d t| dd r dt| jjjd  nd| _t| dd r.| jjj	nd| _
t| dd r<| jjjnd| _t| jd| _d S )	N)r>   r?   r@   rA   r5   r@      r         gffffff?)vae_scale_factor)super__init__register_modulesgetattrr4   r@   configblock_out_channelsvae_scale_factor_spatialtemporal_compression_ratiovae_scale_factor_temporalscaling_factorvae_scaling_factor_imager   video_processor)selfr>   r?   r@   rA   r5   r3   r%   r&   rG      s   

$zCogVideoXPipeline.__init__Nr      promptnum_videos_per_promptmax_sequence_lengthr)   dtypec                 C   s  |p| j }|p
| jj}t|tr|gn|}t|}| j|d|dddd}|j}| j|dddj}	|	jd |jd kr[t	
||	s[| j|	d d |d df }
td	| d
|
  | ||d }|j||d}|j\}}}|d|d}||| |d}|S )N
max_lengthTpt)paddingrY   
truncationadd_special_tokensreturn_tensorslongest)r[   r^   r   zXThe following part of your input was truncated because `max_sequence_length` is set to  z	 tokens: r   )rX   r)   )_execution_devicer?   rX   
isinstancestrr4   r>   	input_idsshapetorchequalbatch_decodeloggerwarningtorepeatview)rR   rU   rV   rW   r)   rX   
batch_sizetext_inputstext_input_idsuntruncated_idsremoved_textr<   _seq_lenr%   r%   r&   _get_t5_prompt_embeds   s:   
  z'CogVideoXPipeline._get_t5_prompt_embedsTnegative_promptdo_classifier_free_guidancer<   r=   c
              
   C   s  |p| j }t|tr|gn|}|durt|}
n|jd }
|du r+| j|||||	d}|r|du r|p4d}t|tr?|
|g n|}|dur\t|t|ur\tdt| dt| d|
t|krutd| d	t| d
| d	|
 d	| j|||||	d}||fS )a"  
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the video generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                not greater than `1`).
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                Whether to use classifier free guidance or not.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                Number of videos that should be generated per prompt.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            device: (`torch.device`, *optional*):
                torch device
            dtype: (`torch.dtype`, *optional*):
                torch dtype
        """
        device = device or self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            prompt_embeds = self._get_t5_prompt_embeds(
                prompt=prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        if do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds = self._get_t5_prompt_embeds(
                prompt=negative_prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        return prompt_embeds, negative_prompt_embeds

    def prepare_latents(
        self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = (
            batch_size,
            (num_frames - 1) // self.vae_scale_factor_temporal + 1,
            num_channels_latents,
            height // self.vae_scale_factor_spatial,
            width // self.vae_scale_factor_spatial,
        )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def decode_latents(self, latents: torch.Tensor) -> torch.Tensor:
        latents = latents.permute(0, 2, 1, 3, 4)  # [batch_size, num_channels, num_frames, height, width]
        latents = 1 / self.vae_scaling_factor_image * latents

        frames = self.vae.decode(latents).sample
        return frames

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler and corresponds to η in the DDIM paper
        # (https://huggingface.co/papers/2010.02502); it is ignored by other schedulers
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        height,
        width,
        negative_prompt,
        callback_on_step_end_tensor_inputs,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found"
                f" {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    def fuse_qkv_projections(self) -> None:
        r"""Enables fused QKV projections."""
        self.fusing_transformer = True
        self.transformer.fuse_qkv_projections()

    def unfuse_qkv_projections(self) -> None:
        r"""Disable QKV projection fusion if enabled."""
        if not self.fusing_transformer:
            logger.warning("The Transformer was not initially fused for QKV projections. Doing nothing.")
        else:
            self.transformer.unfuse_qkv_projections()
            self.fusing_transformer = False

    def _prepare_rotary_positional_embeddings(
        self,
        height: int,
        width: int,
        num_frames: int,
        device: torch.device,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
        grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)

        p = self.transformer.config.patch_size
        p_t = self.transformer.config.patch_size_t

        base_size_width = self.transformer.config.sample_width // p
        base_size_height = self.transformer.config.sample_height // p

        if p_t is None:
            # CogVideoX 1.0 checkpoints do not patchify the temporal dimension
            grid_crops_coords = get_resize_crop_region_for_grid(
                (grid_height, grid_width), base_size_width, base_size_height
            )
            freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
                embed_dim=self.transformer.config.attention_head_dim,
                crops_coords=grid_crops_coords,
                grid_size=(grid_height, grid_width),
                temporal_size=num_frames,
                device=device,
            )
        else:
            # CogVideoX 1.5 checkpoints patchify the temporal dimension as well
            base_num_frames = (num_frames + p_t - 1) // p_t
            freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
                embed_dim=self.transformer.config.attention_head_dim,
                crops_coords=None,
                grid_size=(grid_height, grid_width),
                temporal_size=base_num_frames,
                grid_type="slice",
                max_size=(base_size_height, base_size_width),
                device=device,
            )

        return freqs_cos, freqs_sin

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    def current_timestep(self):
        return self._current_timestep

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: str | list[str] | None = None,
        negative_prompt: str | list[str] | None = None,
        height: int | None = None,
        width: int | None = None,
        num_frames: int | None = None,
        num_inference_steps: int = 50,
        timesteps: list[int] | None = None,
        guidance_scale: float = 6,
        use_dynamic_cfg: bool = False,
        num_videos_per_prompt: int = 1,
        eta: float = 0.0,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.FloatTensor | None = None,
        prompt_embeds: torch.FloatTensor | None = None,
        negative_prompt_embeds: torch.FloatTensor | None = None,
        output_type: str = "pil",
        return_dict: bool = True,
        attention_kwargs: dict[str, Any] | None = None,
        callback_on_step_end: (
            Callable[[int, int, dict], None] | PipelineCallback | MultiPipelineCallbacks | None
        ) = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        max_sequence_length: int = 226,
    ) -> CogVideoXPipelineOutput | tuple:
        """
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`
                instead.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the video generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                not greater than `1`).
            height (`int`, *optional*, defaults to self.transformer.config.sample_height * self.vae_scale_factor_spatial):
                The height in pixels of the generated video. This is set to 480 by default for the best results.
            width (`int`, *optional*, defaults to self.transformer.config.sample_width * self.vae_scale_factor_spatial):
                The width in pixels of the generated video. This is set to 720 by default for the best results.
            num_frames (`int`, *optional*, defaults to `49`):
                Number of frames to generate. Should be chosen so that `num_frames - 1` is divisible by
                self.vae_scale_factor_temporal. The generated video will contain 1 extra frame because CogVideoX is
                conditioned with (num_seconds * fps + 1) frames, where num_seconds is 6 and fps is 8. However, since
                videos can be saved at any fps, the only condition that needs to be satisfied is the divisibility
                mentioned above; see the worked example below.
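                For example, with the default temporal compression factor of 4, the default 49 frames
                (6 * 8 + 1) map to (49 - 1) / 4 + 1 = 13 latent frames inside the pipeline.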
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality video at the
                expense of slower inference.
            timesteps (`list[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to `6`):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. A higher guidance scale encourages the model to generate videos that are closely
                linked to the text `prompt`, usually at the expense of lower video quality.
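                In code, the guided prediction is `noise_pred = noise_pred_uncond + guidance_scale *
                (noise_pred_text - noise_pred_uncond)`.
            use_dynamic_cfg (`bool`, *optional*, defaults to `False`):
                Whether to recompute the guidance scale at every denoising step from a cosine-based schedule of the
                current timestep (see the implementation in `__call__`), instead of keeping it constant.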
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated video. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipelineOutput`] instead
                of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`list`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, defaults to `226`):
                Maximum sequence length in encoded prompt. Must be consistent with
                `self.transformer.config.max_text_seq_length`; otherwise it may lead to poor results.

        Examples:

        Returns:
            [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipelineOutput`] or `tuple`:
            [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images.
        """
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        height = height or self.transformer.config.sample_height * self.vae_scale_factor_spatial
        width = width or self.transformer.config.sample_width * self.vae_scale_factor_spatial
        num_frames = num_frames or self.transformer.config.sample_frames

        num_videos_per_prompt = 1

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            negative_prompt,
            callback_on_step_end_tensor_inputs,
            prompt_embeds,
            negative_prompt_embeds,
        )
        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._current_timestep = None
        self._interrupt = False

        # 2. Default call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            negative_prompt,
            do_classifier_free_guidance,
            num_videos_per_prompt=num_videos_per_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            max_sequence_length=max_sequence_length,
            device=device,
        )
        if do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)

        # 4. Prepare timesteps (kept on CPU when running under XLA)
        if XLA_AVAILABLE:
            timestep_device = "cpu"
        else:
            timestep_device = device
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, timestep_device, timesteps
        )
        self._num_timesteps = len(timesteps)

        # 5. Prepare latents
        latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1

        # For CogVideoX 1.5, the latent frames should be padded to make it divisible by patch_size_t
        patch_size_t = self.transformer.config.patch_size_t
        additional_frames = 0
        if patch_size_t is not None and latent_frames % patch_size_t != 0:
            additional_frames = patch_size_t - latent_frames % patch_size_t
            num_frames += additional_frames * self.vae_scale_factor_temporal

        latent_channels = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_videos_per_prompt,
            latent_channels,
            num_frames,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Create rotary embeds if required
        image_rotary_emb = (
            self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
            if self.transformer.config.use_rotary_positional_embeddings
            else None
        )

        # 8. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            # for DPM-solver++
            old_pred_original_sample = None
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                self._current_timestep = t
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latent_model_input.shape[0])

                # predict noise model_output
                with self.transformer.cache_context("cond_uncond"):
                    noise_pred = self.transformer(
                        hidden_states=latent_model_input,
                        encoder_hidden_states=prompt_embeds,
                        timestep=timestep,
                        image_rotary_emb=image_rotary_emb,
                        attention_kwargs=attention_kwargs,
                        return_dict=False,
                    )[0]
                noise_pred = noise_pred.float()

                # perform guidance
                if use_dynamic_cfg:
                    self._guidance_scale = 1 + guidance_scale * (
                        (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
                    )
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                if not isinstance(self.scheduler, CogVideoXDPMScheduler):
                    latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
                else:
                    latents, old_pred_original_sample = self.scheduler.step(
                        noise_pred,
                        old_pred_original_sample,
                        t,
                        timesteps[i - 1] if i > 0 else None,
                        latents,
                        **extra_step_kwargs,
                        return_dict=False,
                    )
                latents = latents.to(prompt_embeds.dtype)

                # call the callback, if provided
                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        self._current_timestep = None

        if not output_type == "latent":
            # Discard any padding frames that were added for CogVideoX 1.5
            latents = latents[:, additional_frames:]
            video = self.decode_latents(latents)
            video = self.video_processor.postprocess_video(video=video, output_type=output_type)
        else:
            video = latents

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return CogVideoXPipelineOutput(frames=video)