o
    pi                  
   @   sR  d dl Z d dlmZmZmZmZmZmZ d dlZd dl	m
Z
mZmZmZ ddlmZ ddlmZmZmZ ddlmZmZmZmZ ddlmZ dd	lmZ dd
lmZmZm Z m!Z!m"Z"m#Z# ddl$m%Z%m&Z&m'Z'm(Z(m)Z)m*Z* ddl+m,Z, ddl-m.Z. ddl/m0Z0 ddl1m2Z2 ddl3m4Z4m5Z5 ddl6m7Z7 e'8e9Z:dZ;G dd de4e5eeee0e2	Z<dS )    N)AnyCallableDictListOptionalUnion)CLIPImageProcessorCLIPTextModelCLIPTokenizerCLIPVisionModelWithProjection   )PipelineImageInput)IPAdapterMixinStableDiffusionLoraLoaderMixinTextualInversionLoaderMixin)AutoencoderKLImageProjectionUNet2DConditionModelUNetMotionModel)adjust_lora_scale_text_encoder)MotionAdapter)DDIMSchedulerDPMSolverMultistepSchedulerEulerAncestralDiscreteSchedulerEulerDiscreteSchedulerLMSDiscreteSchedulerPNDMScheduler)USE_PEFT_BACKEND	deprecateloggingreplace_example_docstringscale_lora_layersunscale_lora_layers)randn_tensor)VideoProcessor   )FreeInitMixin)AnimateDiffFreeNoiseMixin)DiffusionPipelineStableDiffusionMixin   )AnimateDiffPipelineOutputa  
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler
        >>> from diffusers.utils import export_to_gif

        >>> adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
        >>> pipe = AnimateDiffPipeline.from_pretrained("frankjoshua/toonyou_beta6", motion_adapter=adapter)
        >>> pipe.scheduler = DDIMScheduler(beta_schedule="linear", steps_offset=1, clip_sample=False)
        >>> output = pipe(prompt="A corgi walking in the park")
        >>> frames = output.frames[0]
        >>> export_to_gif(frames, "animation.gif")
        ```
c                /       sb  e Zd ZdZdZg dZg dZ		dHdedede	d	e
eef d
ede
eeeeeef dedef fddZ					dIdeej deej dee dee fddZdJddZdd ZdKdefddZdd Z 						dLd d!Z!	dJd"d#Z"e#d$d% Z$e#d&d' Z%e#d(d) Z&e#d*d+ Z'e#d,d- Z(e) e*e+ddddd.d/dd0d1ddddddd2d3dddd4gdfd5e
e,e-e, f d6ee d7ee d8ee d9ed:ed;ee
e,e-e, f  d<ee d=ed>ee
ej.e-ej. f  d4eej deej deej d?ee/ d@ee-ej  dAee, dBe0dCee1e,e2f  dee dDee3eee1gdf  dEe-e, def,dFdGZ4  Z5S )MAnimateDiffPipelineav  
    Pipeline for text-to-video generation.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    The pipeline also inherits the following loading methods:
        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
        - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer (`CLIPTokenizer`):
            A [`~transformers.CLIPTokenizer`] to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents.
        motion_adapter ([`MotionAdapter`]):
            A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
    z&text_encoder->image_encoder->unet->vae)feature_extractorimage_encodermotion_adapter)latentsprompt_embedsnegative_prompt_embedsNvaetext_encoder	tokenizerunetr/   	schedulerr-   r.   c	           	   
      sf   t    t|trt||}| j||||||||d dt| jj	j
d  | _td| jd| _d S )N)r3   r4   r5   r6   r/   r7   r-   r.   r%   r*   F)	do_resizevae_scale_factor)super__init__
isinstancer   r   from_unet2dregister_moduleslenr3   configblock_out_channelsr9   r$   video_processor)	selfr3   r4   r5   r6   r/   r7   r-   r.   	__class__ r/home/ubuntu/SoloSpeech/.venv/lib/python3.10/site-packages/diffusers/pipelines/animatediff/pipeline_animatediff.pyr;   n   s   


zAnimateDiffPipeline.__init__r1   r2   
lora_scale	clip_skipc
              
   C   s  |durt | tr|| _tst| j| nt| j| |dur't |tr'd}
n|dur5t |tr5t	|}
n|j
d }
|du rt | trJ| || j}| j|d| jjddd}|j}| j|ddd	j}|j
d
 |j
d
 krt||s| j|dd| jjd d
f }td| jj d|  t| jjdr| jjjr|j|}nd}|	du r| j|||d}|d }n| j|||dd}|d
 |	d   }| jj|}| jdur| jj}n| jdur| jj}n|j}|j||d}|j
\}}}|d|d}||| |d
}|r|du r|du rdg|
 }nC|dur8t |t |ur8t!dt | dt | dt |trB|g}n|
t	|kr\t"d| dt	| d| d|
 d	|}t | trk| || j}|j
d }| j|d|ddd}t| jjdr| jjjr|j|}nd}| j|j||d}|d }|r|j
d }|j||d}|d|d}||
| |d
}| jdurt | trtrt#| j| ||fS )a  
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            lora_scale (`float`, *optional*):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
        Nr*   r   
max_lengthTpt)paddingrJ   
truncationreturn_tensorslongest)rL   rN   z\The following part of your input was truncated because CLIP can only handle sequences up to z	 tokens: use_attention_mask)attention_mask)rR   output_hidden_states)dtypedevice z?`negative_prompt` should be the same type to `prompt`, but got z != .z`negative_prompt`: z has batch size z, but `prompt`: zT. Please make sure that passed `negative_prompt` matches the batch size of `prompt`.)$r<   r   _lora_scaler   r   r4   r!   strlistr?   shaper   maybe_convert_promptr5   model_max_length	input_idstorchequalbatch_decodeloggerwarninghasattrr@   rQ   rR   to
text_modelfinal_layer_normrT   r6   repeatviewtype	TypeError
ValueErrorr"   )rC   promptrU   num_images_per_promptdo_classifier_free_guidancenegative_promptr1   r2   rH   rI   
batch_sizetext_inputstext_input_idsuntruncated_idsremoved_textrR   prompt_embeds_dtypebs_embedseq_len_uncond_tokensrJ   uncond_inputrF   rF   rG   encode_prompt   s   +











z!AnimateDiffPipeline.encode_promptc           
      C   s   t | j j}t|tjs| j|ddj}|j	||d}|rH| j|ddj
d }|j|dd}| jt|ddj
d }|j|dd}||fS | |j}|j|dd}t|}	||	fS )	NrK   )rN   )rU   rT   T)rS   r   dim)nextr.   
parametersrT   r<   r_   Tensorr-   pixel_valuesre   hidden_statesrepeat_interleave
zeros_likeimage_embeds)
rC   imagerU   rn   rS   rT   image_enc_hidden_statesuncond_image_enc_hidden_statesr   uncond_image_embedsrF   rF   rG   encode_imageI  s(   

z AnimateDiffPipeline.encode_imagec                 C   sl  g }|rg }|d u ret |ts|g}t|t| jjjkr/tdt| dt| jjj dt|| jjjD ],\}}	t |	t }
| 	||d|
\}}|
|d d d f  |rc|
|d d d f  q7n|D ]}|rw|d\}}|
| |
| qgg }t|D ]0\}}tj|g| dd}|rtj|| g| dd}tj||gdd}|j|d}|
| q|S )	NzK`ip_adapter_image` must have same length as the number of IP Adapters. Got z images and z IP Adapters.r*   r%   r   r~   rU   )r<   rZ   r?   r6   encoder_hid_projimage_projection_layersrl   zipr   r   appendchunk	enumerater_   catre   )rC   ip_adapter_imageip_adapter_image_embedsrU   rn   ro   r   negative_image_embedssingle_ip_adapter_imageimage_proj_layeroutput_hidden_statesingle_image_embedssingle_negative_image_embedsirF   rF   rG   prepare_ip_adapter_image_embedsb  sH   


z3AnimateDiffPipeline.prepare_ip_adapter_image_embeds   decode_chunk_sizec                 C   s   d| j jj | }|j\}}}}}|ddddd|| |||}g }td|jd |D ]}	||	|	|  }
| j |
j}
|	|
 q-t
|}|d d d f ||df|jdd   ddddd}| }|S )Nr*   r   r%   r      rP   )r3   r@   scaling_factorr[   permutereshaperangedecodesampler   r_   r   float)rC   r0   r   rq   channels
num_framesheightwidthvideor   batch_latentsrF   rF   rG   decode_latents  s   "
8z"AnimateDiffPipeline.decode_latentsc                 C   sX   dt t| jjj v }i }|r||d< dt t| jjj v }|r*||d< |S )Neta	generator)setinspect	signaturer7   stepr   keys)rC   r   r   accepts_etaextra_step_kwargsaccepts_generatorrF   rF   rG   prepare_extra_step_kwargs  s   z-AnimateDiffPipeline.prepare_extra_step_kwargsc                    s  |d dks|d dkrt d| d| d|d ur1t|tr$|dkr1t d| dt| d|
d urRt fdd	|
D sRt d
 j d fdd|
D  |d ure|d uret d| d| d|d u rq|d u rqt d|d urt|tst|tst dt| |d ur|d urt d| d| d|d ur|d ur|j|jkrt d|j d|j d|d ur|	d urt d|	d urt|	tst dt|	 |	d j	dvrt d|	d j	 dd S d S )N   r   z7`height` and `width` have to be divisible by 8 but are z and rW   z5`callback_steps` has to be a positive integer but is z	 of type c                 3   s    | ]}| j v V  qd S N_callback_tensor_inputs.0krC   rF   rG   	<genexpr>  s    

z3AnimateDiffPipeline.check_inputs.<locals>.<genexpr>z2`callback_on_step_end_tensor_inputs` has to be in z, but found c                    s   g | ]	}| j vr|qS rF   r   r   r   rF   rG   
<listcomp>  s    z4AnimateDiffPipeline.check_inputs.<locals>.<listcomp>zCannot forward both `prompt`: z and `prompt_embeds`: z2. Please make sure to only forward one of the two.zeProvide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.z2`prompt` has to be of type `str` or `list` but is z'Cannot forward both `negative_prompt`: z and `negative_prompt_embeds`: zu`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` z != `negative_prompt_embeds` zProvide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined.z:`ip_adapter_image_embeds` has to be of type `list` but is )r   r   zF`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is D)
rl   r<   intrj   allr   rY   rZ   r[   ndim)rC   rm   r   r   callback_stepsrp   r1   r2   r   r   "callback_on_step_end_tensor_inputsrF   r   rG   check_inputs  sl   
z AnimateDiffPipeline.check_inputsc
                 C   s   | j r| |||||||||		}	t|tr(t||kr(tdt| d| d||||| j || j f}
|	d u rBt|
|||d}	n|	|}	|	| j	j
 }	|	S )Nz/You have passed a list of generators of length z+, but requested an effective batch size of z@. Make sure the batch size matches the length of the generators.)r   rU   rT   )free_noise_enabled_prepare_latents_free_noiser<   rZ   r?   rl   r9   r#   re   r7   init_noise_sigma)rC   rq   num_channels_latentsr   r   r   rT   rU   r   r0   r[   rF   rF   rG   prepare_latents  s*   
z#AnimateDiffPipeline.prepare_latentsc                 C      | j S r   _guidance_scaler   rF   rF   rG   guidance_scale     z"AnimateDiffPipeline.guidance_scalec                 C   r   r   )
_clip_skipr   rF   rF   rG   rI     r   zAnimateDiffPipeline.clip_skipc                 C   s
   | j dkS )Nr*   r   r   rF   rF   rG   ro   $  s   
z/AnimateDiffPipeline.do_classifier_free_guidancec                 C   r   r   )_cross_attention_kwargsr   rF   rF   rG   cross_attention_kwargs(  r   z*AnimateDiffPipeline.cross_attention_kwargsc                 C   r   r   )_num_timestepsr   rF   rF   rG   num_timesteps,  r   z!AnimateDiffPipeline.num_timesteps2   g      @r*   g        pilTr0   rm   r   r   r   num_inference_stepsr   rp   num_videos_per_promptr   r   r   r   output_typereturn_dictr   callback_on_step_endr   c           1      K   s  | dd}| dd}|durtddd |dur tddd |p)| jjj| j }|p3| jjj| j }d}| ||||||||||
 || _|| _|| _	|durYt
|trYd}n|durgt
|trgt|}n|jd }| j}| jdur{| jd	dnd}| j|||| j||||| jd
	\}}| jrt||g}|dus|dur| ||||| | j}| jj||d | jj}| jjj}| || |||||j||
|	}| |
|	} |dus|durd|ind}!| jr| j nd}"t!|"D ]}#| jr| "||#|||j|
\}}t|| _#t||| jj$  }$| j%| j#d}%t&|D ]\}&}'| jr,t|gd n|}(| j'|(|'}(| j|(|'|||!dj(})| jrS|))d\}*}+|*||+|*   })| jj*|)|'|fi | j+}|duri },|D ]
}-t, |- |,|-< qi|| |&|'|,}.|. d|}|. d|}|. d|}|&t|d ks|&d |$kr|&d | jj$ dkr|%-  |dur|&| dkr||&|'| qW d   n	1 sw   Y  q|dkr|}/n| .||}0| j/j0|0|d}/| 1  |s|/fS t2|/dS )u  
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated video.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated video.
            num_frames (`int`, *optional*, defaults to 16):
                The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds
                amounts to 2 seconds of video.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality videos at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
                `(batch_size, num_channel, num_frames, height, width)`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            ip_adapter_image: (`PipelineImageInput`, *optional*):
                Optional image input to work with IP Adapters.
            ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
                Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
                IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
                contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
                provided, embeddings are computed from the `ip_adapter_image` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead
                of a plain tuple.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            decode_chunk_size (`int`, defaults to `16`):
                The number of frames to decode at a time when calling `decode_latents` method.

        Examples:

        Returns:
            [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is
                returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
        callbackNr   z1.0.0zjPassing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`zpPassing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`r*   r   scale)r1   r2   rH   rI   r   r   )totalr%   )encoder_hidden_statesr   added_cond_kwargsr0   r1   r2   latent)r   r   )frames)3popr   r6   r@   sample_sizer9   r   r   r   r   r<   rY   rZ   r?   r[   _execution_devicer   getr|   ro   rI   r_   r   r   r7   set_timesteps	timestepsin_channelsr   rT   r   free_init_enabled_free_init_num_itersr   _apply_free_initr   orderprogress_barr   scale_model_inputr   r   r   prev_samplelocalsupdater   rB   postprocess_videomaybe_free_model_hooksr+   )1rC   rm   r   r   r   r   r   rp   r   r   r   r0   r1   r2   r   r   r   r   r   rI   r   r   r   kwargsr   r   rq   rU   text_encoder_lora_scaler   r   r   r   r   num_free_init_itersfree_init_iternum_warmup_stepsr   r   tlatent_model_input
noise_prednoise_pred_uncondnoise_pred_textcallback_kwargsr   callback_outputsr   video_tensorrF   rF   rG   __call__0  s  i


	

	
6
(
zAnimateDiffPipeline.__call__)NN)NNNNNr   )r   )NNNNNN)6__name__
__module____qualname____doc__model_cpu_offload_seq_optional_componentsr   r   r	   r
   r   r   r   r   r   r   r   r   r   r   r   r   r;   r   r_   r   r   r   r|   r   r   r   r   r   r   propertyr   rI   ro   r   r   no_gradr    EXAMPLE_DOC_STRINGrY   r   	Generatorr   boolr   r   r   r  __classcell__rF   rF   rD   rG   r,   E   s   	
*	

 
8-
F
 




	
r,   )=r   typingr   r   r   r   r   r   r_   transformersr   r	   r
   r   image_processorr   loadersr   r   r   modelsr   r   r   r   models.lorar   models.unets.unet_motion_modelr   
schedulersr   r   r   r   r   r   utilsr   r   r   r    r!   r"   utils.torch_utilsr#   rB   r$   free_init_utilsr&   free_noise_utilsr'   pipeline_utilsr(   r)   pipeline_outputr+   
get_loggerr	  rb   r  r,   rF   rF   rF   rG   <module>   s8      

