import inspect
import math
from typing import Any, Callable

import numpy as np
import torch
from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer, Qwen2VLProcessor

from ...image_processor import PipelineImageInput, VaeImageProcessor
from ...loaders import QwenImageLoraLoaderMixin
from ...models import AutoencoderKLQwenImage, QwenImageTransformer2DModel
from ...schedulers import FlowMatchEulerDiscreteScheduler
from ...utils import is_torch_xla_available, logging, replace_example_docstring
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline
from .pipeline_output import QwenImagePipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from PIL import Image
        >>> from diffusers import QwenImageLayeredPipeline
        >>> from diffusers.utils import load_image

        >>> pipe = QwenImageLayeredPipeline.from_pretrained("Qwen/Qwen-Image-Layered", torch_dtype=torch.bfloat16)
        >>> pipe.to("cuda")
        >>> image = load_image(
        ...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/yarn-art-pikachu.png"
        ... ).convert("RGBA")
        >>> prompt = ""
        >>> # Depending on the variant being used, the pipeline call will slightly vary.
        >>> # Refer to the pipeline documentation for more details.
        >>> images = pipe(
        ...     image,
        ...     prompt,
        ...     num_inference_steps=50,
        ...     true_cfg_scale=4.0,
        ...     layers=4,
        ...     resolution=640,
        ...     cfg_normalize=False,
        ...     use_en_prompt=True,
        ... ).images[0]
        >>> for i, image in enumerate(images):
        ...     image.save(f"{i}.out.png")
        ```
"""


def calculate_shift(
    image_seq_len,
    base_seq_len: int = 256,
    max_seq_len: int = 4096,
    base_shift: float = 0.5,
    max_shift: float = 1.15,
):
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    mu = image_seq_len * m + b
    return mu
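
# Quick sanity check (comment only): the shift interpolates linearly between the two
# anchors, so calculate_shift(256) == 0.5 and calculate_shift(4096) == 1.15; longer
# image token sequences are assigned a larger shift `mu`.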


def retrieve_timesteps(
    scheduler,
    num_inference_steps: int | None = None,
    device: str | torch.device | None = None,
    timesteps: list[int] | None = None,
    sigmas: list[float] | None = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`list[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`list[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
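
    Example:
        A minimal sketch (assuming a scheduler whose `set_timesteps` accepts a `mu` keyword, as
        `FlowMatchEulerDiscreteScheduler` does):

        ```py
        >>> timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=30, device="cuda", mu=0.8)
        ```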
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps


def retrieve_latents(
    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
):
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    elif hasattr(encoder_output, "latents"):
        return encoder_output.latents
    else:
        raise AttributeError("Could not access latents of provided encoder_output")


def calculate_dimensions(target_area, ratio):
    width = math.sqrt(target_area * ratio)
    height = width / ratio
    width = round(width / 32) * 32
    height = round(height / 32) * 32
    return width, height
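
# Worked example (comment only): for target_area=640*640 and ratio=4/3,
# width = sqrt(640 * 640 * 4 / 3) ~= 739 -> 736 and height ~= 554 -> 544,
# both snapped to multiples of 32 so they stay compatible with the VAE stride.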


class QwenImageLayeredPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin):
    r"""
    The Qwen-Image-Layered pipeline for image decomposing.

    Args:
        transformer ([`QwenImageTransformer2DModel`]):
            Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`Qwen2.5-VL-7B-Instruct`]):
            [Qwen2.5-VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct), specifically the
            [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) variant.
        tokenizer (`Qwen2Tokenizer`):
            Tokenizer of class
            [Qwen2Tokenizer](https://huggingface.co/docs/transformers/en/model_doc/qwen2#transformers.Qwen2Tokenizer).
    ztext_encoder->transformer->vaer>   prompt_embedsr3   vaetext_encoder	tokenizer	processortransformerc                    s   t    | j||||||d t| dd rdt| jj nd| _t| dd r,| jjj	nd| _
t| jd d| _|| _d| _d| _d	| _d
| _d| _d| _d S )N)rN   rO   rP   rQ   rR   r3   rN   r         vae_scale_factor   z<|im_start|>system
Describe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>
<|im_start|>user
{}<|im_end|>
<|im_start|>assistant
"   u  <|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
# 图像标注器
你是一个专业的图像标注器。请基于输入图像，撰写图注:
1.
使用自然、描述性的语言撰写图注，不要使用结构化形式或富文本形式。
2. 通过加入以下内容，丰富图注细节：
 - 对象的属性：如数量、颜色、形状、大小、位置、材质、状态、动作等
 -
对象间的视觉关系：如空间关系、功能关系、动作关系、从属关系、比较关系、因果关系等
 - 环境细节：例如天气、光照、颜色、纹理、气氛等
 - 文字内容：识别图像中清晰可见的文字，不做翻译和解释，用引号在图注中强调
3.
保持真实性与准确性：
 - 不要使用笼统的描述
 -
描述图像中所有可见的信息，但不要加入没有在图像中出现的内容
<|vision_start|><|image_pad|><|vision_end|><|im_end|>
<|im_start|>assistant
a_  <|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
# Image Annotator
You are a professional
image annotator. Please write an image caption based on the input image:
1. Write the caption using natural,
descriptive language without structured formats or rich text.
2. Enrich caption details by including: 
 - Object
attributes, such as quantity, color, shape, size, material, state, position, actions, and so on
 - Vision Relations
between objects, such as spatial relations, functional relations, possessive relations, attachment relations, action
relations, comparative relations, causal relations, and so on
 - Environmental details, such as weather, lighting,
colors, textures, atmosphere, and so on
 - Identify the text clearly visible in the image, without translation or
explanation, and highlight it in the caption with quotation marks
3. Maintain authenticity and accuracy:
 - Avoid
generalizations
 - Describe all visible information in the image, while do not add information not explicitly shown in
the image
<|vision_start|><|image_pad|><|vision_end|><|im_end|>
<|im_start|>assistant
   )super__init__register_modulesgetattrr2   rN   temperal_downsamplerV   configz_dimlatent_channelsr	   image_processorvl_processortokenizer_max_lengthprompt_template_encode prompt_template_encode_start_idximage_caption_prompt_cnimage_caption_prompt_endefault_sample_size)selfr3   rN   rO   rP   rQ   rR   r1   r   r$   r[      s&   
	"

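
    # Note: `prompt_template_encode_start_idx = 34` above is the token length of the fixed
    # system/user boilerplate in `prompt_template_encode`; `_get_qwen_prompt_embeds` drops
    # those leading tokens so that only the actual prompt tokens condition the transformer.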
    def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor):
        bool_mask = mask.bool()
        valid_lengths = bool_mask.sum(dim=1)
        selected = hidden_states[bool_mask]
        split_result = torch.split(selected, valid_lengths.tolist(), dim=0)
        return split_result

    def _get_qwen_prompt_embeds(
        self,
        prompt: str | list[str] | None = None,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt

        template = self.prompt_template_encode
        drop_idx = self.prompt_template_encode_start_idx
        txt = [template.format(e) for e in prompt]
        txt_tokens = self.tokenizer(txt, padding=True, return_tensors="pt").to(device)
        encoder_hidden_states = self.text_encoder(
            input_ids=txt_tokens.input_ids,
            attention_mask=txt_tokens.attention_mask,
            output_hidden_states=True,
        )
        hidden_states = encoder_hidden_states.hidden_states[-1]
        split_hidden_states = self._extract_masked_hidden(hidden_states, txt_tokens.attention_mask)
        # Drop the fixed template tokens so only the user prompt conditions the model.
        split_hidden_states = [e[drop_idx:] for e in split_hidden_states]
        attn_mask_list = [torch.ones(e.size(0), dtype=torch.long, device=e.device) for e in split_hidden_states]
        max_seq_len = max([e.size(0) for e in split_hidden_states])
        prompt_embeds = torch.stack(
            [torch.cat([u, u.new_zeros(max_seq_len - u.size(0), u.size(1))]) for u in split_hidden_states]
        )
        encoder_attention_mask = torch.stack(
            [torch.cat([u, u.new_zeros(max_seq_len - u.size(0))]) for u in attn_mask_list]
        )

        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        return prompt_embeds, encoder_attention_mask

    def encode_prompt(
        self,
        prompt: str | list[str],
        device: torch.device | None = None,
        num_images_per_prompt: int = 1,
        prompt_embeds: torch.Tensor | None = None,
        prompt_embeds_mask: torch.Tensor | None = None,
        max_sequence_length: int = 1024,
    ):
        r"""
        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            device (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
        """
        device = device or self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        batch_size = len(prompt) if prompt is not None else prompt_embeds.shape[0]

        if prompt_embeds is None:
            prompt_embeds, prompt_embeds_mask = self._get_qwen_prompt_embeds(prompt, device)

        prompt_embeds = prompt_embeds[:, :max_sequence_length]
        _, seq_len, _ = prompt_embeds.shape

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        if prompt_embeds_mask is not None:
            prompt_embeds_mask = prompt_embeds_mask[:, :max_sequence_length]
            prompt_embeds_mask = prompt_embeds_mask.repeat(1, num_images_per_prompt)
            prompt_embeds_mask = prompt_embeds_mask.view(batch_size * num_images_per_prompt, seq_len)
            # An all-ones mask carries no information, so drop it.
            if prompt_embeds_mask.all():
                prompt_embeds_mask = None

        return prompt_embeds, prompt_embeds_mask
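
    # Shape note (illustrative): with a single prompt and num_images_per_prompt=2,
    # `encode_prompt` returns `prompt_embeds` of shape (2, seq_len, hidden_dim) and a
    # matching (2, seq_len) mask, or None when every token position is valid.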
    def get_image_caption(self, prompt_image, use_en_prompt=True, device=None):
        if use_en_prompt:
            prompt = self.image_caption_prompt_en
        else:
            prompt = self.image_caption_prompt_cn
        model_inputs = self.vl_processor(text=prompt, images=prompt_image, padding=True, return_tensors="pt").to(
            device
        )
        generated_ids = self.text_encoder.generate(**model_inputs, max_new_tokens=512)
        generated_ids_trimmed = [
            out_ids[len(in_ids) :] for in_ids, out_ids in zip(model_inputs.input_ids, generated_ids)
        ]
        output_text = self.vl_processor.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )[0]
        return output_text.strip()

    def check_inputs(
        self,
        height,
        width,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        prompt_embeds_mask=None,
        negative_prompt_embeds_mask=None,
        callback_on_step_end_tensor_inputs=None,
        max_sequence_length=None,
    ):
        if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0:
            logger.warning(
                f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and"
                f" {width}. Dimensions will be resized accordingly"
            )

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found"
                f" {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and prompt_embeds_mask is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `prompt_embeds_mask` also has to be passed. Make sure to generate"
                " `prompt_embeds_mask` from the same text encoder that was used to generate `prompt_embeds`."
            )
        if negative_prompt_embeds is not None and negative_prompt_embeds_mask is None:
            raise ValueError(
                "If `negative_prompt_embeds` are provided, `negative_prompt_embeds_mask` also has to be passed. Make"
                " sure to generate `negative_prompt_embeds_mask` from the same text encoder that was used to generate"
                " `negative_prompt_embeds`."
            )

        if max_sequence_length is not None and max_sequence_length > 1024:
            raise ValueError(f"`max_sequence_length` cannot be greater than 1024 but is {max_sequence_length}")

    @staticmethod
    def _pack_latents(latents, batch_size, num_channels_latents, height, width, layers):
        # (B, L, C, H, W) -> (B, L * H/2 * W/2, C * 4): every 2x2 spatial patch of each
        # layer becomes one transformer token.
        latents = latents.view(batch_size, layers, num_channels_latents, height // 2, 2, width // 2, 2)
        latents = latents.permute(0, 1, 3, 5, 2, 4, 6)
        latents = latents.reshape(batch_size, layers * (height // 2) * (width // 2), num_channels_latents * 4)
        return latents

    @staticmethod
    def _unpack_latents(latents, height, width, layers, vae_scale_factor):
        # Inverse of `_pack_latents`: tokens back to a (B, C, layers, H, W) volume that the
        # temporal VAE can decode, with the layer axis taking the place of time.
        batch_size, num_patches, channels = latents.shape

        height = 2 * (int(height) // (vae_scale_factor * 2))
        width = 2 * (int(width) // (vae_scale_factor * 2))

        latents = latents.view(batch_size, layers, height // 2, width // 2, channels // 4, 2, 2)
        latents = latents.permute(0, 1, 4, 2, 5, 3, 6)
        latents = latents.reshape(batch_size, layers, channels // 4, height, width)
        latents = latents.permute(0, 2, 1, 3, 4)

        return latents

    def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
        if isinstance(generator, list):
            image_latents = [
                retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i], sample_mode="argmax")
                for i in range(image.shape[0])
            ]
            image_latents = torch.cat(image_latents, dim=0)
        else:
            image_latents = retrieve_latents(self.vae.encode(image), sample_mode="argmax")

        latents_mean = (
            torch.tensor(self.vae.config.latents_mean)
            .view(1, self.latent_channels, 1, 1, 1)
            .to(image_latents.device, image_latents.dtype)
        )
        latents_std = (
            torch.tensor(self.vae.config.latents_std)
            .view(1, self.latent_channels, 1, 1, 1)
            .to(image_latents.device, image_latents.dtype)
        )
        image_latents = (image_latents - latents_mean) / latents_std

        return image_latents

    def prepare_latents(
        self,
        image,
        batch_size,
        num_channels_latents,
        height,
        width,
        layers,
        dtype,
        device,
        generator,
        latents=None,
    ):
        height = 2 * (int(height) // (self.vae_scale_factor * 2))
        width = 2 * (int(width) // (self.vae_scale_factor * 2))

        # One extra latent "frame" is generated for the composite image on top of the
        # requested number of layers.
        shape = (batch_size, layers + 1, num_channels_latents, height, width)

        image_latents = None
        if image is not None:
            image = image.to(device=device, dtype=dtype)
            if image.shape[1] != self.latent_channels:
                image_latents = self._encode_vae_image(image=image, generator=generator)
            else:
                image_latents = image
            if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:
                # expand image_latents for batch_size
                additional_image_per_prompt = batch_size // image_latents.shape[0]
                image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0)
            elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0:
                raise ValueError(
                    f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts."
                )
            else:
                image_latents = torch.cat([image_latents], dim=0)

            image_latent_height, image_latent_width = image_latents.shape[-2:]
            image_latents = image_latents.permute(0, 2, 1, 3, 4)
            image_latents = self._pack_latents(
                image_latents, batch_size, num_channels_latents, image_latent_height, image_latent_width, 1
            )

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width, layers + 1)
        else:
            latents = latents.to(device=device, dtype=dtype)

        return latents, image_latents

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def current_timestep(self):
        return self._current_timestep

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: PipelineImageInput | None = None,
        prompt: str | list[str] | None = None,
        negative_prompt: str | list[str] | None = None,
        true_cfg_scale: float = 4.0,
        num_inference_steps: int = 50,
        sigmas: list[float] | None = None,
        guidance_scale: float | None = None,
        num_images_per_prompt: int = 1,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
        prompt_embeds: torch.Tensor | None = None,
        prompt_embeds_mask: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        negative_prompt_embeds_mask: torch.Tensor | None = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        attention_kwargs: dict[str, Any] | None = None,
        callback_on_step_end: Callable[[int, int, dict], None] | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        max_sequence_length: int = 512,
        layers: int = 4,
        resolution: int = 640,
        cfg_normalize: bool = False,
        use_en_prompt: bool = False,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `list[torch.Tensor]`, `list[PIL.Image.Image]`, or `list[np.ndarray]`):
                `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both
                numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list
                or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a
                list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image
                latents as `image`, but if passing latents directly it is not encoded again.
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead. If left empty, a caption is generated automatically from the input image.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is
                not greater than `1`).
            true_cfg_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free
                Diffusion Guidance](https://huggingface.co/papers/2207.12598). `true_cfg_scale` is defined as `w` of
                equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is
                enabled by setting `true_cfg_scale > 1` and a provided `negative_prompt`. Higher guidance scale
                encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of
                lower image quality.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            sigmas (`list[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
                will be used.
            guidance_scale (`float`, *optional*, defaults to None):
                A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance
                where the guidance scale is applied during inference through noise prediction rescaling, guidance
                distilled models take the guidance scale directly as an input parameter during forward pass. Guidance
                scale is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages the model to generate images
                that are closely linked to the text `prompt`, usually at the expense of lower image quality. This
                parameter in the pipeline is there to support future guidance-distilled models when they come up. It is
                ignored when not using guidance distilled models. To enable traditional classifier-free guidance,
                please pass `true_cfg_scale > 1.0` and `negative_prompt` (even an empty negative prompt like " " should
                enable classifier-free guidance computations).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.qwenimage.QwenImagePipelineOutput`] instead of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`.
            layers (`int`, *optional*, defaults to 4):
                The number of RGBA layers the input image is decomposed into.
            resolution (`int`, *optional*, defaults to 640):
                Resolution bucket to use; either 640 or 1024. Determines both the condition resolution and the
                output resolution.
            cfg_normalize (`bool`, *optional*, defaults to `False`):
                Whether to normalize the combined classifier-free-guidance noise prediction so it keeps the norm of
                the conditional prediction.
            use_en_prompt (`bool`, *optional*, defaults to `False`):
                Whether to generate the automatic caption in English (instead of Chinese) when no prompt is provided.

        Examples:

        Returns:
            [`~pipelines.qwenimage.QwenImagePipelineOutput`] or `tuple`:
            [`~pipelines.qwenimage.QwenImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When
            returning a tuple, the first element is a list with the generated images.
        r   )r   rW   z/resolution must be either 640 or 1024, but got r   r   )r   rM   r   r   r   r   r   NF)r{     )r   r'   ztrue_cfg_scale is passed as zS, but classifier-free guidance is not enabled since no negative_prompt is provided.z` negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1)rz   rM   r   r'   r   r   r   c                    s*   g | ]}d  j  d j  d fqS )r   r   rU   )r~   r   rJ   rj   rI   r   r$   r     s    z5QwenImageLayeredPipeline.__call__.<locals>.<listcomp>g      ?r   g      p@r   )r)   r#   z8guidance_scale is required for guidance-distilled model.r   zguidance_scale is passed as z8, but ignored since the model is not guidance-distilled.)totalrn   condi  )	rl   timestepguidanceencoder_hidden_states_maskr   
img_shapesr   additional_t_condr   uncondT)ro   keepdim)r   r>   rM   latentr   )r   )r   )Pr   r   r   rK   rV   r   r   r   r   r   r   rr   Tensorra   rb   resize
preprocess	unsqueezer   rO   r{   r   r   r2   r   r   r   r   rR   r_   in_channelsr   r   nplinspacer7   r3   r   orderr   guidance_embedsr*   fullfloat32expandr   r   r   set_begin_indexprogress_bar	enumerater   r   cache_contextnormstepbackendsmpsis_availablelocalspopupdateXLA_AVAILABLExm	mark_stepr   rN   r   r   r`   r'   r   r   r   decodesqueezepostprocessappendmaybe_free_model_hooksr   )Crj   r   rz   r   r   r   r&   r)   r   r   r:   r>   rM   r   r   r   r   r   r   r   r   r   r   r   r   
image_sizecalculated_widthcalculated_heightmultiple_ofr'   r   r   has_neg_promptdo_true_cfgr   r   r  r    base_seqlenr#   r(   num_warmup_stepsr  is_rgbr  r   tlatent_model_inputr  
noise_predneg_noise_pred	comb_pred	cond_norm
noise_normlatents_dtypecallback_kwargsr   callback_outputsr   r   r"   cfhwr   bidxr   r  r$   __call__  s  w
$







(








6K
& 
"
z!QwenImageLayeredPipeline.__call__)NNN)Nr   NNrW   )TN)NNNNNNNr   )0__name__
__module____qualname____doc__model_cpu_offload_seqr   r   r   r   r   r   r   r[   rr   r  ry   r   r   r'   r{   r   r   r   r   r   staticmethodr   r   	Generatorr   r   propertyr   r   r   r   r   no_gradr   EXAMPLE_DOC_STRINGr   floatrp   dictr   r   rE  __classcell__r   r   rk   r$   rL      s&   /


,


-
*

"
?







	
rL   )r   r   r   r   )NNNN)Nr8   )5r,   rD   typingr   r   numpyr  rr   transformersr   r   r   rb   r   r	   loadersr
   modelsr   r   
schedulersr   utilsr   r   r   utils.torch_utilsr   pipeline_utilsr   pipeline_outputr   torch_xla.core.xla_modelcore	xla_modelr&  r%  
get_loggerrF  r   rO  r   rP  r%   r   r'   r   r7   r  rL  rB   rK   rL   r   r   r   r$   <module>   sp   
#



=

