from dataclasses import dataclass

import flax
import jax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, logging
from .scheduling_utils_flax import FlaxSchedulerMixin


logger = logging.get_logger(__name__)


@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: int = None
    timesteps: jnp.ndarray | None = None
    schedule: jnp.ndarray | None = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        derivative (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
            Derivative of predicted original image sample (x_0).
        state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class.
    """

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """
    Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2 and
    the VE column of Table 1 from [1] for reference.

    [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
    https://huggingface.co/papers/2206.00364
    [2] Song, Yang, et al. "Score-based generative modeling through stochastic differential equations."
    https://huggingface.co/papers/2011.13456

    [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
    function, such as `sigma_min`. They can be accessed via `scheduler.config.sigma_min`.
    [`FlaxSchedulerMixin`] provides general loading and saving functionality via the
    [`FlaxSchedulerMixin.save_pretrained`] and [`~FlaxSchedulerMixin.from_pretrained`] functions.

    For more details on the parameters, see the original paper's Appendix E.: "Elucidating the Design Space of
    Diffusion-Based Generative Models." https://huggingface.co/papers/2206.00364. The grid search values used to find
    the optimal {s_noise, s_churn, s_min, s_max} for a specific model are described in Table 5 of the paper.

    Args:
        sigma_min (`float`): minimum noise magnitude
        sigma_max (`float`): maximum noise magnitude
        s_noise (`float`): the amount of additional noise to counteract loss of detail during sampling.
            A reasonable range is [1.000, 1.011].
        s_churn (`float`): the parameter controlling the overall amount of stochasticity.
            A reasonable range is [0, 100].
        s_min (`float`): the start value of the sigma range where we add noise (enable stochasticity).
            A reasonable range is [0, 10].
        s_max (`float`): the end value of the sigma range where we add noise.
            A reasonable range is [0.2, 80].
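
    Example:

        A minimal sketch of the Algorithm 2 sampling loop; `model_fn`, `shape`, and the number of inference steps
        below are illustrative placeholders, not part of this module:

        ```python
        import jax
        import jax.numpy as jnp

        from diffusers.schedulers.scheduling_karras_ve_flax import FlaxKarrasVeScheduler


        def model_fn(sample, sigma):
            # placeholder for a trained denoising model evaluated at (sample, sigma)
            return jnp.zeros_like(sample)


        shape = (1, 3, 64, 64)  # placeholder image batch shape
        scheduler = FlaxKarrasVeScheduler()
        state = scheduler.create_state()
        state = scheduler.set_timesteps(state, num_inference_steps=50)

        key = jax.random.PRNGKey(0)
        key, subkey = jax.random.split(key)
        sample = jax.random.normal(subkey, shape) * scheduler.config.sigma_max

        for t in state.timesteps:
            t = int(t)
            sigma = state.schedule[t]
            sigma_prev = state.schedule[t - 1] if t > 0 else 0.0

            # 1. temporarily increase the noise level ("churn")
            key, subkey = jax.random.split(key)
            sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, subkey)

            # 2. Euler step from sigma_hat down to sigma_prev
            output = scheduler.step(state, model_fn(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)

            # 3. second-order correction, skipped for the final step
            if sigma_prev != 0:
                output = scheduler.step_correct(
                    state,
                    model_fn(output.prev_sample, sigma_prev),
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    output.prev_sample,
                    output.derivative,
                )
            sample = output.prev_sample
        ```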
    """

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        logger.warning(
            "Flax classes are deprecated and will be removed in Diffusers v1.0.0. We "
            "recommend migrating to PyTorch classes or pinning your version of Diffusers."
        )

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: tuple = ()
    ) -> KarrasVeSchedulerState:
        """
        Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference.
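        Each schedule entry is
        `sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))` for the corresponding
        timestep `i`, i.e. a geometric interpolation from `sigma_max**2` down to `sigma_min**2`.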

        Args:
            state (`KarrasVeSchedulerState`):
                the `FlaxKarrasVeScheduler` state data class.
            num_inference_steps (`int`):
                the number of diffusion steps used when generating samples with a pre-trained model.

        r   Nc                    s<   g | ]}j jd  j jd  j jd   | d    qS )r   r	   )configr,   r+   ).0ir   r#   r   r   
        """
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()

        # geometric interpolation between sigma_max**2 and sigma_min**2 over the reversed timesteps
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: jax.Array,
    ) -> tuple[jnp.ndarray, float]:
        """
        Explicit Langevin-like "churn" step of adding noise to the sample according to a factor gamma_i ≥ 0 to reach a
        higher noise level sigma_hat = sigma_i + gamma_i*sigma_i.
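        The factor is `gamma_i = min(s_churn / num_inference_steps, 2**0.5 - 1)` when `s_min <= sigma_i <= s_max`
        and `0` otherwise; the added noise `eps ~ N(0, s_noise**2)` is scaled by `(sigma_hat**2 - sigma**2) ** 0.5`.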

        Args:
            state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class.
            sample (`jnp.ndarray`): the current sample in the diffusion chain.
            sigma (`float`): the current noise level `sigma_i` taken from the schedule.
            key (`jax.Array`): a JAX PRNG key used to draw the additional noise.
        """
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> FlaxKarrasVeOutput | tuple:
        """
        Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
        process from the learned model outputs (most often the predicted noise).
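        Following Algorithm 2 of Karras et al., the predicted original sample is
        `sample_hat + sigma_hat * model_output`, the derivative is
        `(sample_hat - pred_original_sample) / sigma_hat`, and the sample at the previous timestep is
        `sample_hat + (sigma_prev - sigma_hat) * derivative`.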

        Args:
            state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class.
            model_output (`jnp.ndarray`): direct output from learned diffusion model.
            sigma_hat (`float`): the temporarily increased noise level, `sigma_hat = sigma + gamma * sigma`, as
                returned by `add_noise_to_input`.
            sigma_prev (`float`): the next, lower noise level in the schedule.
            sample_hat (`jnp.ndarray`): the noised sample returned by `add_noise_to_input`.
            return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class

        Returns:
            [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] or `tuple`: Updated sample in the diffusion
            chain and derivative. [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] if `return_dict` is
            True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
        """
        # predicted x_0 from the model output, then the first-order (Euler) update
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> FlaxKarrasVeOutput | tuple:
        """
        Correct the predicted sample based on the `model_output` of the network, applying the second-order
        (Heun-like) correction of Algorithm 2.
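        The derivative is re-evaluated at `(sample_prev, sigma_prev)` and averaged with the first-order derivative:
        `prev_sample = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)`.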

        Args:
            state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class.
            model_output (`jnp.ndarray`): direct output from the learned diffusion model, evaluated at `sample_prev`
                and `sigma_prev`.
            sigma_hat (`float`): the temporarily increased noise level used for the Euler step.
            sigma_prev (`float`): the next, lower noise level in the schedule.
            sample_hat (`jnp.ndarray`): the noised sample the Euler step started from.
            sample_prev (`jnp.ndarray`): the sample predicted by the preceding `step` call.
            derivative (`jnp.ndarray`): the first-order derivative returned by the preceding `step` call.
            return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class

        Returns:
            [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] or `tuple`: the corrected sample in the
            diffusion chain together with the (unchanged) first-order derivative and the scheduler state.

        """
        # re-evaluate the derivative at the Euler estimate and average it with the first-order derivative
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()